// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 - 2021
 *
 * Richard van Schagen <vschagen@icloud.com>
 * Christian Marangi <ansuelsmth@gmail.com>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "eip93-cipher.h"
#include "eip93-hash.h"
#include "eip93-common.h"
#include "eip93-main.h"
#include "eip93-regs.h"

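/*
 * Translate the PE control/status error field of a result descriptor
 * into a Linux error code. Anti-replay (sequence number) errors are
 * deliberately mapped to 0 so that software can handle them.
 */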
int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err)
{
	u32 ext_err;

	if (!err)
		return 0;

	switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) {
	case EIP93_PE_CTRL_PE_AUTH_ERR:
	case EIP93_PE_CTRL_PE_PAD_ERR:
		return -EBADMSG;
	/* let software handle anti-replay errors */
	case EIP93_PE_CTRL_PE_SEQNUM_ERR:
		return 0;
	case EIP93_PE_CTRL_PE_EXT_ERR:
		break;
	default:
		dev_err(eip93->dev, "Unhandled error 0x%08x\n", err);
		return -EINVAL;
	}

	/* Parse additional ext errors */
	ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err);
	switch (ext_err) {
	case EIP93_PE_CTRL_PE_EXT_ERR_BUS:
	case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING:
		return -EIO;
	case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER:
		return -EACCES;
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO:
	case EIP93_PE_CTRL_PE_EXT_ERR_SPI:
		return -EINVAL;
	case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH:
	case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR:
		return -EBADMSG;
	default:
		dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err);
		return -EINVAL;
	}
}

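/*
 * Advance the ring write pointer and return the descriptor slot it
 * pointed to, or -ENOMEM when the ring is full.
 */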
static void *eip93_ring_next_wptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->write;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

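/*
 * Advance the ring read pointer and return the descriptor slot it
 * pointed to, or -ENOENT when the ring is empty.
 */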
static void *eip93_ring_next_rptr(struct eip93_device *eip93,
				  struct eip93_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}

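/*
 * Reserve one slot in both the result and command rings, clear the
 * result slot and copy the prepared command descriptor into place.
 */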
int eip93_put_descriptor(struct eip93_device *eip93,
			 struct eip93_descriptor *desc)
{
	struct eip93_descriptor *cdesc;
	struct eip93_descriptor *rdesc;

	rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(rdesc))
		return -ENOENT;

	cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return -ENOENT;

	memset(rdesc, 0, sizeof(struct eip93_descriptor));

	memcpy(cdesc, desc, sizeof(struct eip93_descriptor));

	return 0;
}

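/*
 * Retire the oldest command descriptor and return the corresponding
 * result descriptor, or -ENOENT if nothing is pending.
 */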
void *eip93_get_descriptor(struct eip93_device *eip93)
{
	struct eip93_descriptor *cdesc;
	void *ptr;

	cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr);
	if (IS_ERR(cdesc))
		return ERR_PTR(-ENOENT);

	memset(cdesc, 0, sizeof(struct eip93_descriptor));

	ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr);
	if (IS_ERR(ptr))
		return ERR_PTR(-ENOENT);

	return ptr;
}

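/* Free a bounce scatterlist previously set up by eip93_make_sg_copy() */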
static void eip93_free_sg_copy(const int len, struct scatterlist **sg)
{
	if (!*sg || !len)
		return;

	free_pages((unsigned long)sg_virt(*sg), get_order(len));
	kfree(*sg);
	*sg = NULL;
}

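/*
 * Allocate a single-entry scatterlist backed by a contiguous DMA-able
 * bounce buffer of @len bytes and, if @copy is set, copy @src into it.
 */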
static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst,
			      const u32 len, const bool copy)
{
	void *pages;

	*dst = kmalloc(sizeof(**dst), GFP_KERNEL);
	if (!*dst)
		return -ENOMEM;

	pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
					 get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	/* copy only as requested */
	if (copy)
		sg_copy_to_buffer(src, sg_nents(src), pages, len);

	return 0;
}

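/*
 * Check that every segment covering @len bytes is 32-bit aligned and a
 * multiple of the cipher block size, so the hardware can use it directly.
 */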
static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len,
				const int blksize)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, 4))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, blksize))
				return false;

			return true;
		}

		if (!IS_ALIGNED(sg->length, blksize))
			return false;

		len -= sg->length;
	}
	return false;
}

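/*
 * Validate a cipher request and, when the source or destination
 * scatterlist cannot be handed to the hardware as-is, replace it with
 * an aligned bounce-buffer copy.
 */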
int check_valid_request(struct eip93_cipher_reqctx *rctx)
{
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	u32 textsize = rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 blksize = rctx->blksize;
	u32 totlen_src = rctx->assoclen + rctx->textsize;
	u32 totlen_dst = rctx->assoclen + rctx->textsize;
	u32 copy_len;
	bool src_align, dst_align;
	int src_nents, dst_nents;
	int err = -EINVAL;

	if (!IS_CTR(rctx->flags)) {
		if (!IS_ALIGNED(textsize, blksize))
			return err;
	}

	if (authsize) {
		if (IS_ENCRYPT(rctx->flags))
			totlen_dst += authsize;
		else
			totlen_src += authsize;
	}

	src_nents = sg_nents_for_len(src, totlen_src);
	if (src_nents < 0)
		return src_nents;

	dst_nents = sg_nents_for_len(dst, totlen_dst);
	if (dst_nents < 0)
		return dst_nents;

	if (src == dst) {
		src_nents = max(src_nents, dst_nents);
		dst_nents = src_nents;
		if (unlikely((totlen_src || totlen_dst) && !src_nents))
			return err;

	} else {
		if (unlikely(totlen_src && !src_nents))
			return err;

		if (unlikely(totlen_dst && !dst_nents))
			return err;
	}

	if (authsize) {
		if (dst_nents == 1 && src_nents == 1) {
			src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
			if (src == dst)
				dst_align = src_align;
			else
				dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
		} else {
			src_align = false;
			dst_align = false;
		}
	} else {
		src_align = eip93_is_sg_aligned(src, totlen_src, blksize);
		if (src == dst)
			dst_align = src_align;
		else
			dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize);
	}

	copy_len = max(totlen_src, totlen_dst);
	if (!src_align) {
		err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true);
		if (err)
			return err;
	}

	if (!dst_align) {
		err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false);
		if (err)
			return err;
	}

	src_nents = sg_nents_for_len(rctx->sg_src, totlen_src);
	if (src_nents < 0)
		return src_nents;

	dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst);
	if (dst_nents < 0)
		return dst_nents;

	rctx->src_nents = src_nents;
	rctx->dst_nents = dst_nents;

	return 0;
}

/*
 * Set up the sa_record for a cipher/hash transform.
 * Even though sa_record is zero-initialized, keep the explicit "= 0"
 * assignments for readability.
 */
void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen,
			 const u32 flags)
{
	/* Reset cmd word */
	sa_record->sa_cmd0_word = 0;
	sa_record->sa_cmd1_word = 0;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE;
	if (!IS_ECB(flags))
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV;

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC;

	switch ((flags & EIP93_ALG_MASK)) {
	case EIP93_ALG_AES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES;
		sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH,
						      keylen >> 3);
		break;
	case EIP93_ALG_3DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES;
		break;
	case EIP93_ALG_DES:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL;
	}

	switch ((flags & EIP93_HASH_MASK)) {
	case EIP93_HASH_SHA256:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256;
		break;
	case EIP93_HASH_SHA224:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224;
		break;
	case EIP93_HASH_SHA1:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1;
		break;
	case EIP93_HASH_MD5:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5;
		break;
	default:
		sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO;

	switch ((flags & EIP93_MODE_MASK)) {
	case EIP93_MODE_CBC:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC;
		break;
	case EIP93_MODE_CTR:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR;
		break;
	case EIP93_MODE_ECB:
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB;
		break;
	}

	sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD;
	if (IS_HASH(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST;
	}

	if (IS_HMAC(flags)) {
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC;
		sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER;
	}

	sa_record->sa_spi = 0x0;
	sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF;
	sa_record->sa_seqmum_mask[1] = 0x0;
}

/*
 * Poor man's scatter/gather:
 * create a descriptor for every segment to avoid copying buffers.
 * For performance it is better to let the hardware perform multiple
 * DMA transfers than to coalesce the data in software.
 */
static int eip93_scatter_combine(struct eip93_device *eip93,
				 struct eip93_cipher_reqctx *rctx,
				 u32 datalen, u32 split, int offsetin)
{
	struct eip93_descriptor *cdesc = rctx->cdesc;
	struct scatterlist *sgsrc = rctx->sg_src;
	struct scatterlist *sgdst = rctx->sg_dst;
	unsigned int remainin = sg_dma_len(sgsrc);
	unsigned int remainout = sg_dma_len(sgdst);
	dma_addr_t saddr = sg_dma_address(sgsrc);
	dma_addr_t daddr = sg_dma_address(sgdst);
	dma_addr_t state_addr;
	u32 src_addr, dst_addr, len, n;
	bool nextin = false;
	bool nextout = false;
	int offsetout = 0;
	int err;

	if (IS_ECB(rctx->flags))
		rctx->sa_state_base = 0;

	if (split < datalen) {
		state_addr = rctx->sa_state_ctr_base;
		n = split;
	} else {
		state_addr = rctx->sa_state_base;
		n = datalen;
	}

	do {
		if (nextin) {
			sgsrc = sg_next(sgsrc);
			remainin = sg_dma_len(sgsrc);
			if (remainin == 0)
				continue;

			saddr = sg_dma_address(sgsrc);
			offsetin = 0;
			nextin = false;
		}

		if (nextout) {
			sgdst = sg_next(sgdst);
			remainout = sg_dma_len(sgdst);
			if (remainout == 0)
				continue;

			daddr = sg_dma_address(sgdst);
			offsetout = 0;
			nextout = false;
		}
		src_addr = saddr + offsetin;
		dst_addr = daddr + offsetout;

		if (remainin == remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				nextin = true;
				nextout = true;
			}
		} else if (remainin < remainout) {
			len = remainin;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetout += len;
				remainout -= len;
				nextin = true;
			}
		} else {
			len = remainout;
			if (len > n) {
				len = n;
				remainin -= n;
				remainout -= n;
				offsetin += n;
				offsetout += n;
			} else {
				offsetin += len;
				remainin -= len;
				nextout = true;
			}
		}
		n -= len;

		cdesc->src_addr = src_addr;
		cdesc->dst_addr = dst_addr;
		cdesc->state_addr = state_addr;
		cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY,
						   EIP93_PE_LENGTH_HOST_READY);
		cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len);

		if (n == 0) {
			n = datalen - split;
			split = datalen;
			state_addr = rctx->sa_state_base;
		}

		if (n == 0)
			cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS,
						     EIP93_DESC_LAST);

		/*
		 * Loop - Delay - No need to rollback
		 * Maybe refine by slowing down at EIP93_RING_BUSY
		 */
again:
		scoped_guard(spinlock_irqsave, &eip93->ring->write_lock)
			err = eip93_put_descriptor(eip93, cdesc);
		if (err) {
			usleep_range(EIP93_RING_BUSY_DELAY,
				     EIP93_RING_BUSY_DELAY * 2);
			goto again;
		}
		/* Writing new descriptor count starts DMA action */
		writel(1, eip93->base + EIP93_REG_PE_CD_COUNT);
	} while (n);

	return -EINPROGRESS;
}

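/*
 * Prepare the SA state (IV, plus a second state for the CTR counter
 * overflow case), map the source/destination scatterlists for DMA and
 * queue the request to the engine via eip93_scatter_combine().
 */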
int eip93_send_req(struct crypto_async_request *async,
		   const u8 *reqiv, struct eip93_cipher_reqctx *rctx)
{
	struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm);
	struct eip93_device *eip93 = ctx->eip93;
	struct scatterlist *src = rctx->sg_src;
	struct scatterlist *dst = rctx->sg_dst;
	struct sa_state *sa_state;
	struct eip93_descriptor cdesc;
	u32 flags = rctx->flags;
	int offsetin = 0, err;
	u32 datalen = rctx->assoclen + rctx->textsize;
	u32 split = datalen;
	u32 start, end, ctr, blocks;
	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	int crypto_async_idr;

	rctx->sa_state_ctr = NULL;
	rctx->sa_state = NULL;

	if (IS_ECB(flags))
		goto skip_iv;

	memcpy(iv, reqiv, rctx->ivsize);

	rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL);
	if (!rctx->sa_state)
		return -ENOMEM;

	sa_state = rctx->sa_state;

	memcpy(sa_state->state_iv, iv, rctx->ivsize);
	if (IS_RFC3686(flags)) {
		sa_state->state_iv[0] = ctx->sa_nonce;
		sa_state->state_iv[1] = iv[0];
		sa_state->state_iv[2] = iv[1];
		sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1);
	} else if (!IS_HMAC(flags) && IS_CTR(flags)) {
		/* Compute data length. */
		blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE);
		ctr = be32_to_cpu((__be32 __force)iv[3]);
		/* Check 32bit counter overflow. */
		start = ctr;
		end = start + blocks - 1;
		if (end < start) {
			split = AES_BLOCK_SIZE * -start;
			/*
			 * Increment the counter manually to cope with
			 * the hardware counter overflow.
			 */
			iv[3] = 0xffffffff;
			crypto_inc((u8 *)iv, AES_BLOCK_SIZE);

			rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr),
						     GFP_KERNEL);
			if (!rctx->sa_state_ctr) {
				err = -ENOMEM;
				goto free_sa_state;
			}

			memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize);
			memcpy(sa_state->state_iv, iv, rctx->ivsize);

			rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr,
								 sizeof(*rctx->sa_state_ctr),
								 DMA_TO_DEVICE);
			err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base);
			if (err)
				goto free_sa_state_ctr;
		}
	}

	rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state,
					     sizeof(*rctx->sa_state), DMA_TO_DEVICE);
	err = dma_mapping_error(eip93->dev, rctx->sa_state_base);
	if (err)
		goto free_sa_state_ctr_dma;

skip_iv:

	cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN,
					     EIP93_PE_CTRL_HOST_READY);
	cdesc.sa_addr = rctx->sa_record_base;
	cdesc.arc4_addr = 0;

	scoped_guard(spinlock_bh, &eip93->ring->idr_lock)
		crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0,
					     EIP93_RING_NUM - 1, GFP_ATOMIC);

	cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) |
			FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags);

	rctx->cdesc = &cdesc;

	/*
	 * Map DMA_BIDIRECTIONAL to invalidate the cache on the destination;
	 * this implies __dma_cache_wback_inv.
	 */
	if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) {
		err = -ENOMEM;
		goto free_sa_state_dma;
	}

	if (src != dst &&
	    !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) {
		err = -ENOMEM;
		goto free_sg_dma;
	}

	return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin);

	/* Unwind in reverse order of setup; skip unmaps that never happened */
free_sg_dma:
	dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL);
free_sa_state_dma:
	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_TO_DEVICE);
free_sa_state_ctr_dma:
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_TO_DEVICE);
free_sa_state_ctr:
	kfree(rctx->sa_state_ctr);
free_sa_state:
	kfree(rctx->sa_state);

	return err;
}

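/*
 * Undo the DMA mappings of a completed request, convert the
 * authentication tag to host byte order and copy the result back to
 * the caller's scatterlist if a bounce buffer was used.
 */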
void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
		     struct scatterlist *reqsrc, struct scatterlist *reqdst)
{
	u32 len = rctx->assoclen + rctx->textsize;
	u32 authsize = rctx->authsize;
	u32 flags = rctx->flags;
	u32 *otag;
	int i;

	if (rctx->sg_src == rctx->sg_dst) {
		dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
			     DMA_BIDIRECTIONAL);
		goto process_tag;
	}

	dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents,
		     DMA_TO_DEVICE);

	if (rctx->sg_src != reqsrc)
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src);

	dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents,
		     DMA_BIDIRECTIONAL);

	/* SHA tags need conversion from net-to-host */
process_tag:
	if (IS_DECRYPT(flags))
		authsize = 0;

	if (authsize) {
		if (!IS_HASH_MD5(flags)) {
			otag = sg_virt(rctx->sg_dst) + len;
			for (i = 0; i < (authsize / 4); i++)
				otag[i] = be32_to_cpu((__be32 __force)otag[i]);
		}
	}

	if (rctx->sg_dst != reqdst) {
		sg_copy_from_buffer(reqdst, sg_nents(reqdst),
				    sg_virt(rctx->sg_dst), len + authsize);
		eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst);
	}
}

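/*
 * Post-processing once the engine has finished: unmap the SA state
 * buffers, hand the updated IV back to the caller and free the state.
 */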
void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx,
			 u8 *reqiv)
{
	if (rctx->sa_state_ctr)
		dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base,
				 sizeof(*rctx->sa_state_ctr),
				 DMA_FROM_DEVICE);

	if (rctx->sa_state)
		dma_unmap_single(eip93->dev, rctx->sa_state_base,
				 sizeof(*rctx->sa_state),
				 DMA_FROM_DEVICE);

	if (!IS_ECB(rctx->flags))
		memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize);

	kfree(rctx->sa_state_ctr);
	kfree(rctx->sa_state);
}

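/*
 * Precompute the HMAC inner and outer pads: XOR the (possibly
 * pre-hashed) key with the ipad/opad constants and run a partial
 * (non-finalized) hash over each block using the EIP93's own hash
 * algorithms.
 */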
int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen,
		      unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad,
		      bool skip_ipad)
{
	u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE];
	struct crypto_ahash *ahash_tfm;
	struct eip93_hash_reqctx *rctx;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg[1];
	const char *alg_name;
	int i, ret;

	switch (ctx_flags & EIP93_HASH_MASK) {
	case EIP93_HASH_SHA256:
		alg_name = "sha256-eip93";
		break;
	case EIP93_HASH_SHA224:
		alg_name = "sha224-eip93";
		break;
	case EIP93_HASH_SHA1:
		alg_name = "sha1-eip93";
		break;
	case EIP93_HASH_MD5:
		alg_name = "md5-eip93";
		break;
	default: /* Impossible */
		return -EINVAL;
	}

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC);
	if (!req) {
		ret = -ENOMEM;
		goto err_ahash;
	}

	rctx = ahash_request_ctx_dma(req);
	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* Hash the key if > SHA256_BLOCK_SIZE */
	if (keylen > SHA256_BLOCK_SIZE) {
		sg_init_one(&sg[0], key, keylen);

		ahash_request_set_crypt(req, sg, ipad, keylen);
		ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
		if (ret)
			goto err_req;

		keylen = hashlen;
	} else {
		memcpy(ipad, key, keylen);
	}

	/* Zero-pad the key and copy it to opad */
	memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen);
	memcpy(opad, ipad, SHA256_BLOCK_SIZE);

	/* Pad with HMAC constants */
	for (i = 0; i < SHA256_BLOCK_SIZE; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	if (skip_ipad) {
		memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE);
	} else {
		/* Hash ipad */
		sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE);
		ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE);
		ret = crypto_ahash_init(req);
		if (ret)
			goto err_req;

		/* Disable HASH_FINALIZE for ipad hash */
		rctx->partial_hash = true;

		ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
		if (ret)
			goto err_req;
	}

	/* Hash opad */
	sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE);
	ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE);
	ret = crypto_ahash_init(req);
	if (ret)
		goto err_req;

	/* Disable HASH_FINALIZE for opad hash */
	rctx->partial_hash = true;

	ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
	if (ret)
		goto err_req;

	if (!IS_HASH_MD5(ctx_flags)) {
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) {
			u32 *ipad_hash = (u32 *)dest_ipad;
			u32 *opad_hash = (u32 *)dest_opad;

			if (!skip_ipad)
				ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]);
			opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]);
		}
	}

err_req:
	ahash_request_free(req);
err_ahash:
	crypto_free_ahash(ahash_tfm);

	return ret;
}