1 /*
2  * Intel IXP4xx NPE-C crypto driver
3  *
4  * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of version 2 of the GNU General Public License
8  * as published by the Free Software Foundation.
9  *
10  */
11 
12 #include <linux/platform_device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/dmapool.h>
15 #include <linux/crypto.h>
16 #include <linux/kernel.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/interrupt.h>
19 #include <linux/spinlock.h>
20 #include <linux/gfp.h>
21 
22 #include <crypto/ctr.h>
23 #include <crypto/des.h>
24 #include <crypto/aes.h>
25 #include <crypto/sha.h>
26 #include <crypto/algapi.h>
27 #include <crypto/aead.h>
28 #include <crypto/authenc.h>
29 #include <crypto/scatterwalk.h>
30 
31 #include <mach/npe.h>
32 #include <mach/qmgr.h>
33 
34 #define MAX_KEYLEN 32
35 
36 /* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
37 #define NPE_CTX_LEN 80
38 #define AES_BLOCK128 16
39 
40 #define NPE_OP_HASH_VERIFY   0x01
41 #define NPE_OP_CCM_ENABLE    0x04
42 #define NPE_OP_CRYPT_ENABLE  0x08
43 #define NPE_OP_HASH_ENABLE   0x10
44 #define NPE_OP_NOT_IN_PLACE  0x20
45 #define NPE_OP_HMAC_DISABLE  0x40
46 #define NPE_OP_CRYPT_ENCRYPT 0x80
47 
48 #define NPE_OP_CCM_GEN_MIC   0xcc
49 #define NPE_OP_HASH_GEN_ICV  0x50
50 #define NPE_OP_ENC_GEN_KEY   0xc9
51 
52 #define MOD_ECB     0x0000
53 #define MOD_CTR     0x1000
54 #define MOD_CBC_ENC 0x2000
55 #define MOD_CBC_DEC 0x3000
56 #define MOD_CCM_ENC 0x4000
57 #define MOD_CCM_DEC 0x5000
58 
59 #define KEYLEN_128  4
60 #define KEYLEN_192  6
61 #define KEYLEN_256  8
62 
63 #define CIPH_DECR   0x0000
64 #define CIPH_ENCR   0x0400
65 
66 #define MOD_DES     0x0000
67 #define MOD_TDEA2   0x0100
68 #define MOD_3DES    0x0200
69 #define MOD_AES     0x0800
70 #define MOD_AES128  (0x0800 | KEYLEN_128)
71 #define MOD_AES192  (0x0900 | KEYLEN_192)
72 #define MOD_AES256  (0x0a00 | KEYLEN_256)
73 
74 #define MAX_IVLEN   16
75 #define NPE_ID      2  /* NPE C */
76 #define NPE_QLEN    16
77 /* Extra descriptors reserved for registering HMAC pads and reverse AES
78  * keys while the first NPE_QLEN crypt_ctl are busy */
79 #define NPE_QLEN_TOTAL 64
80 
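/* Hardware queue manager queues: crypt_ctl descriptors are handed to
 * NPE-C via SEND_QID and come back on RECV_QID once processed. */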
81 #define SEND_QID    29
82 #define RECV_QID    30
83 
84 #define CTL_FLAG_UNUSED		0x0000
85 #define CTL_FLAG_USED		0x1000
86 #define CTL_FLAG_PERFORM_ABLK	0x0001
87 #define CTL_FLAG_GEN_ICV	0x0002
88 #define CTL_FLAG_GEN_REVAES	0x0004
89 #define CTL_FLAG_PERFORM_AEAD	0x0008
90 #define CTL_FLAG_MASK		0x000f
91 
92 #define HMAC_IPAD_VALUE   0x36
93 #define HMAC_OPAD_VALUE   0x5C
94 #define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
95 
96 #define MD5_DIGEST_SIZE   16
97 
98 struct buffer_desc {
99 	u32 phys_next;
100 #ifdef __ARMEB__
101 	u16 buf_len;
102 	u16 pkt_len;
103 #else
104 	u16 pkt_len;
105 	u16 buf_len;
106 #endif
107 	u32 phys_addr;
108 	u32 __reserved[4];
109 	struct buffer_desc *next;
110 	enum dma_data_direction dir;
111 };
112 
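/* Descriptor shared with the NPE.  Everything up to and including
 * crypto_ctx is read by the firmware (hence the endian-dependent field
 * ordering); the trailing fields are used by the host only.
 * setup_crypt_desc() asserts that the whole structure is 64 bytes. */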
113 struct crypt_ctl {
114 #ifdef __ARMEB__
115 	u8 mode;		/* NPE_OP_*  operation mode */
116 	u8 init_len;
117 	u16 reserved;
118 #else
119 	u16 reserved;
120 	u8 init_len;
121 	u8 mode;		/* NPE_OP_*  operation mode */
122 #endif
123 	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
124 	u32 icv_rev_aes;	/* icv or rev aes */
125 	u32 src_buf;
126 	u32 dst_buf;
127 #ifdef __ARMEB__
128 	u16 auth_offs;		/* Authentication start offset */
129 	u16 auth_len;		/* Authentication data length */
130 	u16 crypt_offs;		/* Cryption start offset */
131 	u16 crypt_len;		/* Cryption data length */
132 #else
133 	u16 auth_len;		/* Authentication data length */
134 	u16 auth_offs;		/* Authentication start offset */
135 	u16 crypt_len;		/* Cryption data length */
136 	u16 crypt_offs;		/* Cryption start offset */
137 #endif
138 	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
139 	u32 crypto_ctx;		/* NPE Crypto Param structure address */
140 
141 	/* Used by Host: 4*4 bytes */
142 	unsigned ctl_flags;
143 	union {
144 		struct ablkcipher_request *ablk_req;
145 		struct aead_request *aead_req;
146 		struct crypto_tfm *tfm;
147 	} data;
148 	struct buffer_desc *regist_buf;
149 	u8 *regist_ptr;
150 };
151 
152 struct ablk_ctx {
153 	struct buffer_desc *src;
154 	struct buffer_desc *dst;
155 };
156 
157 struct aead_ctx {
158 	struct buffer_desc *buffer;
159 	struct scatterlist ivlist;
160 	/* used when the hmac is not on one sg entry */
161 	u8 *hmac_virt;
162 	int encrypt;
163 };
164 
165 struct ix_hash_algo {
166 	u32 cfgword;
167 	unsigned char *icv;
168 };
169 
170 struct ix_sa_dir {
171 	unsigned char *npe_ctx;
172 	dma_addr_t npe_ctx_phys;
173 	int npe_ctx_idx;
174 	u8 npe_mode;
175 };
176 
177 struct ixp_ctx {
178 	struct ix_sa_dir encrypt;
179 	struct ix_sa_dir decrypt;
180 	int authkey_len;
181 	u8 authkey[MAX_KEYLEN];
182 	int enckey_len;
183 	u8 enckey[MAX_KEYLEN];
184 	u8 salt[MAX_IVLEN];
185 	u8 nonce[CTR_RFC3686_NONCE_SIZE];
186 	unsigned salted;
187 	atomic_t configuring;
188 	struct completion completion;
189 };
190 
191 struct ixp_alg {
192 	struct crypto_alg crypto;
193 	const struct ix_hash_algo *hash;
194 	u32 cfg_enc;
195 	u32 cfg_dec;
196 
197 	int registered;
198 };
199 
200 static const struct ix_hash_algo hash_alg_md5 = {
201 	.cfgword	= 0xAA010004,
202 	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
203 			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
204 };
205 static const struct ix_hash_algo hash_alg_sha1 = {
206 	.cfgword	= 0x00000005,
207 	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
208 			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
209 };
210 
211 static struct npe *npe_c;
212 static struct dma_pool *buffer_pool = NULL;
213 static struct dma_pool *ctx_pool = NULL;
214 
215 static struct crypt_ctl *crypt_virt = NULL;
216 static dma_addr_t crypt_phys;
217 
218 static int support_aes = 1;
219 
220 static void dev_release(struct device *dev)
221 {
222 	return;
223 }
224 
225 #define DRIVER_NAME "ixp4xx_crypto"
226 static struct platform_device pseudo_dev = {
227 	.name = DRIVER_NAME,
228 	.id   = 0,
229 	.num_resources = 0,
230 	.dev  = {
231 		.coherent_dma_mask = DMA_BIT_MASK(32),
232 		.release = dev_release,
233 	}
234 };
235 
236 static struct device *dev = &pseudo_dev.dev;
237 
238 static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
239 {
240 	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
241 }
242 
243 static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
244 {
245 	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
246 }
247 
248 static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
249 {
250 	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
251 }
252 
253 static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
254 {
255 	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
256 }
257 
258 static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
259 {
260 	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
261 }
262 
263 static int setup_crypt_desc(void)
264 {
265 	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
266 	crypt_virt = dma_alloc_coherent(dev,
267 			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
268 			&crypt_phys, GFP_ATOMIC); /* may run under desc_lock */
269 	if (!crypt_virt)
270 		return -ENOMEM;
271 	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
272 	return 0;
273 }
274 
275 static spinlock_t desc_lock;
276 static struct crypt_ctl *get_crypt_desc(void)
277 {
278 	int i;
279 	static int idx = 0;
280 	unsigned long flags;
281 
282 	spin_lock_irqsave(&desc_lock, flags);
283 
284 	if (unlikely(!crypt_virt))
285 		setup_crypt_desc();
286 	if (unlikely(!crypt_virt)) {
287 		spin_unlock_irqrestore(&desc_lock, flags);
288 		return NULL;
289 	}
290 	i = idx;
291 	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
292 		if (++idx >= NPE_QLEN)
293 			idx = 0;
294 		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
295 		spin_unlock_irqrestore(&desc_lock, flags);
296 		return crypt_virt + i;
297 	} else {
298 		spin_unlock_irqrestore(&desc_lock, flags);
299 		return NULL;
300 	}
301 }
302 
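/* Fallback allocator for the small control requests (HMAC pad
 * registration, reverse AES key generation).  It may use the descriptors
 * reserved beyond NPE_QLEN, so these operations cannot be starved by
 * ordinary crypto traffic occupying the first NPE_QLEN entries. */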
303 static spinlock_t emerg_lock;
304 static struct crypt_ctl *get_crypt_desc_emerg(void)
305 {
306 	int i;
307 	static int idx = NPE_QLEN;
308 	struct crypt_ctl *desc;
309 	unsigned long flags;
310 
311 	desc = get_crypt_desc();
312 	if (desc)
313 		return desc;
314 	if (unlikely(!crypt_virt))
315 		return NULL;
316 
317 	spin_lock_irqsave(&emerg_lock, flags);
318 	i = idx;
319 	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
320 		if (++idx >= NPE_QLEN_TOTAL)
321 			idx = NPE_QLEN;
322 		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
323 		spin_unlock_irqrestore(&emerg_lock, flags);
324 		return crypt_virt + i;
325 	} else {
326 		spin_unlock_irqrestore(&emerg_lock, flags);
327 		return NULL;
328 	}
329 }
330 
331 static void free_buf_chain(struct device *dev, struct buffer_desc *buf, u32 phys)
332 {
333 	while (buf) {
334 		struct buffer_desc *buf1;
335 		u32 phys1;
336 
337 		buf1 = buf->next;
338 		phys1 = buf->phys_next;
339 		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
340 		dma_pool_free(buffer_pool, buf, phys);
341 		buf = buf1;
342 		phys = phys1;
343 	}
344 }
345 
346 static struct tasklet_struct crypto_done_tasklet;
347 
348 static void finish_scattered_hmac(struct crypt_ctl *crypt)
349 {
350 	struct aead_request *req = crypt->data.aead_req;
351 	struct aead_ctx *req_ctx = aead_request_ctx(req);
352 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
353 	int authsize = crypto_aead_authsize(tfm);
354 	int decryptlen = req->cryptlen - authsize;
355 
356 	if (req_ctx->encrypt) {
357 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
358 			req->src, decryptlen, authsize, 1);
359 	}
360 	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
361 }
362 
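/* Handle one completed descriptor popped from RECV_QID.  The NPE sets
 * bit 0 of the returned physical address on failure (e.g. a failed ICV
 * check); the driver reports this as -EBADMSG. */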
363 static void one_packet(dma_addr_t phys)
364 {
365 	struct crypt_ctl *crypt;
366 	struct ixp_ctx *ctx;
367 	int failed;
368 
369 	failed = phys & 0x1 ? -EBADMSG : 0;
370 	phys &= ~0x3;
371 	crypt = crypt_phys2virt(phys);
372 
373 	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
374 	case CTL_FLAG_PERFORM_AEAD: {
375 		struct aead_request *req = crypt->data.aead_req;
376 		struct aead_ctx *req_ctx = aead_request_ctx(req);
377 
378 		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
379 		if (req_ctx->hmac_virt) {
380 			finish_scattered_hmac(crypt);
381 		}
382 		req->base.complete(&req->base, failed);
383 		break;
384 	}
385 	case CTL_FLAG_PERFORM_ABLK: {
386 		struct ablkcipher_request *req = crypt->data.ablk_req;
387 		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
388 
389 		if (req_ctx->dst) {
390 			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
391 		}
392 		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
393 		req->base.complete(&req->base, failed);
394 		break;
395 	}
396 	case CTL_FLAG_GEN_ICV:
397 		ctx = crypto_tfm_ctx(crypt->data.tfm);
398 		dma_pool_free(ctx_pool, crypt->regist_ptr,
399 				crypt->regist_buf->phys_addr);
400 		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
401 		if (atomic_dec_and_test(&ctx->configuring))
402 			complete(&ctx->completion);
403 		break;
404 	case CTL_FLAG_GEN_REVAES:
405 		ctx = crypto_tfm_ctx(crypt->data.tfm);
406 		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
407 		if (atomic_dec_and_test(&ctx->configuring))
408 			complete(&ctx->completion);
409 		break;
410 	default:
411 		BUG();
412 	}
413 	crypt->ctl_flags = CTL_FLAG_UNUSED;
414 }
415 
416 static void irqhandler(void *_unused)
417 {
418 	tasklet_schedule(&crypto_done_tasklet);
419 }
420 
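/* Tasklet body: process up to four completed descriptors per run, then
 * reschedule itself; stops early (without rescheduling) once RECV_QID
 * is empty. */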
421 static void crypto_done_action(unsigned long arg)
422 {
423 	int i;
424 
425 	for(i=0; i<4; i++) {
426 		dma_addr_t phys = qmgr_get_entry(RECV_QID);
427 		if (!phys)
428 			return;
429 		one_packet(phys);
430 	}
431 	tasklet_schedule(&crypto_done_tasklet);
432 }
433 
434 static int init_ixp_crypto(void)
435 {
436 	int ret = -ENODEV;
437 	u32 msg[2] = { 0, 0 };
438 
439 	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
440 				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
441 		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
442 		return ret;
443 	}
444 	npe_c = npe_request(NPE_ID);
445 	if (!npe_c)
446 		return ret;
447 
448 	if (!npe_running(npe_c)) {
449 		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
450 		if (ret) {
451 			return ret;
452 		}
453 		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
454 			goto npe_error;
455 	} else {
456 		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
457 			goto npe_error;
458 
459 		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
460 			goto npe_error;
461 	}
462 
463 	switch ((msg[1]>>16) & 0xff) {
464 	case 3:
465 		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
466 				npe_name(npe_c));
467 		support_aes = 0;
468 		break;
469 	case 4:
470 	case 5:
471 		support_aes = 1;
472 		break;
473 	default:
474 		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
475 			npe_name(npe_c));
476 		return -ENODEV;
477 	}
478 	/* buffer_pool is sometimes also used to store the hmac,
479 	 * so make sure its entries are large enough
480 	 */
481 	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
482 	buffer_pool = dma_pool_create("buffer", dev,
483 			sizeof(struct buffer_desc), 32, 0);
484 	ret = -ENOMEM;
485 	if (!buffer_pool) {
486 		goto err;
487 	}
488 	ctx_pool = dma_pool_create("context", dev,
489 			NPE_CTX_LEN, 16, 0);
490 	if (!ctx_pool) {
491 		goto err;
492 	}
493 	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
494 				 "ixp_crypto:out", NULL);
495 	if (ret)
496 		goto err;
497 	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
498 				 "ixp_crypto:in", NULL);
499 	if (ret) {
500 		qmgr_release_queue(SEND_QID);
501 		goto err;
502 	}
503 	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
504 	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
505 
506 	qmgr_enable_irq(RECV_QID);
507 	return 0;
508 
509 npe_error:
510 	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
511 	ret = -EIO;
512 err:
513 	if (ctx_pool)
514 		dma_pool_destroy(ctx_pool);
515 	if (buffer_pool)
516 		dma_pool_destroy(buffer_pool);
517 	npe_release(npe_c);
518 	return ret;
519 }
520 
521 static void release_ixp_crypto(void)
522 {
523 	qmgr_disable_irq(RECV_QID);
524 	tasklet_kill(&crypto_done_tasklet);
525 
526 	qmgr_release_queue(SEND_QID);
527 	qmgr_release_queue(RECV_QID);
528 
529 	dma_pool_destroy(ctx_pool);
530 	dma_pool_destroy(buffer_pool);
531 
532 	npe_release(npe_c);
533 
534 	if (crypt_virt) {
535 		dma_free_coherent(dev,
536 			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
537 			crypt_virt, crypt_phys);
538 	}
539 	return;
540 }
541 
542 static void reset_sa_dir(struct ix_sa_dir *dir)
543 {
544 	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
545 	dir->npe_ctx_idx = 0;
546 	dir->npe_mode = 0;
547 }
548 
549 static int init_sa_dir(struct ix_sa_dir *dir)
550 {
551 	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
552 	if (!dir->npe_ctx) {
553 		return -ENOMEM;
554 	}
555 	reset_sa_dir(dir);
556 	return 0;
557 }
558 
559 static void free_sa_dir(struct ix_sa_dir *dir)
560 {
561 	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
562 	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
563 }
564 
565 static int init_tfm(struct crypto_tfm *tfm)
566 {
567 	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
568 	int ret;
569 
570 	atomic_set(&ctx->configuring, 0);
571 	ret = init_sa_dir(&ctx->encrypt);
572 	if (ret)
573 		return ret;
574 	ret = init_sa_dir(&ctx->decrypt);
575 	if (ret) {
576 		free_sa_dir(&ctx->encrypt);
577 	}
578 	return ret;
579 }
580 
581 static int init_tfm_ablk(struct crypto_tfm *tfm)
582 {
583 	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
584 	return init_tfm(tfm);
585 }
586 
587 static int init_tfm_aead(struct crypto_tfm *tfm)
588 {
589 	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
590 	return init_tfm(tfm);
591 }
592 
593 static void exit_tfm(struct crypto_tfm *tfm)
594 {
595 	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
596 	free_sa_dir(&ctx->encrypt);
597 	free_sa_dir(&ctx->decrypt);
598 }
599 
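/* Precompute one half of the HMAC state: XOR the key with the ipad/opad
 * byte, hand the padded block to the NPE (NPE_OP_HASH_GEN_ICV) and let it
 * write the intermediate digest to 'target' inside the NPE context.
 * Completion is signalled through the CTL_FLAG_GEN_ICV path. */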
600 static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
601 		int init_len, u32 ctx_addr, const u8 *key, int key_len)
602 {
603 	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
604 	struct crypt_ctl *crypt;
605 	struct buffer_desc *buf;
606 	int i;
607 	u8 *pad;
608 	u32 pad_phys, buf_phys;
609 
610 	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
611 	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
612 	if (!pad)
613 		return -ENOMEM;
614 	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
615 	if (!buf) {
616 		dma_pool_free(ctx_pool, pad, pad_phys);
617 		return -ENOMEM;
618 	}
619 	crypt = get_crypt_desc_emerg();
620 	if (!crypt) {
621 		dma_pool_free(ctx_pool, pad, pad_phys);
622 		dma_pool_free(buffer_pool, buf, buf_phys);
623 		return -EAGAIN;
624 	}
625 
626 	memcpy(pad, key, key_len);
627 	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
628 	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
629 		pad[i] ^= xpad;
630 	}
631 
632 	crypt->data.tfm = tfm;
633 	crypt->regist_ptr = pad;
634 	crypt->regist_buf = buf;
635 
636 	crypt->auth_offs = 0;
637 	crypt->auth_len = HMAC_PAD_BLOCKLEN;
638 	crypt->crypto_ctx = ctx_addr;
639 	crypt->src_buf = buf_phys;
640 	crypt->icv_rev_aes = target;
641 	crypt->mode = NPE_OP_HASH_GEN_ICV;
642 	crypt->init_len = init_len;
643 	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
644 
645 	buf->next = 0;
646 	buf->buf_len = HMAC_PAD_BLOCKLEN;
647 	buf->pkt_len = 0;
648 	buf->phys_addr = pad_phys;
649 
650 	atomic_inc(&ctx->configuring);
651 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
652 	BUG_ON(qmgr_stat_overflow(SEND_QID));
653 	return 0;
654 }
655 
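/* Write the hash config word and the algorithm's initial chaining value
 * into the per-direction NPE context, then queue generation of the HMAC
 * outer and inner pad digests at otarget and itarget respectively. */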
656 static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
657 		const u8 *key, int key_len, unsigned digest_len)
658 {
659 	u32 itarget, otarget, npe_ctx_addr;
660 	unsigned char *cinfo;
661 	int init_len, ret = 0;
662 	u32 cfgword;
663 	struct ix_sa_dir *dir;
664 	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
665 	const struct ix_hash_algo *algo;
666 
667 	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
668 	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
669 	algo = ix_hash(tfm);
670 
671 	/* write cfg word to cryptinfo */
672 	cfgword = algo->cfgword | ( authsize << 6); /* (authsize/4) << 8 */
673 #ifndef __ARMEB__
674 	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
675 #endif
676 	*(u32*)cinfo = cpu_to_be32(cfgword);
677 	cinfo += sizeof(cfgword);
678 
679 	/* write ICV to cryptinfo */
680 	memcpy(cinfo, algo->icv, digest_len);
681 	cinfo += digest_len;
682 
683 	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
684 				+ sizeof(algo->cfgword);
685 	otarget = itarget + digest_len;
686 	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
687 	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
688 
689 	dir->npe_ctx_idx += init_len;
690 	dir->npe_mode |= NPE_OP_HASH_ENABLE;
691 
692 	if (!encrypt)
693 		dir->npe_mode |= NPE_OP_HASH_VERIFY;
694 
695 	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
696 			init_len, npe_ctx_addr, key, key_len);
697 	if (ret)
698 		return ret;
699 	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
700 			init_len, npe_ctx_addr, key, key_len);
701 }
702 
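/* AES decryption needs the reverse (decryption) key schedule.  Ask the
 * NPE to derive it with NPE_OP_ENC_GEN_KEY; the result lands right after
 * the config word (npe_ctx_phys + 4).  The CTL_FLAG_GEN_REVAES completion
 * handler clears CIPH_ENCR in the context again afterwards. */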
703 static int gen_rev_aes_key(struct crypto_tfm *tfm)
704 {
705 	struct crypt_ctl *crypt;
706 	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
707 	struct ix_sa_dir *dir = &ctx->decrypt;
708 
709 	crypt = get_crypt_desc_emerg();
710 	if (!crypt) {
711 		return -EAGAIN;
712 	}
713 	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
714 
715 	crypt->data.tfm = tfm;
716 	crypt->crypt_offs = 0;
717 	crypt->crypt_len = AES_BLOCK128;
718 	crypt->src_buf = 0;
719 	crypt->crypto_ctx = dir->npe_ctx_phys;
720 	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
721 	crypt->mode = NPE_OP_ENC_GEN_KEY;
722 	crypt->init_len = dir->npe_ctx_idx;
723 	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
724 
725 	atomic_inc(&ctx->configuring);
726 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
727 	BUG_ON(qmgr_stat_overflow(SEND_QID));
728 	return 0;
729 }
730 
731 static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
732 		const u8 *key, int key_len)
733 {
734 	u8 *cinfo;
735 	u32 cipher_cfg;
736 	u32 keylen_cfg = 0;
737 	struct ix_sa_dir *dir;
738 	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
739 	u32 *flags = &tfm->crt_flags;
740 
741 	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
742 	cinfo = dir->npe_ctx;
743 
744 	if (encrypt) {
745 		cipher_cfg = cipher_cfg_enc(tfm);
746 		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
747 	} else {
748 		cipher_cfg = cipher_cfg_dec(tfm);
749 	}
750 	if (cipher_cfg & MOD_AES) {
751 		switch (key_len) {
752 			case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break;
753 			case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break;
754 			case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break;
755 			default:
756 				*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
757 				return -EINVAL;
758 		}
759 		cipher_cfg |= keylen_cfg;
760 	} else if (cipher_cfg & MOD_3DES) {
761 		const u32 *K = (const u32 *)key;
762 		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
763 			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
764 		{
765 			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
766 			return -EINVAL;
767 		}
768 	} else {
769 		u32 tmp[DES_EXPKEY_WORDS];
770 		if (des_ekey(tmp, key) == 0) {
771 			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
772 		}
773 	}
774 	/* write cfg word to cryptinfo */
775 	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
776 	cinfo += sizeof(cipher_cfg);
777 
778 	/* write cipher key to cryptinfo */
779 	memcpy(cinfo, key, key_len);
780 	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
781 	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
782 		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
783 		key_len = DES3_EDE_KEY_SIZE;
784 	}
785 	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
786 	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
787 	if ((cipher_cfg & MOD_AES) && !encrypt) {
788 		return gen_rev_aes_key(tfm);
789 	}
790 	return 0;
791 }
792 
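/* DMA-map each scatterlist segment and link it into a chain of
 * buffer_desc entries behind 'buf' (usually an on-stack "hook" whose
 * next/phys_next fields hand the head of the chain back to the caller).
 * Returns the last descriptor, or NULL if a pool allocation failed. */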
793 static struct buffer_desc *chainup_buffers(struct device *dev,
794 		struct scatterlist *sg,	unsigned nbytes,
795 		struct buffer_desc *buf, gfp_t flags,
796 		enum dma_data_direction dir)
797 {
798 	for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
799 		unsigned len = min(nbytes, sg->length);
800 		struct buffer_desc *next_buf;
801 		u32 next_buf_phys;
802 		void *ptr;
803 
804 		nbytes -= len;
805 		ptr = page_address(sg_page(sg)) + sg->offset;
806 		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
807 		if (!next_buf) {	/* pool exhausted: terminate the chain */
808 			buf->next = NULL;
809 			buf->phys_next = 0;
810 			return NULL;	/* callers treat NULL as -ENOMEM */
		}
811 		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
812 		buf->next = next_buf;
813 		buf->phys_next = next_buf_phys;
814 		buf = next_buf;
815 
816 		buf->phys_addr = sg_dma_address(sg);
817 		buf->buf_len = len;
818 		buf->dir = dir;
819 	}
820 	buf->next = NULL;
821 	buf->phys_next = 0;
822 	return buf;
823 }
824 
825 static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
826 			unsigned int key_len)
827 {
828 	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
829 	u32 *flags = &tfm->base.crt_flags;
830 	int ret;
831 
832 	init_completion(&ctx->completion);
833 	atomic_inc(&ctx->configuring);
834 
835 	reset_sa_dir(&ctx->encrypt);
836 	reset_sa_dir(&ctx->decrypt);
837 
838 	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
839 	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
840 
841 	ret = setup_cipher(&tfm->base, 0, key, key_len);
842 	if (ret)
843 		goto out;
844 	ret = setup_cipher(&tfm->base, 1, key, key_len);
845 	if (ret)
846 		goto out;
847 
848 	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
849 		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
850 			ret = -EINVAL;
851 		} else {
852 			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
853 		}
854 	}
855 out:
856 	if (!atomic_dec_and_test(&ctx->configuring))
857 		wait_for_completion(&ctx->completion);
858 	return ret;
859 }
860 
861 static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
862 		unsigned int key_len)
863 {
864 	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
865 
866 	/* the nonce is stored at the end of the key */
867 	if (key_len < CTR_RFC3686_NONCE_SIZE)
868 		return -EINVAL;
869 
870 	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
871 			CTR_RFC3686_NONCE_SIZE);
872 
873 	key_len -= CTR_RFC3686_NONCE_SIZE;
874 	return ablk_setkey(tfm, key, key_len);
875 }
876 
877 static int ablk_perform(struct ablkcipher_request *req, int encrypt)
878 {
879 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
880 	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
881 	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
882 	struct ix_sa_dir *dir;
883 	struct crypt_ctl *crypt;
884 	unsigned int nbytes = req->nbytes;
885 	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
886 	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
887 	struct buffer_desc src_hook;
888 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
889 				GFP_KERNEL : GFP_ATOMIC;
890 
891 	if (qmgr_stat_full(SEND_QID))
892 		return -EAGAIN;
893 	if (atomic_read(&ctx->configuring))
894 		return -EAGAIN;
895 
896 	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
897 
898 	crypt = get_crypt_desc();
899 	if (!crypt)
900 		return -ENOMEM;
901 
902 	crypt->data.ablk_req = req;
903 	crypt->crypto_ctx = dir->npe_ctx_phys;
904 	crypt->mode = dir->npe_mode;
905 	crypt->init_len = dir->npe_ctx_idx;
906 
907 	crypt->crypt_offs = 0;
908 	crypt->crypt_len = nbytes;
909 
910 	BUG_ON(ivsize && !req->info);
911 	memcpy(crypt->iv, req->info, ivsize);
912 	if (req->src != req->dst) {
913 		struct buffer_desc dst_hook;
914 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
915 		/* This was never tested by Intel
916 		 * for more than one dst buffer, I think. */
917 		BUG_ON(req->dst->length < nbytes);
918 		req_ctx->dst = NULL;
919 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
920 					flags, DMA_FROM_DEVICE))
921 			goto free_buf_dest;
922 		src_direction = DMA_TO_DEVICE;
923 		req_ctx->dst = dst_hook.next;
924 		crypt->dst_buf = dst_hook.phys_next;
925 	} else {
926 		req_ctx->dst = NULL;
927 	}
928 	req_ctx->src = NULL;
929 	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
930 				flags, src_direction))
931 		goto free_buf_src;
932 
933 	req_ctx->src = src_hook.next;
934 	crypt->src_buf = src_hook.phys_next;
935 	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
936 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
937 	BUG_ON(qmgr_stat_overflow(SEND_QID));
938 	return -EINPROGRESS;
939 
940 free_buf_src:
941 	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
942 free_buf_dest:
943 	if (req->src != req->dst) {
944 		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
945 	}
946 	crypt->ctl_flags = CTL_FLAG_UNUSED;
947 	return -ENOMEM;
948 }
949 
950 static int ablk_encrypt(struct ablkcipher_request *req)
951 {
952 	return ablk_perform(req, 1);
953 }
954 
955 static int ablk_decrypt(struct ablkcipher_request *req)
956 {
957 	return ablk_perform(req, 0);
958 }
959 
960 static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
961 {
962 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
963 	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
964 	u8 iv[CTR_RFC3686_BLOCK_SIZE];
965 	u8 *info = req->info;
966 	int ret;
967 
968 	/* set up counter block */
969 	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
970 	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
971 
972 	/* initialize counter portion of counter block */
973 	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
974 		cpu_to_be32(1);
975 
976 	req->info = iv;
977 	ret = ablk_perform(req, 1);
978 	req->info = info;
979 	return ret;
980 }
981 
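/* Return true if the authentication tag ('nbytes' bytes starting at
 * offset 'start') does not fit into a single scatterlist entry, in which
 * case aead_perform() bounces it through a contiguous buffer. */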
982 static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
983 		unsigned int nbytes)
984 {
985 	int offset = 0;
986 
987 	if (!nbytes)
988 		return 0;
989 
990 	for (;;) {
991 		if (start < offset + sg->length)
992 			break;
993 
994 		offset += sg->length;
995 		sg = scatterwalk_sg_next(sg);
996 	}
997 	return (start + nbytes > offset + sg->length);
998 }
999 
1000 static int aead_perform(struct aead_request *req, int encrypt,
1001 		int cryptoffset, int eff_cryptlen, u8 *iv)
1002 {
1003 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1004 	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1005 	unsigned ivsize = crypto_aead_ivsize(tfm);
1006 	unsigned authsize = crypto_aead_authsize(tfm);
1007 	struct ix_sa_dir *dir;
1008 	struct crypt_ctl *crypt;
1009 	unsigned int cryptlen;
1010 	struct buffer_desc *buf, src_hook;
1011 	struct aead_ctx *req_ctx = aead_request_ctx(req);
1012 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1013 				GFP_KERNEL : GFP_ATOMIC;
1014 
1015 	if (qmgr_stat_full(SEND_QID))
1016 		return -EAGAIN;
1017 	if (atomic_read(&ctx->configuring))
1018 		return -EAGAIN;
1019 
1020 	if (encrypt) {
1021 		dir = &ctx->encrypt;
1022 		cryptlen = req->cryptlen;
1023 	} else {
1024 		dir = &ctx->decrypt;
1025 		/* req->cryptlen includes the authsize when decrypting */
1026 		cryptlen = req->cryptlen - authsize;
1027 		eff_cryptlen -= authsize;
1028 	}
1029 	crypt = get_crypt_desc();
1030 	if (!crypt)
1031 		return -ENOMEM;
1032 
1033 	crypt->data.aead_req = req;
1034 	crypt->crypto_ctx = dir->npe_ctx_phys;
1035 	crypt->mode = dir->npe_mode;
1036 	crypt->init_len = dir->npe_ctx_idx;
1037 
1038 	crypt->crypt_offs = cryptoffset;
1039 	crypt->crypt_len = eff_cryptlen;
1040 
1041 	crypt->auth_offs = 0;
1042 	crypt->auth_len = req->assoclen + ivsize + cryptlen;
1043 	BUG_ON(ivsize && !req->iv);
1044 	memcpy(crypt->iv, req->iv, ivsize);
1045 
1046 	if (req->src != req->dst) {
1047 		BUG(); /* -ENOTSUP because of my laziness */
1048 	}
1049 
1050 	/* ASSOC data */
1051 	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
1052 		flags, DMA_TO_DEVICE);
1053 	req_ctx->buffer = src_hook.next;
1054 	crypt->src_buf = src_hook.phys_next;
1055 	if (!buf)
1056 		goto out;
1057 	/* IV */
1058 	sg_init_table(&req_ctx->ivlist, 1);
1059 	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
1060 	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
1061 			DMA_BIDIRECTIONAL);
1062 	if (!buf)
1063 		goto free_chain;
1064 	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
1065 		/* The hmac bytes are scattered across sg entries,
1066 		 * so copy them into a contiguous buffer */
1067 		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1068 				&crypt->icv_rev_aes);
1069 		if (unlikely(!req_ctx->hmac_virt))
1070 			goto free_chain;
1071 		if (!encrypt) {
1072 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
1073 				req->src, cryptlen, authsize, 0);
1074 		}
1075 		req_ctx->encrypt = encrypt;
1076 	} else {
1077 		req_ctx->hmac_virt = NULL;
1078 	}
1079 	/* Crypt */
1080 	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
1081 			DMA_BIDIRECTIONAL);
1082 	if (!buf)
1083 		goto free_hmac_virt;
1084 	if (!req_ctx->hmac_virt) {
1085 		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
1086 	}
1087 
1088 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1089 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1090 	BUG_ON(qmgr_stat_overflow(SEND_QID));
1091 	return -EINPROGRESS;
1092 free_hmac_virt:
1093 	if (req_ctx->hmac_virt) {
1094 		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
1095 				crypt->icv_rev_aes);
1096 	}
1097 free_chain:
1098 	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
1099 out:
1100 	crypt->ctl_flags = CTL_FLAG_UNUSED;
1101 	return -ENOMEM;
1102 }
1103 
1104 static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1105 {
1106 	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1107 	u32 *flags = &tfm->base.crt_flags;
1108 	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
1109 	int ret;
1110 
1111 	if (!ctx->enckey_len && !ctx->authkey_len)
1112 		return 0;
1113 	init_completion(&ctx->completion);
1114 	atomic_inc(&ctx->configuring);
1115 
1116 	reset_sa_dir(&ctx->encrypt);
1117 	reset_sa_dir(&ctx->decrypt);
1118 
1119 	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1120 	if (ret)
1121 		goto out;
1122 	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1123 	if (ret)
1124 		goto out;
1125 	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1126 			ctx->authkey_len, digest_len);
1127 	if (ret)
1128 		goto out;
1129 	ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
1130 			ctx->authkey_len, digest_len);
1131 	if (ret)
1132 		goto out;
1133 
1134 	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
1135 		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
1136 			ret = -EINVAL;
1137 			goto out;
1138 		} else {
1139 			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
1140 		}
1141 	}
1142 out:
1143 	if (!atomic_dec_and_test(&ctx->configuring))
1144 		wait_for_completion(&ctx->completion);
1145 	return ret;
1146 }
1147 
1148 static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1149 {
1150 	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
1151 
1152 	if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
1153 		return -EINVAL;
1154 	return aead_setup(tfm, authsize);
1155 }
1156 
1157 static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1158 			unsigned int keylen)
1159 {
1160 	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1161 	struct rtattr *rta = (struct rtattr *)key;
1162 	struct crypto_authenc_key_param *param;
1163 
1164 	if (!RTA_OK(rta, keylen))
1165 		goto badkey;
1166 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
1167 		goto badkey;
1168 	if (RTA_PAYLOAD(rta) < sizeof(*param))
1169 		goto badkey;
1170 
1171 	param = RTA_DATA(rta);
1172 	ctx->enckey_len = be32_to_cpu(param->enckeylen);
1173 
1174 	key += RTA_ALIGN(rta->rta_len);
1175 	keylen -= RTA_ALIGN(rta->rta_len);
1176 
1177 	if (keylen < ctx->enckey_len)
1178 		goto badkey;
1179 
1180 	ctx->authkey_len = keylen - ctx->enckey_len;
1181 	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
1182 	memcpy(ctx->authkey, key, ctx->authkey_len);
1183 
1184 	return aead_setup(tfm, crypto_aead_authsize(tfm));
1185 badkey:
1186 	ctx->enckey_len = 0;
1187 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1188 	return -EINVAL;
1189 }
1190 
1191 static int aead_encrypt(struct aead_request *req)
1192 {
1193 	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1194 	return aead_perform(req, 1, req->assoclen + ivsize,
1195 			req->cryptlen, req->iv);
1196 }
1197 
1198 static int aead_decrypt(struct aead_request *req)
1199 {
1200 	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1201 	return aead_perform(req, 0, req->assoclen + ivsize,
1202 			req->cryptlen, req->iv);
1203 }
1204 
1205 static int aead_givencrypt(struct aead_givcrypt_request *req)
1206 {
1207 	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
1208 	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1209 	unsigned len, ivsize = crypto_aead_ivsize(tfm);
1210 	__be64 seq;
1211 
1212 	/* copied from eseqiv.c */
1213 	if (!ctx->salted) {
1214 		get_random_bytes(ctx->salt, ivsize);
1215 		ctx->salted = 1;
1216 	}
1217 	memcpy(req->areq.iv, ctx->salt, ivsize);
1218 	len = ivsize;
1219 	if (ivsize > sizeof(u64)) {
1220 		memset(req->giv, 0, ivsize - sizeof(u64));
1221 		len = sizeof(u64);
1222 	}
1223 	seq = cpu_to_be64(req->seq);
1224 	memcpy(req->giv + ivsize - len, &seq, len);
1225 	return aead_perform(&req->areq, 1, req->areq.assoclen,
1226 			req->areq.cryptlen + ivsize, req->giv);
1227 }
1228 
1229 static struct ixp_alg ixp4xx_algos[] = {
1230 {
1231 	.crypto	= {
1232 		.cra_name	= "cbc(des)",
1233 		.cra_blocksize	= DES_BLOCK_SIZE,
1234 		.cra_u		= { .ablkcipher = {
1235 			.min_keysize	= DES_KEY_SIZE,
1236 			.max_keysize	= DES_KEY_SIZE,
1237 			.ivsize		= DES_BLOCK_SIZE,
1238 			.geniv		= "eseqiv",
1239 			}
1240 		}
1241 	},
1242 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1243 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1244 
1245 }, {
1246 	.crypto	= {
1247 		.cra_name	= "ecb(des)",
1248 		.cra_blocksize	= DES_BLOCK_SIZE,
1249 		.cra_u		= { .ablkcipher = {
1250 			.min_keysize	= DES_KEY_SIZE,
1251 			.max_keysize	= DES_KEY_SIZE,
1252 			}
1253 		}
1254 	},
1255 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1256 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1257 }, {
1258 	.crypto	= {
1259 		.cra_name	= "cbc(des3_ede)",
1260 		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1261 		.cra_u		= { .ablkcipher = {
1262 			.min_keysize	= DES3_EDE_KEY_SIZE,
1263 			.max_keysize	= DES3_EDE_KEY_SIZE,
1264 			.ivsize		= DES3_EDE_BLOCK_SIZE,
1265 			.geniv		= "eseqiv",
1266 			}
1267 		}
1268 	},
1269 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1270 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1271 }, {
1272 	.crypto	= {
1273 		.cra_name	= "ecb(des3_ede)",
1274 		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1275 		.cra_u		= { .ablkcipher = {
1276 			.min_keysize	= DES3_EDE_KEY_SIZE,
1277 			.max_keysize	= DES3_EDE_KEY_SIZE,
1278 			}
1279 		}
1280 	},
1281 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1282 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1283 }, {
1284 	.crypto	= {
1285 		.cra_name	= "cbc(aes)",
1286 		.cra_blocksize	= AES_BLOCK_SIZE,
1287 		.cra_u		= { .ablkcipher = {
1288 			.min_keysize	= AES_MIN_KEY_SIZE,
1289 			.max_keysize	= AES_MAX_KEY_SIZE,
1290 			.ivsize		= AES_BLOCK_SIZE,
1291 			.geniv		= "eseqiv",
1292 			}
1293 		}
1294 	},
1295 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1296 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1297 }, {
1298 	.crypto	= {
1299 		.cra_name	= "ecb(aes)",
1300 		.cra_blocksize	= AES_BLOCK_SIZE,
1301 		.cra_u		= { .ablkcipher = {
1302 			.min_keysize	= AES_MIN_KEY_SIZE,
1303 			.max_keysize	= AES_MAX_KEY_SIZE,
1304 			}
1305 		}
1306 	},
1307 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1308 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1309 }, {
1310 	.crypto	= {
1311 		.cra_name	= "ctr(aes)",
1312 		.cra_blocksize	= AES_BLOCK_SIZE,
1313 		.cra_u		= { .ablkcipher = {
1314 			.min_keysize	= AES_MIN_KEY_SIZE,
1315 			.max_keysize	= AES_MAX_KEY_SIZE,
1316 			.ivsize		= AES_BLOCK_SIZE,
1317 			.geniv		= "eseqiv",
1318 			}
1319 		}
1320 	},
1321 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1322 	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1323 }, {
1324 	.crypto	= {
1325 		.cra_name	= "rfc3686(ctr(aes))",
1326 		.cra_blocksize	= AES_BLOCK_SIZE,
1327 		.cra_u		= { .ablkcipher = {
1328 			.min_keysize	= AES_MIN_KEY_SIZE,
1329 			.max_keysize	= AES_MAX_KEY_SIZE,
1330 			.ivsize		= AES_BLOCK_SIZE,
1331 			.geniv		= "eseqiv",
1332 			.setkey		= ablk_rfc3686_setkey,
1333 			.encrypt	= ablk_rfc3686_crypt,
1334 			.decrypt	= ablk_rfc3686_crypt }
1335 		}
1336 	},
1337 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1338 	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1339 }, {
1340 	.crypto	= {
1341 		.cra_name	= "authenc(hmac(md5),cbc(des))",
1342 		.cra_blocksize	= DES_BLOCK_SIZE,
1343 		.cra_u		= { .aead = {
1344 			.ivsize		= DES_BLOCK_SIZE,
1345 			.maxauthsize	= MD5_DIGEST_SIZE,
1346 			}
1347 		}
1348 	},
1349 	.hash = &hash_alg_md5,
1350 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1351 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1352 }, {
1353 	.crypto	= {
1354 		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
1355 		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1356 		.cra_u		= { .aead = {
1357 			.ivsize		= DES3_EDE_BLOCK_SIZE,
1358 			.maxauthsize	= MD5_DIGEST_SIZE,
1359 			}
1360 		}
1361 	},
1362 	.hash = &hash_alg_md5,
1363 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1364 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1365 }, {
1366 	.crypto	= {
1367 		.cra_name	= "authenc(hmac(sha1),cbc(des))",
1368 		.cra_blocksize	= DES_BLOCK_SIZE,
1369 		.cra_u		= { .aead = {
1370 			.ivsize		= DES_BLOCK_SIZE,
1371 			.maxauthsize	= SHA1_DIGEST_SIZE,
1372 			}
1373 		}
1374 	},
1375 	.hash = &hash_alg_sha1,
1376 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1377 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1378 }, {
1379 	.crypto	= {
1380 		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
1381 		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1382 		.cra_u		= { .aead = {
1383 			.ivsize		= DES3_EDE_BLOCK_SIZE,
1384 			.maxauthsize	= SHA1_DIGEST_SIZE,
1385 			}
1386 		}
1387 	},
1388 	.hash = &hash_alg_sha1,
1389 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1390 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1391 }, {
1392 	.crypto	= {
1393 		.cra_name	= "authenc(hmac(md5),cbc(aes))",
1394 		.cra_blocksize	= AES_BLOCK_SIZE,
1395 		.cra_u		= { .aead = {
1396 			.ivsize		= AES_BLOCK_SIZE,
1397 			.maxauthsize	= MD5_DIGEST_SIZE,
1398 			}
1399 		}
1400 	},
1401 	.hash = &hash_alg_md5,
1402 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1403 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1404 }, {
1405 	.crypto	= {
1406 		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
1407 		.cra_blocksize	= AES_BLOCK_SIZE,
1408 		.cra_u		= { .aead = {
1409 			.ivsize		= AES_BLOCK_SIZE,
1410 			.maxauthsize	= SHA1_DIGEST_SIZE,
1411 			}
1412 		}
1413 	},
1414 	.hash = &hash_alg_sha1,
1415 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1416 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1417 } };
1418 
1419 #define IXP_POSTFIX "-ixp4xx"
1420 static int __init ixp_module_init(void)
1421 {
1422 	int num = ARRAY_SIZE(ixp4xx_algos);
1423 	int i, err;
1424 
1425 	if (platform_device_register(&pseudo_dev))
1426 		return -ENODEV;
1427 
1428 	spin_lock_init(&desc_lock);
1429 	spin_lock_init(&emerg_lock);
1430 
1431 	err = init_ixp_crypto();
1432 	if (err) {
1433 		platform_device_unregister(&pseudo_dev);
1434 		return err;
1435 	}
1436 	for (i=0; i< num; i++) {
1437 		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
1438 
1439 		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
1440 			"%s"IXP_POSTFIX, cra->cra_name) >=
1441 			CRYPTO_MAX_ALG_NAME)
1442 		{
1443 			continue;
1444 		}
1445 		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1446 			continue;
1447 		}
1448 		if (!ixp4xx_algos[i].hash) {
1449 			/* block ciphers */
1450 			cra->cra_type = &crypto_ablkcipher_type;
1451 			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1452 					 CRYPTO_ALG_ASYNC;
1453 			if (!cra->cra_ablkcipher.setkey)
1454 				cra->cra_ablkcipher.setkey = ablk_setkey;
1455 			if (!cra->cra_ablkcipher.encrypt)
1456 				cra->cra_ablkcipher.encrypt = ablk_encrypt;
1457 			if (!cra->cra_ablkcipher.decrypt)
1458 				cra->cra_ablkcipher.decrypt = ablk_decrypt;
1459 			cra->cra_init = init_tfm_ablk;
1460 		} else {
1461 			/* authenc */
1462 			cra->cra_type = &crypto_aead_type;
1463 			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
1464 					 CRYPTO_ALG_ASYNC;
1465 			cra->cra_aead.setkey = aead_setkey;
1466 			cra->cra_aead.setauthsize = aead_setauthsize;
1467 			cra->cra_aead.encrypt = aead_encrypt;
1468 			cra->cra_aead.decrypt = aead_decrypt;
1469 			cra->cra_aead.givencrypt = aead_givencrypt;
1470 			cra->cra_init = init_tfm_aead;
1471 		}
1472 		cra->cra_ctxsize = sizeof(struct ixp_ctx);
1473 		cra->cra_module = THIS_MODULE;
1474 		cra->cra_alignmask = 3;
1475 		cra->cra_priority = 300;
1476 		cra->cra_exit = exit_tfm;
1477 		if (crypto_register_alg(cra))
1478 			printk(KERN_ERR "Failed to register '%s'\n",
1479 				cra->cra_name);
1480 		else
1481 			ixp4xx_algos[i].registered = 1;
1482 	}
1483 	return 0;
1484 }
1485 
1486 static void __exit ixp_module_exit(void)
1487 {
1488 	int num = ARRAY_SIZE(ixp4xx_algos);
1489 	int i;
1490 
1491 	for (i=0; i< num; i++) {
1492 		if (ixp4xx_algos[i].registered)
1493 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1494 	}
1495 	release_ixp_crypto();
1496 	platform_device_unregister(&pseudo_dev);
1497 }
1498 
1499 module_init(ixp_module_init);
1500 module_exit(ixp_module_exit);
1501 
1502 MODULE_LICENSE("GPL");
1503 MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1504 MODULE_DESCRIPTION("IXP4xx hardware crypto");
1505 
1506