/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "mv_cesa.h"

#define MV_CESA	"MV-CESA:"
#define MAX_HW_HASH_SIZE	0xFFFF

/*
 * STM:
 *   /---------------------------------------\
 *   |					     | request complete
 *  \./					     |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *			    |		     | more scatter entries
 *			    \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it:		sg iterator for src
 * @dst_sg_it:		sg iterator for dst
 * @complete:		completion callback for the current request
 * @process:		callback that processes the next chunk of the request
 * @sg_src_left:	bytes left in src to process (scatter list)
 * @src_start:		offset to add to src start position (scatter list)
 * @crypt_len:		length of current hw crypt/hash process
 * @hw_nbytes:		total bytes to process in hw for this request
 * @copy_back:		whether to copy data back (crypt) or not (hash)
 * @sg_dst_left:	bytes left in dst to process in this scatter list
 * @dst_start:		offset to add to dst start position (scatter list)
 * @hw_processed_bytes:	number of bytes processed by hw (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of
 * the SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;
	void (*complete) (void);
	void (*process) (int is_first);

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	int hw_nbytes;
	/* dst mostly */
	int copy_back;
	int sg_dst_left;
	int dst_start;
	int hw_processed_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
	int has_sha1;
	int has_hmac_sha1;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

enum hash_op {
	COP_SHA1,
	COP_HMAC_SHA1
};

struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
	int count_add;
	enum hash_op op;
};

struct mv_req_hash_ctx {
	u64 count;
	u32 state[SHA1_DIGEST_SIZE / 4];
	u8 buffer[SHA1_BLOCK_SIZE];
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;
};

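/*
 * Descriptive note (added): the engine wants its AES decryption key taken
 * from the tail of the encryption key schedule. A worked example for
 * AES-128: key_len = 16, so key_pos = 40 and key_enc[40..43] is the last
 * round key of the 44-word schedule. For the longer keys the switch below
 * additionally copies four more words from further back in the schedule,
 * matching the layout the hardware expects.
 */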
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* copy only the len bytes supplied; shorter keys leave the tail unused */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}
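
/*
 * Illustrative only, not part of the original driver: a minimal sketch of
 * how a kernel consumer would reach mv_setkey_aes() through the generic
 * ablkcipher API. The "cbc(aes)" lookup resolves to "mv-cbc-aes" when this
 * driver wins on priority; error handling is trimmed for brevity.
 */
#if 0
static int example_mv_cbc_aes_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher *tfm;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);

	crypto_free_ablkcipher(tfm);
	return ret;
}
#endif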

static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
{
	int ret;
	void *sbuf;
	int copy_len;

	while (len) {
		if (!p->sg_src_left) {
			ret = sg_miter_next(&p->src_sg_it);
			BUG_ON(!ret);
			p->sg_src_left = p->src_sg_it.length;
			p->src_start = 0;
		}

		sbuf = p->src_sg_it.addr + p->src_start;

		copy_len = min(p->sg_src_left, len);
		memcpy(dbuf, sbuf, copy_len);

		p->src_start += copy_len;
		p->sg_src_left -= copy_len;

		len -= copy_len;
		dbuf += copy_len;
	}
}

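/*
 * Descriptive note (added): setup_data_in() tops up the SRAM input window
 * for the current pass. Each pass feeds the hardware at most max_req_size
 * bytes of the request; e.g. if max_req_size were 8 KiB (it is derived in
 * mv_probe() as sram_size - SRAM_CFG_SPACE), a 20 KiB request would be
 * processed in three hardware passes.
 */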
static void setup_data_in(void)
{
	struct req_progress *p = &cpg->p;
	int data_in_sram =
	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
			data_in_sram - p->crypt_len);
	p->crypt_len = data_in_sram;
}

static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	sg_miter_stop(&cpg->p.src_sg_it);
	sg_miter_stop(&cpg->p.dst_sg_it);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

static void mv_process_hash_current(int first_block)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct req_progress *p = &cpg->p;
	struct sec_accel_config op = { 0 };
	int is_last;

	switch (req_ctx->op) {
	case COP_SHA1:
	default:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
		break;
	case COP_HMAC_SHA1:
		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
				tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
		break;
	}

	op.mac_src_p = MAC_SRC_DATA_P(SRAM_DATA_IN_START) |
		MAC_SRC_TOTAL_LEN((u32)req_ctx->count);

	setup_data_in();

	op.mac_digest = MAC_DIGEST_P(SRAM_DIGEST_BUF) |
		MAC_FRAG_LEN(p->crypt_len);
	op.mac_iv = MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);

	is_last = req_ctx->last_chunk
		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
	if (req_ctx->first_hash) {
		if (is_last)
			op.config |= CFG_NOT_FRAG;
		else
			op.config |= CFG_FIRST_FRAG;

		req_ctx->first_hash = 0;
	} else {
		if (is_last)
			op.config |= CFG_LAST_FRAG;
		else
			op.config |= CFG_MID_FRAG;

		if (first_block) {
			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
		}
	}

	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
					  struct shash_desc *desc)
{
	int i;
	struct sha1_state shash_state;

	shash_state.count = ctx->count + ctx->count_add;
	for (i = 0; i < 5; i++)
		shash_state.state[i] = ctx->state[i];
	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
	return crypto_shash_import(desc, &shash_state);
}

static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now... */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}

static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else {
			mv_hash_final_fallback(req);
		}
	} else {
		ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
		ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
		ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
		ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
		ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
	}
}

static void dequeue_complete_req(void)
{
	struct crypto_async_request *req = cpg->cur_req;
	void *buf;
	int ret;

	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
	if (cpg->p.copy_back) {
		int need_copy_len = cpg->p.crypt_len;
		int sram_offset = 0;

		do {
			int dst_copy;

			if (!cpg->p.sg_dst_left) {
				ret = sg_miter_next(&cpg->p.dst_sg_it);
				BUG_ON(!ret);
				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
				cpg->p.dst_start = 0;
			}

			buf = cpg->p.dst_sg_it.addr;
			buf += cpg->p.dst_start;

			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);

			memcpy(buf,
			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
			       dst_copy);
			sram_offset += dst_copy;
			cpg->p.sg_dst_left -= dst_copy;
			need_copy_len -= dst_copy;
			cpg->p.dst_start += dst_copy;
		} while (need_copy_len > 0);
	}

	cpg->p.crypt_len = 0;

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		cpg->p.process(0);
	} else {
		cpg->p.complete();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->complete(req, 0);
		local_bh_enable();
	}
}

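/*
 * Descriptive note (added): count_sgs() walks the scatterlist as a plain
 * array, counting entries until total_bytes of payload are accounted for;
 * the result bounds the sg_miter iteration started by the callers below.
 */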
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;
	size_t cur_len;

	while (sl) {
		cur_len = sl[i].length;
		++i;
		if (total_bytes > cur_len)
			total_bytes -= cur_len;
		else
			break;
	}

	return i;
}

static void mv_start_new_crypt_req(struct ablkcipher_request *req)
{
	struct req_progress *p = &cpg->p;
	int num_sgs;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	p->hw_nbytes = req->nbytes;
	p->complete = mv_crypto_algo_completion;
	p->process = mv_process_current_q;
	p->copy_back = 1;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);

	mv_process_current_q(1);
}

static void mv_start_new_hash_req(struct ahash_request *req)
{
	struct req_progress *p = &cpg->p;
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
	int num_sgs, hw_bytes, old_extra_bytes, rc;

	cpg->cur_req = &req->base;
	memset(p, 0, sizeof(struct req_progress));
	hw_bytes = req->nbytes + ctx->extra_bytes;
	old_extra_bytes = ctx->extra_bytes;

	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
	if (ctx->extra_bytes != 0
	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
		hw_bytes -= ctx->extra_bytes;
	else
		ctx->extra_bytes = 0;

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	if (hw_bytes) {
		p->hw_nbytes = hw_bytes;
		p->complete = mv_hash_algo_completion;
		p->process = mv_process_hash_current;

		if (unlikely(old_extra_bytes)) {
			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
			       old_extra_bytes);
			p->crypt_len = old_extra_bytes;
		}

		mv_process_hash_current(1);
	} else {
		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
				ctx->extra_bytes - old_extra_bytes);
		sg_miter_stop(&p->src_sg_it);
		if (ctx->last_chunk)
			rc = mv_hash_final_fallback(req);
		else
			rc = 0;
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, rc);
		local_bh_enable();
	}
}

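/*
 * Descriptive note (added): the queue manager kthread drives the state
 * machine sketched at the top of this file. It dequeues completed work when
 * the IRQ handler has moved the engine to ENGINE_W_DEQUEUE, pulls the next
 * request off the crypto queue when the engine is idle, and sleeps otherwise.
 */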
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			if (async_req->tfm->__crt_alg->cra_type !=
			    &crypto_ahash_type) {
				struct ablkcipher_request *req =
				    ablkcipher_request_cast(async_req);
				mv_start_new_crypt_req(req);
			} else {
				struct ahash_request *req =
				    ahash_request_cast(async_req);
				mv_start_new_hash_req(req);
			}
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

static int mv_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}
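
/*
 * Descriptive note (added): the four cipher entry points below only stamp
 * the per-request context with mode and direction and funnel into
 * mv_handle_req(); the decrypt paths additionally derive the decryption
 * key on first use via compute_aes_dec_key().
 */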

static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(&req->base);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(&req->base);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
				 int is_last, unsigned int req_len,
				 int count_add)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->op = op;
	ctx->count = req_len;
	ctx->first_hash = 1;
	ctx->last_chunk = is_last;
	ctx->count_add = count_add;
}

static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
				   unsigned int req_len)
{
	ctx->last_chunk = is_last;
	ctx->count += req_len;
}

static int mv_hash_init(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
			     tfm_ctx->count_add);
	return 0;
}

static int mv_hash_update(struct ahash_request *req)
{
	if (!req->nbytes)
		return 0;

	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_final(struct ahash_request *req)
{
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	ahash_request_set_crypt(req, NULL, req->result, 0);
	mv_update_hash_req_ctx(ctx, 1, 0);
	return mv_handle_req(&req->base);
}

static int mv_hash_finup(struct ahash_request *req)
{
	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
	return mv_handle_req(&req->base);
}

static int mv_hash_digest(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
			     req->nbytes, tfm_ctx->count_add);
	return mv_handle_req(&req->base);
}
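
/*
 * Illustrative only, not part of the original driver: a minimal sketch,
 * assuming the caller already has its data in a scatterlist, of driving
 * the async hash entry points above. Real users must handle -EINPROGRESS
 * with a completion callback; that plumbing is elided here.
 */
#if 0
static int example_mv_sha1_digest(struct scatterlist *sg,
				  unsigned int nbytes, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *hreq;
	int ret;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hreq = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!hreq) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_crypt(hreq, sg, out, nbytes);
	ret = crypto_ahash_digest(hreq);	/* may return -EINPROGRESS */

	ahash_request_free(hreq);
	crypto_free_ahash(tfm);
	return ret;
}
#endif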

static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
			     const void *ostate)
{
	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
	int i;

	for (i = 0; i < 5; i++) {
		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
	}
}

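/*
 * Descriptive note (added): for HMAC the engine wants the partial SHA-1
 * states of H(K ^ ipad) and H(K ^ opad) as inner/outer IVs. mv_hash_setkey()
 * below reproduces the standard HMAC key preparation (hash keys longer than
 * the block size, pad, XOR with 0x36/0x5c) and stores the two one-block
 * states via mv_hash_init_ivs().
 */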
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	if (!ctx->base_hash)
		return 0;

	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/*
	 * Can't see a way to extract the ipad/opad from the fallback tfm
	 * so I'm basically copying code from the hmac module.
	 */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		if (keylen > bs) {
			int err;

			err = crypto_shash_digest(&desc.shash, key, keylen,
						  ipad);
			if (err)
				return err;

			keylen = ds;
		} else {
			memcpy(ipad, key, keylen);
		}

		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;
			opad[i] ^= 0x5c;
		}

		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}

static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
			    enum hash_op op, int count_add)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm = NULL;
	struct crypto_shash *base_hash = NULL;
	int err = -ENOMEM;

	ctx->op = op;
	ctx->count_add = count_add;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING MV_CESA
		       "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}
	ctx->fallback = fallback_tfm;

	if (base_hash_name) {
		/* Allocate a hash to compute the ipad/opad of hmac. */
		base_hash = crypto_alloc_shash(base_hash_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(base_hash)) {
			printk(KERN_WARNING MV_CESA
			       "Base driver '%s' could not be loaded!\n",
			       base_hash_name);
			err = PTR_ERR(base_hash);
			goto err_bad_base;
		}
	}
	ctx->base_hash = base_hash;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_req_hash_ctx) +
				 crypto_shash_descsize(ctx->fallback));
	return 0;
err_bad_base:
	crypto_free_shash(fallback_tfm);
out:
	return err;
}

static void mv_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
	if (ctx->base_hash)
		crypto_free_shash(ctx->base_hash);
}

static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}

static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}

irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_ecb,
			.decrypt	=	mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		=	AES_BLOCK_SIZE,
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_cbc,
			.decrypt	=	mv_dec_aes_cbc,
		},
	},
};

struct ahash_alg mv_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			.cra_init = mv_cra_hash_hmac_sha1_init,
			.cra_exit = mv_cra_hash_exit,
			.cra_module = THIS_MODULE,
		}
	}
};

static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, resource_size(res));
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = resource_size(res);
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = irq;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-ecb driver\n");
		goto err_irq;
	}

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret) {
		printk(KERN_WARNING MV_CESA
		       "Could not register aes-cbc driver\n");
		goto err_unreg_ecb;
	}

	ret = crypto_register_ahash(&mv_sha1_alg);
	if (ret == 0)
		cpg->has_sha1 = 1;
	else
		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");

	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
	if (ret == 0) {
		cpg->has_hmac_sha1 = 1;
	} else {
		printk(KERN_WARNING MV_CESA
		       "Could not register hmac-sha1 driver\n");
	}

	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	if (cp->has_sha1)
		crypto_unregister_ahash(&mv_sha1_alg);
	if (cp->has_hmac_sha1)
		crypto_unregister_ahash(&mv_hmac_sha1_alg);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

module_platform_driver(marvell_crypto);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");