// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

struct mv_cesa_dev *cesa_dev;

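/*
 * mv_cesa_dequeue_req_locked() - dequeue the next pending request
 *
 * Must be called with the engine lock held. The request leaving the
 * backlog (if any) is returned through @backlog so the caller can signal
 * it with -EINPROGRESS once the lock is dropped.
 */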
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	*backlog = crypto_get_backlog(&engine->queue);

	return crypto_dequeue_request(&engine->queue);
}

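/*
 * mv_cesa_rearm_engine() - feed an idle engine with the next request
 *
 * If no request is currently being processed, dequeue the next pending
 * one, claim the engine for it, notify the backlogged request (if any)
 * and launch the first processing step.
 */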
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}

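/*
 * mv_cesa_std_process() - handle completion for a standard (non-DMA) request
 *
 * The request-specific ->process() hook decides whether the request is
 * done (0: complete it and queue it for the final callback), still
 * running (-EINPROGRESS: launch the next step), or failed.
 */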
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}

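/*
 * mv_cesa_int_process() - dispatch status handling to the TDMA or
 * standard processing path, depending on whether a TDMA chain is active.
 */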
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain_hw.first && engine->chain_hw.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}

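/*
 * mv_cesa_complete_req() - run cleanup and complete a request
 *
 * Completion is done with bottom halves disabled, since crypto completion
 * callbacks expect to be invoked in a BH-disabled context.
 */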
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	crypto_request_complete(req, res);
	local_bh_enable();
}

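/*
 * mv_cesa_int() - CESA interrupt handler
 *
 * Loop as long as the engine reports unmasked interrupt causes:
 * acknowledge them, process the current request, release the engine slot
 * unless the request is still in progress, rearm the engine with the next
 * pending request, and drain the complete queue.
 */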
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}

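/*
 * mv_cesa_queue_req() - enqueue a crypto request on its target engine
 *
 * The request is queued under the engine lock; DMA-backed requests are
 * also chained to the engine's TDMA descriptor list. The engine is then
 * rearmed in case it was idle.
 */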
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}

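/*
 * mv_cesa_add_algs() - register the algorithms supported by this SoC
 *
 * On failure, any algorithm registered so far is unregistered again.
 */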
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
}

static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

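/*
 * mv_cesa_conf_mbus_windows() - set up the TDMA address decoding windows
 *
 * All four windows are cleared first, then one window is opened per DRAM
 * chip select so the TDMA engine can reach system memory.
 */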
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

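/*
 * mv_cesa_dev_dma_init() - create the DMA pools used by the TDMA path
 *
 * Only platforms with a TDMA engine need these pools; all of them are
 * device-managed and released automatically on detach.
 */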
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}

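/*
 * mv_cesa_get_sram() - acquire the local SRAM used by an engine
 *
 * An SRAM region referenced by the "marvell,crypto-srams" property is
 * preferred; otherwise the per-engine platform resource is remapped and
 * DMA-mapped directly.
 */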
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram_pool = gen_pool_dma_alloc(engine->pool,
						       cesa->sram_size,
						       &engine->sram_dma);
		if (engine->sram_pool)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	engine->sram = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = dma_map_resource(cesa->dev, res->start,
					    cesa->sram_size,
					    DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(cesa->dev, engine->sram_dma))
		return -ENOMEM;

	return 0;
}

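/* Release the SRAM resources acquired by mv_cesa_get_sram(). */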
static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (engine->pool)
		gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
			      cesa->sram_size);
	else
		dma_unmap_resource(cesa->dev, engine->sram_dma,
				   cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}

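/*
 * mv_cesa_probe() - probe and initialize a CESA device
 *
 * Only one CESA instance is supported system-wide. For each engine, the
 * probe reserves SRAM, grabs the optional gating clocks, configures the
 * mbus windows when TDMA is available, requests the interrupt with CPU
 * affinity spread across the engines, and initializes the crypto queue,
 * before finally registering the supported algorithms.
 */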
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	int irq, ret, i, cpu;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		caps = of_device_get_match_data(dev);
		if (!caps)
			return -ENOTSUPP;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	cesa->regs = devm_platform_ioremap_resource_byname(pdev, "regs");
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[16];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		engine->irq = irq;

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%u", i);
		engine->clk = devm_clk_get_optional_enabled(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get_optional_enabled(dev, NULL);
			if (IS_ERR(engine->clk)) {
				ret = PTR_ERR(engine->clk);
				goto err_cleanup;
			}
		}

		snprintf(res_name, sizeof(res_name), "cesaz%u", i);
		engine->zclk = devm_clk_get_optional_enabled(dev, res_name);
		if (IS_ERR(engine->zclk)) {
			ret = PTR_ERR(engine->zclk);
			goto err_cleanup;
		}

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		/* Set affinity */
		cpu = cpumask_local_spread(engine->id, NUMA_NO_NODE);
		irq_set_affinity_hint(irq, get_cpu_mask(cpu));

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++)
		mv_cesa_put_sram(pdev, i);

	return ret;
}

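/* Tear down: unregister the algorithms and release per-engine SRAM. */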
static void mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++)
		mv_cesa_put_sram(pdev, i);
}

static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.id_table = mv_cesa_plat_id_table,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");