1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2019 - 2021
4 *
5 * Richard van Schagen <vschagen@icloud.com>
6 * Christian Marangi <ansuelsmth@gmail.com
7 */
8
9 #include <linux/atomic.h>
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/spinlock.h>
18 #include <crypto/aes.h>
19 #include <crypto/ctr.h>
20
21 #include "eip93-main.h"
22 #include "eip93-regs.h"
23 #include "eip93-common.h"
24 #include "eip93-cipher.h"
25 #include "eip93-aes.h"
26 #include "eip93-des.h"
27 #include "eip93-aead.h"
28 #include "eip93-hash.h"
29
/*
 * Table of all algorithm templates this driver can expose. Entries are
 * filtered at probe time by eip93_register_algs() against the feature
 * bits the packet engine reports in EIP93_REG_PE_OPTION_1.
 */
static struct eip93_alg_template *eip93_algs[] = {
	&eip93_alg_ecb_des,
	&eip93_alg_cbc_des,
	&eip93_alg_ecb_des3_ede,
	&eip93_alg_cbc_des3_ede,
	&eip93_alg_ecb_aes,
	&eip93_alg_cbc_aes,
	&eip93_alg_ctr_aes,
	&eip93_alg_rfc3686_aes,
	&eip93_alg_authenc_hmac_md5_cbc_des,
	&eip93_alg_authenc_hmac_sha1_cbc_des,
	&eip93_alg_authenc_hmac_sha224_cbc_des,
	&eip93_alg_authenc_hmac_sha256_cbc_des,
	&eip93_alg_authenc_hmac_md5_cbc_des3_ede,
	&eip93_alg_authenc_hmac_sha1_cbc_des3_ede,
	&eip93_alg_authenc_hmac_sha224_cbc_des3_ede,
	&eip93_alg_authenc_hmac_sha256_cbc_des3_ede,
	&eip93_alg_authenc_hmac_md5_cbc_aes,
	&eip93_alg_authenc_hmac_sha1_cbc_aes,
	&eip93_alg_authenc_hmac_sha224_cbc_aes,
	&eip93_alg_authenc_hmac_sha256_cbc_aes,
	&eip93_alg_authenc_hmac_md5_rfc3686_aes,
	&eip93_alg_authenc_hmac_sha1_rfc3686_aes,
	&eip93_alg_authenc_hmac_sha224_rfc3686_aes,
	&eip93_alg_authenc_hmac_sha256_rfc3686_aes,
	&eip93_alg_md5,
	&eip93_alg_sha1,
	&eip93_alg_sha224,
	&eip93_alg_sha256,
	&eip93_alg_hmac_md5,
	&eip93_alg_hmac_sha1,
	&eip93_alg_hmac_sha224,
	&eip93_alg_hmac_sha256,
};
64
/* Mask the interrupt sources selected by @mask (write-1-to-disable register). */
inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask)
{
	__raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE);
}
69
/* Unmask the interrupt sources selected by @mask (write-1-to-enable register). */
inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask)
{
	__raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE);
}
74
/* Acknowledge (clear) the pending interrupt sources selected by @mask. */
inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask)
{
	__raw_writel(mask, eip93->base + EIP93_REG_INT_CLR);
}
79
eip93_unregister_algs(unsigned int i)80 static void eip93_unregister_algs(unsigned int i)
81 {
82 unsigned int j;
83
84 for (j = 0; j < i; j++) {
85 switch (eip93_algs[j]->type) {
86 case EIP93_ALG_TYPE_SKCIPHER:
87 crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
88 break;
89 case EIP93_ALG_TYPE_AEAD:
90 crypto_unregister_aead(&eip93_algs[j]->alg.aead);
91 break;
92 case EIP93_ALG_TYPE_HASH:
93 crypto_unregister_ahash(&eip93_algs[i]->alg.ahash);
94 break;
95 }
96 }
97 }
98
/*
 * Register every algorithm in eip93_algs[] that the hardware supports.
 *
 * @supported_algo_flags: feature bits read from EIP93_REG_PE_OPTION_1.
 * Templates whose required cipher/hash engine is absent are skipped.
 * For plain (non-HMAC) AES skciphers the maximum key size is clamped to
 * the largest key length the PE advertises. On failure, everything
 * registered so far is rolled back.
 *
 * Returns 0 on success or the first registration error.
 */
static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags)
{
	unsigned int idx;
	int ret = 0;

	for (idx = 0; idx < ARRAY_SIZE(eip93_algs); idx++) {
		struct eip93_alg_template *tmpl = eip93_algs[idx];
		u32 flags = tmpl->flags;

		tmpl->eip93 = eip93;

		/* DES/3DES share a single engine-present bit */
		if ((IS_DES(flags) || IS_3DES(flags)) &&
		    !(supported_algo_flags & EIP93_PE_OPTION_TDES))
			continue;

		if (IS_AES(flags)) {
			if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
				continue;

			/*
			 * Clamp the skcipher max key size to the widest AES
			 * key the PE supports; RFC3686 templates also carry
			 * the nonce inside the key material.
			 */
			if (!IS_HMAC(flags)) {
				if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
					tmpl->alg.skcipher.max_keysize = AES_KEYSIZE_128;

				if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
					tmpl->alg.skcipher.max_keysize = AES_KEYSIZE_192;

				if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
					tmpl->alg.skcipher.max_keysize = AES_KEYSIZE_256;

				if (IS_RFC3686(flags))
					tmpl->alg.skcipher.max_keysize +=
						CTR_RFC3686_NONCE_SIZE;
			}
		}

		/* Skip any template whose hash engine is not present */
		if (IS_HASH_MD5(flags) &&
		    !(supported_algo_flags & EIP93_PE_OPTION_MD5))
			continue;

		if (IS_HASH_SHA1(flags) &&
		    !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
			continue;

		if (IS_HASH_SHA224(flags) &&
		    !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
			continue;

		if (IS_HASH_SHA256(flags) &&
		    !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
			continue;

		switch (tmpl->type) {
		case EIP93_ALG_TYPE_SKCIPHER:
			ret = crypto_register_skcipher(&tmpl->alg.skcipher);
			break;
		case EIP93_ALG_TYPE_AEAD:
			ret = crypto_register_aead(&tmpl->alg.aead);
			break;
		case EIP93_ALG_TYPE_HASH:
			ret = crypto_register_ahash(&tmpl->alg.ahash);
			break;
		}
		if (ret)
			goto fail;
	}

	return 0;

fail:
	/* Roll back the 'idx' templates registered so far */
	eip93_unregister_algs(idx);

	return ret;
}
174
/*
 * Drain the result descriptor ring (RDR) and complete the corresponding
 * crypto requests. Runs from the done tasklet.
 *
 * Descriptors are consumed until a descriptor flagged EIP93_DESC_LAST is
 * found; only that last descriptor carries the IDR handle used to look
 * up the crypto_async_request. The routine loops (via 'get_more') until
 * the hardware result count drops to zero, then re-arms the RDR
 * threshold interrupt that the IRQ handler masked.
 */
static void eip93_handle_result_descriptor(struct eip93_device *eip93)
{
	struct crypto_async_request *async;
	struct eip93_descriptor *rdesc;
	u16 desc_flags, crypto_idr;
	bool last_entry;
	int handled, left, err;
	u32 pe_ctrl_stat;
	u32 pe_length;

get_more:
	handled = 0;

	/* Number of result descriptors the PE has produced so far */
	left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT;

	if (!left) {
		/* Ring drained: ack and re-enable the RDR interrupt */
		eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
		eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
		return;
	}

	last_entry = false;

	while (left) {
		/* Ring read pointer is shared; advance it under the lock */
		scoped_guard(spinlock_irqsave, &eip93->ring->read_lock)
			rdesc = eip93_get_descriptor(eip93);
		if (IS_ERR(rdesc)) {
			dev_err(eip93->dev, "Ndesc: %d nreq: %d\n",
				handled, left);
			err = -EIO;
			break;
		}
		/* make sure DMA is finished writing */
		do {
			pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word);
			pe_length = READ_ONCE(rdesc->pe_length_word);
		} while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) !=
			 EIP93_PE_CTRL_PE_READY ||
			 FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) !=
			 EIP93_PE_LENGTH_PE_READY);

		/* Collect all error bits; decoded later by eip93_parse_ctrl_stat_err() */
		err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE |
						  EIP93_PE_CTRL_PE_EXT_ERR |
						  EIP93_PE_CTRL_PE_SEQNUM_ERR |
						  EIP93_PE_CTRL_PE_PAD_ERR |
						  EIP93_PE_CTRL_PE_AUTH_ERR);

		desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id);
		crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id);

		/* Return one descriptor to the hardware and ack the interrupt */
		writel(1, eip93->base + EIP93_REG_PE_RD_COUNT);
		eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);

		handled++;
		left--;

		if (desc_flags & EIP93_DESC_LAST) {
			last_entry = true;
			break;
		}
	}

	/* No LAST descriptor seen yet: keep draining before completing anything */
	if (!last_entry)
		goto get_more;

	/* Get crypto async ref only for last descriptor */
	scoped_guard(spinlock_bh, &eip93->ring->idr_lock) {
		async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr);
		idr_remove(&eip93->ring->crypto_async_idr, crypto_idr);
	}

	/* Parse error in ctrl stat word */
	err = eip93_parse_ctrl_stat_err(eip93, err);

	/* desc_flags/err here are from the LAST descriptor of the request */
	if (desc_flags & EIP93_DESC_SKCIPHER)
		eip93_skcipher_handle_result(async, err);

	if (desc_flags & EIP93_DESC_AEAD)
		eip93_aead_handle_result(async, err);

	if (desc_flags & EIP93_DESC_HASH)
		eip93_hash_handle_result(async, err);

	goto get_more;
}
260
/* Tasklet body: @data is the eip93_device passed at tasklet_init() time. */
static void eip93_done_task(unsigned long data)
{
	struct eip93_device *eip93 = (struct eip93_device *)data;

	eip93_handle_result_descriptor(eip93);
}
267
eip93_irq_handler(int irq,void * data)268 static irqreturn_t eip93_irq_handler(int irq, void *data)
269 {
270 struct eip93_device *eip93 = data;
271 u32 irq_status;
272
273 irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT);
274 if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) {
275 eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH);
276 tasklet_schedule(&eip93->ring->done_task);
277 return IRQ_HANDLED;
278 }
279
280 /* Ignore errors in AUTO mode, handled by the RDR */
281 eip93_irq_clear(eip93, irq_status);
282 if (irq_status)
283 eip93_irq_disable(eip93, irq_status);
284
285 return IRQ_NONE;
286 }
287
/*
 * Bring the packet engine into a known state: reset PE and rings, select
 * autonomous ring mode, gate clocks to only the engines present, set DMA
 * buffer thresholds, quiesce interrupts and program the ring-threshold /
 * timeout interrupt conditions.
 */
static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags)
{
	u32 val;

	/* Reset PE and rings */
	val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING;
	val |= EIP93_PE_TARGET_AUTO_RING_MODE;
	/* For Auto more, update the CDR ring owner after processing */
	val |= EIP93_PE_CONFIG_EN_CDR_UPDATE;
	writel(val, eip93->base + EIP93_REG_PE_CONFIG);

	/* Wait for PE and ring to reset */
	usleep_range(10, 20);

	/* Release PE and ring reset */
	val = readl(eip93->base + EIP93_REG_PE_CONFIG);
	val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING);
	writel(val, eip93->base + EIP93_REG_PE_CONFIG);

	/* Config Clocks: only enable clocks for engines that exist */
	val = EIP93_PE_CLOCK_EN_PE_CLK;
	if (supported_algo_flags & EIP93_PE_OPTION_TDES)
		val |= EIP93_PE_CLOCK_EN_DES_CLK;
	if (supported_algo_flags & EIP93_PE_OPTION_AES)
		val |= EIP93_PE_CLOCK_EN_AES_CLK;
	if (supported_algo_flags &
	    (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 |
	     EIP93_PE_OPTION_SHA_256))
		val |= EIP93_PE_CLOCK_EN_HASH_CLK;
	writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL);

	/* Config DMA thresholds */
	val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) |
	      FIELD_PREP(EIP93_PE_INBUF_THRESH, 128);
	writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH);

	/* Clear/ack all interrupts before disable all */
	eip93_irq_clear(eip93, EIP93_INT_ALL);
	eip93_irq_disable(eip93, EIP93_INT_ALL);

	/* Setup CRD threshold to trigger interrupt */
	val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY);
	/*
	 * Configure RDR interrupt to be triggered if RD counter is not 0
	 * for more than 2^(N+10) system clocks.
	 */
	val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN;
	writel(val, eip93->base + EIP93_REG_PE_RING_THRESH);
}
337
/*
 * Detach the descriptor rings from the hardware by zeroing the ring
 * config and base registers. The ring memory itself is devm-managed
 * (see eip93_set_ring()) and is freed with the device.
 */
static void eip93_desc_free(struct eip93_device *eip93)
{
	writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG);
	writel(0, eip93->base + EIP93_REG_PE_CDR_BASE);
	writel(0, eip93->base + EIP93_REG_PE_RDR_BASE);
}
344
eip93_set_ring(struct eip93_device * eip93,struct eip93_desc_ring * ring)345 static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring)
346 {
347 ring->offset = sizeof(struct eip93_descriptor);
348 ring->base = dmam_alloc_coherent(eip93->dev,
349 sizeof(struct eip93_descriptor) * EIP93_RING_NUM,
350 &ring->base_dma, GFP_KERNEL);
351 if (!ring->base)
352 return -ENOMEM;
353
354 ring->write = ring->base;
355 ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1);
356 ring->read = ring->base;
357
358 return 0;
359 }
360
eip93_desc_init(struct eip93_device * eip93)361 static int eip93_desc_init(struct eip93_device *eip93)
362 {
363 struct eip93_desc_ring *cdr = &eip93->ring->cdr;
364 struct eip93_desc_ring *rdr = &eip93->ring->rdr;
365 int ret;
366 u32 val;
367
368 ret = eip93_set_ring(eip93, cdr);
369 if (ret)
370 return ret;
371
372 ret = eip93_set_ring(eip93, rdr);
373 if (ret)
374 return ret;
375
376 writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE);
377 writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE);
378
379 val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1);
380 writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG);
381
382 return 0;
383 }
384
/*
 * Quiesce the device: stop the done tasklet, silence all interrupts,
 * gate the PE clocks, detach the descriptor rings and tear down the
 * async-request IDR. Used on probe failure and on removal.
 */
static void eip93_cleanup(struct eip93_device *eip93)
{
	tasklet_kill(&eip93->ring->done_task);

	/* Clear/ack all interrupts before disable all */
	eip93_irq_clear(eip93, EIP93_INT_ALL);
	eip93_irq_disable(eip93, EIP93_INT_ALL);

	writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL);

	eip93_desc_free(eip93);

	idr_destroy(&eip93->ring->crypto_async_idr);
}
399
eip93_crypto_probe(struct platform_device * pdev)400 static int eip93_crypto_probe(struct platform_device *pdev)
401 {
402 struct device *dev = &pdev->dev;
403 struct eip93_device *eip93;
404 u32 ver, algo_flags;
405 int ret;
406
407 eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL);
408 if (!eip93)
409 return -ENOMEM;
410
411 eip93->dev = dev;
412 platform_set_drvdata(pdev, eip93);
413
414 eip93->base = devm_platform_ioremap_resource(pdev, 0);
415 if (IS_ERR(eip93->base))
416 return PTR_ERR(eip93->base);
417
418 eip93->irq = platform_get_irq(pdev, 0);
419 if (eip93->irq < 0)
420 return eip93->irq;
421
422 ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler,
423 NULL, IRQF_ONESHOT,
424 dev_name(eip93->dev), eip93);
425
426 eip93->ring = devm_kcalloc(eip93->dev, 1, sizeof(*eip93->ring), GFP_KERNEL);
427 if (!eip93->ring)
428 return -ENOMEM;
429
430 ret = eip93_desc_init(eip93);
431
432 if (ret)
433 return ret;
434
435 tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93);
436
437 spin_lock_init(&eip93->ring->read_lock);
438 spin_lock_init(&eip93->ring->write_lock);
439
440 spin_lock_init(&eip93->ring->idr_lock);
441 idr_init(&eip93->ring->crypto_async_idr);
442
443 algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1);
444
445 eip93_initialize(eip93, algo_flags);
446
447 /* Init finished, enable RDR interrupt */
448 eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
449
450 ret = eip93_register_algs(eip93, algo_flags);
451 if (ret) {
452 eip93_cleanup(eip93);
453 return ret;
454 }
455
456 ver = readl(eip93->base + EIP93_REG_PE_REVISION);
457 /* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */
458 dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n",
459 FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver),
460 FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver),
461 FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver),
462 FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver),
463 algo_flags,
464 readl(eip93->base + EIP93_REG_PE_OPTION_0));
465
466 return 0;
467 }
468
/* Removal: unregister every algorithm in the table, then quiesce the device. */
static void eip93_crypto_remove(struct platform_device *pdev)
{
	struct eip93_device *eip93 = platform_get_drvdata(pdev);

	eip93_unregister_algs(ARRAY_SIZE(eip93_algs));
	eip93_cleanup(eip93);
}
476
/* Supported device-tree compatibles (EIP93 variants i/ie/is/ies). */
static const struct of_device_id eip93_crypto_of_match[] = {
	{ .compatible = "inside-secure,safexcel-eip93i", },
	{ .compatible = "inside-secure,safexcel-eip93ie", },
	{ .compatible = "inside-secure,safexcel-eip93is", },
	{ .compatible = "inside-secure,safexcel-eip93ies", },
	/* IW not supported currently, missing AES-XCB-MAC/AES-CCM */
	/* { .compatible = "inside-secure,safexcel-eip93iw", }, */
	{}
};
MODULE_DEVICE_TABLE(of, eip93_crypto_of_match);
487
/* Platform driver glue: binds probe/remove to the OF match table above. */
static struct platform_driver eip93_crypto_driver = {
	.probe = eip93_crypto_probe,
	.remove = eip93_crypto_remove,
	.driver = {
		.name = "inside-secure-eip93",
		.of_match_table = eip93_crypto_of_match,
	},
};
module_platform_driver(eip93_crypto_driver);
497
498 MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>");
499 MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
500 MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver");
501 MODULE_LICENSE("GPL");
502