1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Cryptographic API.
4 //
5 // Support for Samsung S5PV210 and Exynos HW acceleration.
6 //
7 // Copyright (C) 2011 NetUP Inc. All rights reserved.
8 // Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
9 //
10 // Hash part based on omap-sham.c driver.
11
12 #include <crypto/aes.h>
13 #include <crypto/ctr.h>
14 #include <crypto/internal/hash.h>
15 #include <crypto/internal/skcipher.h>
16 #include <crypto/md5.h>
17 #include <crypto/scatterwalk.h>
18 #include <crypto/sha1.h>
19 #include <crypto/sha2.h>
20 #include <linux/clk.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/io.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/of.h>
29 #include <linux/platform_device.h>
30 #include <linux/scatterlist.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/string.h>
34
35 #define _SBF(s, v) ((v) << (s))
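
/*
 * _SBF(s, v) shifts a multi-bit field value v to bit offset s,
 * e.g. _SBF(4, 0x02) == 0x20, which is how SSS_AES_KEY_SIZE_256 below lands
 * in bits [5:4] of the AES control register.
 */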
36
37 /* Feed control registers */
38 #define SSS_REG_FCINTSTAT 0x0000
39 #define SSS_FCINTSTAT_HPARTINT BIT(7)
40 #define SSS_FCINTSTAT_HDONEINT BIT(5)
41 #define SSS_FCINTSTAT_BRDMAINT BIT(3)
42 #define SSS_FCINTSTAT_BTDMAINT BIT(2)
43 #define SSS_FCINTSTAT_HRDMAINT BIT(1)
44 #define SSS_FCINTSTAT_PKDMAINT BIT(0)
45
46 #define SSS_REG_FCINTENSET 0x0004
47 #define SSS_FCINTENSET_HPARTINTENSET BIT(7)
48 #define SSS_FCINTENSET_HDONEINTENSET BIT(5)
49 #define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
50 #define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
51 #define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
52 #define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
53
54 #define SSS_REG_FCINTENCLR 0x0008
55 #define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
56 #define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
57 #define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
58 #define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
59 #define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
60 #define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
61
62 #define SSS_REG_FCINTPEND 0x000C
63 #define SSS_FCINTPEND_HPARTINTP BIT(7)
64 #define SSS_FCINTPEND_HDONEINTP BIT(5)
65 #define SSS_FCINTPEND_BRDMAINTP BIT(3)
66 #define SSS_FCINTPEND_BTDMAINTP BIT(2)
67 #define SSS_FCINTPEND_HRDMAINTP BIT(1)
68 #define SSS_FCINTPEND_PKDMAINTP BIT(0)
69
70 #define SSS_REG_FCFIFOSTAT 0x0010
71 #define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
72 #define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
73 #define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
74 #define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
75 #define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
76 #define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
77 #define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
78 #define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
79
80 #define SSS_REG_FCFIFOCTRL 0x0014
81 #define SSS_FCFIFOCTRL_DESSEL BIT(2)
82 #define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
83 #define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
84 #define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
85 #define SSS_HASHIN_MASK _SBF(0, 0x03)
86
87 #define SSS_REG_FCBRDMAS 0x0020
88 #define SSS_REG_FCBRDMAL 0x0024
89 #define SSS_REG_FCBRDMAC 0x0028
90 #define SSS_FCBRDMAC_BYTESWAP BIT(1)
91 #define SSS_FCBRDMAC_FLUSH BIT(0)
92
93 #define SSS_REG_FCBTDMAS 0x0030
94 #define SSS_REG_FCBTDMAL 0x0034
95 #define SSS_REG_FCBTDMAC 0x0038
96 #define SSS_FCBTDMAC_BYTESWAP BIT(1)
97 #define SSS_FCBTDMAC_FLUSH BIT(0)
98
99 #define SSS_REG_FCHRDMAS 0x0040
100 #define SSS_REG_FCHRDMAL 0x0044
101 #define SSS_REG_FCHRDMAC 0x0048
102 #define SSS_FCHRDMAC_BYTESWAP BIT(1)
103 #define SSS_FCHRDMAC_FLUSH BIT(0)
104
105 #define SSS_REG_FCPKDMAS 0x0050
106 #define SSS_REG_FCPKDMAL 0x0054
107 #define SSS_REG_FCPKDMAC 0x0058
108 #define SSS_FCPKDMAC_BYTESWAP BIT(3)
109 #define SSS_FCPKDMAC_DESCEND BIT(2)
110 #define SSS_FCPKDMAC_TRANSMIT BIT(1)
111 #define SSS_FCPKDMAC_FLUSH BIT(0)
112
113 #define SSS_REG_FCPKDMAO 0x005C
114
115 /* AES registers */
116 #define SSS_REG_AES_CONTROL 0x00
117 #define SSS_AES_BYTESWAP_DI BIT(11)
118 #define SSS_AES_BYTESWAP_DO BIT(10)
119 #define SSS_AES_BYTESWAP_IV BIT(9)
120 #define SSS_AES_BYTESWAP_CNT BIT(8)
121 #define SSS_AES_BYTESWAP_KEY BIT(7)
122 #define SSS_AES_KEY_CHANGE_MODE BIT(6)
123 #define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
124 #define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
125 #define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
126 #define SSS_AES_FIFO_MODE BIT(3)
127 #define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
128 #define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
129 #define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
130 #define SSS_AES_MODE_DECRYPT BIT(0)
131
132 #define SSS_REG_AES_STATUS 0x04
133 #define SSS_AES_BUSY BIT(2)
134 #define SSS_AES_INPUT_READY BIT(1)
135 #define SSS_AES_OUTPUT_READY BIT(0)
136
#define SSS_REG_AES_IN_DATA(s)		(0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s)		(0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s)		(0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s)		(0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s)		(0x80 + ((s) << 2))
142
143 #define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
144 #define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
145 #define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))
146
147 #define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
148 #define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
149 SSS_AES_REG(dev, reg))
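
/*
 * Usage, as seen later in this file:
 *	SSS_WRITE(dev, FCINTPEND, st_bits);	- feed control register,
 *	  relative to dev->ioaddr;
 *	SSS_AES_WRITE(dev, AES_CONTROL, val);	- AES register, relative to
 *	  dev->aes_ioaddr, whose offset from ioaddr is variant specific.
 */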
150
151 /* HW engine modes */
152 #define FLAGS_AES_DECRYPT BIT(0)
153 #define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
154 #define FLAGS_AES_CBC _SBF(1, 0x01)
155 #define FLAGS_AES_CTR _SBF(1, 0x02)
156
157 #define AES_KEY_LEN 16
158 #define CRYPTO_QUEUE_LEN 1
159
160 /* HASH registers */
161 #define SSS_REG_HASH_CTRL 0x00
162
163 #define SSS_HASH_USER_IV_EN BIT(5)
164 #define SSS_HASH_INIT_BIT BIT(4)
165 #define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
166 #define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
167 #define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
168
169 #define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)
170
171 #define SSS_REG_HASH_CTRL_PAUSE 0x04
172
173 #define SSS_HASH_PAUSE BIT(0)
174
175 #define SSS_REG_HASH_CTRL_FIFO 0x08
176
177 #define SSS_HASH_FIFO_MODE_DMA BIT(0)
178 #define SSS_HASH_FIFO_MODE_CPU 0
179
180 #define SSS_REG_HASH_CTRL_SWAP 0x0C
181
182 #define SSS_HASH_BYTESWAP_DI BIT(3)
183 #define SSS_HASH_BYTESWAP_DO BIT(2)
184 #define SSS_HASH_BYTESWAP_IV BIT(1)
185 #define SSS_HASH_BYTESWAP_KEY BIT(0)
186
187 #define SSS_REG_HASH_STATUS 0x10
188
189 #define SSS_HASH_STATUS_MSG_DONE BIT(6)
190 #define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
191 #define SSS_HASH_STATUS_BUFFER_READY BIT(0)
192
193 #define SSS_REG_HASH_MSG_SIZE_LOW 0x20
194 #define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
195
196 #define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
197 #define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C
198
199 #define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
200 #define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))
201
202 #define HASH_BLOCK_SIZE 64
203 #define HASH_REG_SIZEOF 4
204 #define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
205 #define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
206 #define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
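/* e.g. SHA-256: 32-byte digest / 4-byte registers = 8 output registers */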
207
/*
 * HASH bit numbers, used by the device, set in dev->hash_flags with
 * set_bit(), clear_bit() or tested with test_bit() or BIT(), to keep the
 * HASH state BUSY or FREE, or to signal state from the irq_handler to the
 * hash_tasklet. The SGS bits keep track of memory allocated for the
 * scatterlist.
 */
214 #define HASH_FLAGS_BUSY 0
215 #define HASH_FLAGS_FINAL 1
216 #define HASH_FLAGS_DMA_ACTIVE 2
217 #define HASH_FLAGS_OUTPUT_READY 3
218 #define HASH_FLAGS_DMA_READY 4
219 #define HASH_FLAGS_SGS_COPIED 5
220 #define HASH_FLAGS_SGS_ALLOCED 6
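
/*
 * Typical usage of these bit numbers, as in the irq handler and the hash
 * tasklet below:
 *	set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
 *	if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags)) ...
 *	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) | ...);
 */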
221
222 /* HASH HW constants */
223 #define BUFLEN HASH_BLOCK_SIZE
224
225 #define SSS_HASH_QUEUE_LENGTH 10
226
227 /**
228 * struct samsung_aes_variant - platform specific SSS driver data
229 * @aes_offset: AES register offset from SSS module's base.
230 * @hash_offset: HASH register offset from SSS module's base.
231 * @clk_names: names of clocks needed to run SSS IP
232 *
233 * Specifies platform specific configuration of SSS module.
 * Note: A separate structure for driver-specific platform data is used to
 * allow future expansion of its usage.
236 */
237 struct samsung_aes_variant {
238 unsigned int aes_offset;
239 unsigned int hash_offset;
240 const char *clk_names[2];
241 };
242
243 struct s5p_aes_reqctx {
244 unsigned long mode;
245 };
246
247 struct s5p_aes_ctx {
248 struct s5p_aes_dev *dev;
249
250 u8 aes_key[AES_MAX_KEY_SIZE];
251 u8 nonce[CTR_RFC3686_NONCE_SIZE];
252 int keylen;
253 };
254
255 /**
256 * struct s5p_aes_dev - Crypto device state container
257 * @dev: Associated device
258 * @clk: Clock for accessing hardware
259 * @pclk: APB bus clock necessary to access the hardware
260 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr:	Per-variant offset for AES block IO memory
262 * @irq_fc: Feed control interrupt line
263 * @req: Crypto request currently handled by the device
264 * @ctx: Configuration for currently handled crypto request
265 * @sg_src: Scatter list with source data for currently handled block
266 * in device. This is DMA-mapped into device.
267 * @sg_dst: Scatter list with destination data for currently handled block
268 * in device. This is DMA-mapped into device.
269 * @sg_src_cpy: In case of unaligned access, copied scatter list
270 * with source data.
271 * @sg_dst_cpy: In case of unaligned access, copied scatter list
272 * with destination data.
 * @tasklet:	New request scheduling job
274 * @queue: Crypto queue
275 * @busy: Indicates whether the device is currently handling some request
276 * thus it uses some of the fields from this state, like:
277 * req, ctx, sg_src/dst (and copies). This essentially
278 * protects against concurrent access to these fields.
279 * @lock: Lock for protecting both access to device hardware registers
280 * and fields related to current request (including the busy field).
281 * @res: Resources for hash.
282 * @io_hash_base: Per-variant offset for HASH block IO memory.
283 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
284 * variable.
285 * @hash_flags: Flags for current HASH op.
286 * @hash_queue: Async hash queue.
287 * @hash_tasklet: New HASH request scheduling job.
288 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req:	Current request being sent to the SSS HASH block.
290 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
291 * @hash_sg_cnt: Counter for hash_sg_iter.
292 *
293 * @use_hash: true if HASH algs enabled
294 */
295 struct s5p_aes_dev {
296 struct device *dev;
297 struct clk *clk;
298 struct clk *pclk;
299 void __iomem *ioaddr;
300 void __iomem *aes_ioaddr;
301 int irq_fc;
302
303 struct skcipher_request *req;
304 struct s5p_aes_ctx *ctx;
305 struct scatterlist *sg_src;
306 struct scatterlist *sg_dst;
307
308 struct scatterlist *sg_src_cpy;
309 struct scatterlist *sg_dst_cpy;
310
311 struct tasklet_struct tasklet;
312 struct crypto_queue queue;
313 bool busy;
314 spinlock_t lock;
315
316 struct resource *res;
317 void __iomem *io_hash_base;
318
319 spinlock_t hash_lock; /* protect hash_ vars */
320 unsigned long hash_flags;
321 struct crypto_queue hash_queue;
322 struct tasklet_struct hash_tasklet;
323
324 u8 xmit_buf[BUFLEN];
325 struct ahash_request *hash_req;
326 struct scatterlist *hash_sg_iter;
327 unsigned int hash_sg_cnt;
328
329 bool use_hash;
330 };
331
332 /**
333 * struct s5p_hash_reqctx - HASH request context
334 * @dd: Associated device
335 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
336 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
337 * @digest: Digest message or IV for partial result
338 * @nregs: Number of HW registers for digest or IV read/write
339 * @engine: Bits for selecting type of HASH in SSS block
340 * @sg: sg for DMA transfer
341 * @sg_len: Length of sg for DMA transfer
342 * @sgl: sg for joining buffer and req->src scatterlist
343 * @skip: Skip offset in req->src for current op
344 * @total: Total number of bytes for current request
345 * @finup: Keep state for finup or final.
346 * @error: Keep track of error.
 * @bufcnt:	Number of bytes held in buffer[]
348 * @buffer: For byte(s) from end of req->src in UPDATE op
349 */
350 struct s5p_hash_reqctx {
351 struct s5p_aes_dev *dd;
352 bool op_update;
353
354 u64 digcnt;
355 u8 digest[SHA256_DIGEST_SIZE];
356
357 unsigned int nregs; /* digest_size / sizeof(reg) */
358 u32 engine;
359
360 struct scatterlist *sg;
361 unsigned int sg_len;
362 struct scatterlist sgl[2];
363 unsigned int skip;
364 unsigned int total;
365 bool finup;
366 bool error;
367
368 u32 bufcnt;
369 u8 buffer[];
370 };
371
372 /**
373 * struct s5p_hash_ctx - HASH transformation context
374 * @dd: Associated device
375 * @flags: Bits for algorithm HASH.
376 * @fallback: Software transformation for zero message or size < BUFLEN.
377 */
378 struct s5p_hash_ctx {
379 struct s5p_aes_dev *dd;
380 unsigned long flags;
381 struct crypto_shash *fallback;
382 };
383
384 static const struct samsung_aes_variant s5p_aes_data = {
385 .aes_offset = 0x4000,
386 .hash_offset = 0x6000,
387 .clk_names = { "secss", },
388 };
389
390 static const struct samsung_aes_variant exynos_aes_data = {
391 .aes_offset = 0x200,
392 .hash_offset = 0x400,
393 .clk_names = { "secss", },
394 };
395
396 static const struct samsung_aes_variant exynos5433_slim_aes_data = {
397 .aes_offset = 0x400,
398 .hash_offset = 0x800,
399 .clk_names = { "aclk", "pclk", },
400 };
401
402 static const struct of_device_id s5p_sss_dt_match[] = {
403 {
404 .compatible = "samsung,s5pv210-secss",
405 .data = &s5p_aes_data,
406 },
407 {
408 .compatible = "samsung,exynos4210-secss",
409 .data = &exynos_aes_data,
410 },
411 {
412 .compatible = "samsung,exynos5433-slim-sss",
413 .data = &exynos5433_slim_aes_data,
414 },
415 { },
416 };
417 MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
418
static inline const struct samsung_aes_variant *find_s5p_sss_version
420 (const struct platform_device *pdev)
421 {
422 if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
423 return of_device_get_match_data(&pdev->dev);
424
425 return (const struct samsung_aes_variant *)
426 platform_get_device_id(pdev)->driver_data;
427 }
428
429 static struct s5p_aes_dev *s5p_dev;
430
static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
432 const struct scatterlist *sg)
433 {
434 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
435 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
436 }
437
static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
439 const struct scatterlist *sg)
440 {
441 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
442 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
443 }
444
static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
446 {
447 int len;
448
449 if (!*sg)
450 return;
451
452 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
453 free_pages((unsigned long)sg_virt(*sg), get_order(len));
454
455 kfree(*sg);
456 *sg = NULL;
457 }
458
static void s5p_sg_done(struct s5p_aes_dev *dev)
460 {
461 struct skcipher_request *req = dev->req;
462 struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
463
464 if (dev->sg_dst_cpy) {
465 dev_dbg(dev->dev,
466 "Copying %d bytes of output data back to original place\n",
467 dev->req->cryptlen);
468 memcpy_to_sglist(dev->req->dst, 0, sg_virt(dev->sg_dst_cpy),
469 dev->req->cryptlen);
470 }
471 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
472 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
473 if (reqctx->mode & FLAGS_AES_CBC)
474 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
475
476 else if (reqctx->mode & FLAGS_AES_CTR)
477 memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
478 }
479
/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
482 {
483 skcipher_request_complete(req, err);
484 }
485
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
487 {
488 dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
489 }
490
static void s5p_unset_indata(struct s5p_aes_dev *dev)
492 {
493 dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
494 }
495
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
497 struct scatterlist **dst)
498 {
499 void *pages;
500 int len;
501
502 *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
503 if (!*dst)
504 return -ENOMEM;
505
506 len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
507 pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
508 if (!pages) {
509 kfree(*dst);
510 *dst = NULL;
511 return -ENOMEM;
512 }
513
514 memcpy_from_sglist(pages, src, 0, dev->req->cryptlen);
515
516 sg_init_table(*dst, 1);
517 sg_set_buf(*dst, pages, len);
518
519 return 0;
520 }
521
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
523 {
524 if (!sg->length)
525 return -EINVAL;
526
527 if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
528 return -ENOMEM;
529
530 dev->sg_dst = sg;
531
532 return 0;
533 }
534
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
536 {
537 if (!sg->length)
538 return -EINVAL;
539
540 if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
541 return -ENOMEM;
542
543 dev->sg_src = sg;
544
545 return 0;
546 }
547
548 /*
549 * Returns -ERRNO on error (mapping of new data failed).
550 * On success returns:
551 * - 0 if there is no more data,
552 * - 1 if new transmitting (output) data is ready and its address+length
553 * have to be written to device (by calling s5p_set_dma_outdata()).
554 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
556 {
557 int ret = 0;
558
559 s5p_unset_outdata(dev);
560
561 if (!sg_is_last(dev->sg_dst)) {
562 ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
563 if (!ret)
564 ret = 1;
565 }
566
567 return ret;
568 }
569
570 /*
571 * Returns -ERRNO on error (mapping of new data failed).
572 * On success returns:
573 * - 0 if there is no more data,
574 * - 1 if new receiving (input) data is ready and its address+length
575 * have to be written to device (by calling s5p_set_dma_indata()).
576 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
578 {
579 int ret = 0;
580
581 s5p_unset_indata(dev);
582
583 if (!sg_is_last(dev->sg_src)) {
584 ret = s5p_set_indata(dev, sg_next(dev->sg_src));
585 if (!ret)
586 ret = 1;
587 }
588
589 return ret;
590 }
591
static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
593 {
594 return __raw_readl(dd->io_hash_base + offset);
595 }
596
static inline void s5p_hash_write(struct s5p_aes_dev *dd,
598 u32 offset, u32 value)
599 {
600 __raw_writel(value, dd->io_hash_base + offset);
601 }
602
603 /**
604 * s5p_set_dma_hashdata() - start DMA with sg
605 * @dev: device
606 * @sg: scatterlist ready to DMA transmit
607 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
609 const struct scatterlist *sg)
610 {
611 dev->hash_sg_cnt--;
612 SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
613 SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
614 }
615
616 /**
617 * s5p_hash_rx() - get next hash_sg_iter
618 * @dev: device
619 *
620 * Return:
621 * 2 if there is no more data and it is UPDATE op
622 * 1 if new receiving (input) data is ready and can be written to device
623 * 0 if there is no more data and it is FINAL op
624 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
626 {
627 if (dev->hash_sg_cnt > 0) {
628 dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
629 return 1;
630 }
631
632 set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
633 if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
634 return 0;
635
636 return 2;
637 }
638
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
640 {
641 struct platform_device *pdev = dev_id;
642 struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
643 struct skcipher_request *req;
644 int err_dma_tx = 0;
645 int err_dma_rx = 0;
646 int err_dma_hx = 0;
647 bool tx_end = false;
648 bool hx_end = false;
649 unsigned long flags;
650 u32 status, st_bits;
651 int err;
652
653 spin_lock_irqsave(&dev->lock, flags);
654
655 /*
656 * Handle rx or tx interrupt. If there is still data (scatterlist did not
657 * reach end), then map next scatterlist entry.
658 * In case of such mapping error, s5p_aes_complete() should be called.
659 *
660 * If there is no more data in tx scatter list, call s5p_aes_complete()
661 * and schedule new tasklet.
662 *
	 * Handle hx interrupt. If there is still data, map the next entry.
664 */
665 status = SSS_READ(dev, FCINTSTAT);
666 if (status & SSS_FCINTSTAT_BRDMAINT)
667 err_dma_rx = s5p_aes_rx(dev);
668
669 if (status & SSS_FCINTSTAT_BTDMAINT) {
670 if (sg_is_last(dev->sg_dst))
671 tx_end = true;
672 err_dma_tx = s5p_aes_tx(dev);
673 }
674
675 if (status & SSS_FCINTSTAT_HRDMAINT)
676 err_dma_hx = s5p_hash_rx(dev);
677
678 st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
679 SSS_FCINTSTAT_HRDMAINT);
680 /* clear DMA bits */
681 SSS_WRITE(dev, FCINTPEND, st_bits);
682
683 /* clear HASH irq bits */
684 if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
685 /* cannot have both HPART and HDONE */
686 if (status & SSS_FCINTSTAT_HPARTINT)
687 st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
688
689 if (status & SSS_FCINTSTAT_HDONEINT)
690 st_bits = SSS_HASH_STATUS_MSG_DONE;
691
692 set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
693 s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
694 hx_end = true;
695 /* when DONE or PART, do not handle HASH DMA */
696 err_dma_hx = 0;
697 }
698
699 if (err_dma_rx < 0) {
700 err = err_dma_rx;
701 goto error;
702 }
703 if (err_dma_tx < 0) {
704 err = err_dma_tx;
705 goto error;
706 }
707
708 if (tx_end) {
709 s5p_sg_done(dev);
710 if (err_dma_hx == 1)
711 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
712
713 spin_unlock_irqrestore(&dev->lock, flags);
714
715 s5p_aes_complete(dev->req, 0);
716 /* Device is still busy */
717 tasklet_schedule(&dev->tasklet);
718 } else {
719 /*
720 * Writing length of DMA block (either receiving or
721 * transmitting) will start the operation immediately, so this
722 * should be done at the end (even after clearing pending
723 * interrupts to not miss the interrupt).
724 */
725 if (err_dma_tx == 1)
726 s5p_set_dma_outdata(dev, dev->sg_dst);
727 if (err_dma_rx == 1)
728 s5p_set_dma_indata(dev, dev->sg_src);
729 if (err_dma_hx == 1)
730 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
731
732 spin_unlock_irqrestore(&dev->lock, flags);
733 }
734
735 goto hash_irq_end;
736
737 error:
738 s5p_sg_done(dev);
739 dev->busy = false;
740 req = dev->req;
741 if (err_dma_hx == 1)
742 s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
743
744 spin_unlock_irqrestore(&dev->lock, flags);
745 s5p_aes_complete(req, err);
746
747 hash_irq_end:
748 /*
	 * Note about the else if below:
	 *	when hash_sg_iter reaches the end and it is an UPDATE op,
	 *	issue SSS_HASH_PAUSE and wait for the HPART irq
752 */
753 if (hx_end)
754 tasklet_schedule(&dev->hash_tasklet);
755 else if (err_dma_hx == 2)
756 s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
757 SSS_HASH_PAUSE);
758
759 return IRQ_HANDLED;
760 }
761
762 /**
763 * s5p_hash_read_msg() - read message or IV from HW
764 * @req: AHASH request
765 */
static void s5p_hash_read_msg(struct ahash_request *req)
767 {
768 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
769 struct s5p_aes_dev *dd = ctx->dd;
770 u32 *hash = (u32 *)ctx->digest;
771 unsigned int i;
772
773 for (i = 0; i < ctx->nregs; i++)
774 hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
775 }
776
777 /**
778 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
779 * @dd: device
780 * @ctx: request context
781 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
783 const struct s5p_hash_reqctx *ctx)
784 {
785 const u32 *hash = (const u32 *)ctx->digest;
786 unsigned int i;
787
788 for (i = 0; i < ctx->nregs; i++)
789 s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
790 }
791
792 /**
793 * s5p_hash_write_iv() - write IV for next partial/finup op.
794 * @req: AHASH request
795 */
static void s5p_hash_write_iv(struct ahash_request *req)
797 {
798 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
799
800 s5p_hash_write_ctx_iv(ctx->dd, ctx);
801 }
802
803 /**
804 * s5p_hash_copy_result() - copy digest into req->result
805 * @req: AHASH request
806 */
static void s5p_hash_copy_result(struct ahash_request *req)
808 {
809 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
810
811 if (!req->result)
812 return;
813
814 memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
815 }
816
817 /**
818 * s5p_hash_dma_flush() - flush HASH DMA
819 * @dev: secss device
820 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
822 {
823 SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
824 }
825
826 /**
827 * s5p_hash_dma_enable() - enable DMA mode for HASH
828 * @dev: secss device
829 *
830 * enable DMA mode for HASH
831 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
833 {
834 s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
835 }
836
837 /**
838 * s5p_hash_irq_disable() - disable irq HASH signals
839 * @dev: secss device
840 * @flags: bitfield with irq's to be disabled
841 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
843 {
844 SSS_WRITE(dev, FCINTENCLR, flags);
845 }
846
847 /**
848 * s5p_hash_irq_enable() - enable irq signals
849 * @dev: secss device
850 * @flags: bitfield with irq's to be enabled
851 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
853 {
854 SSS_WRITE(dev, FCINTENSET, flags);
855 }
856
857 /**
858 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
859 * @dev: secss device
860 * @hashflow: HASH stream flow with/without crypto AES/DES
861 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
863 {
864 unsigned long flags;
865 u32 flow;
866
867 spin_lock_irqsave(&dev->lock, flags);
868
869 flow = SSS_READ(dev, FCFIFOCTRL);
870 flow &= ~SSS_HASHIN_MASK;
871 flow |= hashflow;
872 SSS_WRITE(dev, FCFIFOCTRL, flow);
873
874 spin_unlock_irqrestore(&dev->lock, flags);
875 }
876
877 /**
878 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
879 * @dev: secss device
880 * @hashflow: HASH stream flow with/without AES/DES
881 *
882 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
883 * enable HASH irq's HRDMA, HDONE, HPART
884 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
886 {
887 s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
888 SSS_FCINTENCLR_HDONEINTENCLR |
889 SSS_FCINTENCLR_HPARTINTENCLR);
890 s5p_hash_dma_flush(dev);
891
892 s5p_hash_dma_enable(dev);
893 s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
894 s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
895 SSS_FCINTENSET_HDONEINTENSET |
896 SSS_FCINTENSET_HPARTINTENSET);
897 }
898
899 /**
900 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
901 * @dd: secss device
902 * @length: length for request
903 * @final: true if final op
904 *
905 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
906 * after previous updates, fill up IV words. For final, calculate and set
 * lengths for HASH so SecSS can finalize the hash. For partial, set the SSS
 * HASH length to 2^63 so it will never be reached, and set prelow and
 * prehigh to zero.
910 *
911 * This function does not start DMA transfer.
912 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
914 bool final)
915 {
916 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
917 u32 prelow, prehigh, low, high;
918 u32 configflags, swapflags;
919 u64 tmplen;
920
921 configflags = ctx->engine | SSS_HASH_INIT_BIT;
922
923 if (likely(ctx->digcnt)) {
924 s5p_hash_write_ctx_iv(dd, ctx);
925 configflags |= SSS_HASH_USER_IV_EN;
926 }
927
928 if (final) {
929 /* number of bytes for last part */
930 low = length;
931 high = 0;
932 /* total number of bits prev hashed */
933 tmplen = ctx->digcnt * 8;
934 prelow = (u32)tmplen;
935 prehigh = (u32)(tmplen >> 32);
936 } else {
937 prelow = 0;
938 prehigh = 0;
939 low = 0;
940 high = BIT(31);
941 }
942
943 swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
944 SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
945
946 s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
947 s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
948 s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
949 s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
950
951 s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
952 s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
953 }
954
955 /**
956 * s5p_hash_xmit_dma() - start DMA hash processing
957 * @dd: secss device
958 * @length: length for request
959 * @final: true if final op
960 *
961 * Update digcnt here, as it is needed for finup/final op.
962 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
964 bool final)
965 {
966 struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
967 unsigned int cnt;
968
969 cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
970 if (!cnt) {
971 dev_err(dd->dev, "dma_map_sg error\n");
972 ctx->error = true;
973 return -EINVAL;
974 }
975
976 set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
977 dd->hash_sg_iter = ctx->sg;
978 dd->hash_sg_cnt = cnt;
979 s5p_hash_write_ctrl(dd, length, final);
980 ctx->digcnt += length;
981 ctx->total -= length;
982
983 /* catch last interrupt */
984 if (final)
985 set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
986
987 s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
988
989 return -EINPROGRESS;
990 }
991
992 /**
993 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
994 * @ctx: request context
995 * @sg: source scatterlist request
996 * @new_len: number of bytes to process from sg
997 *
998 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
999 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
1000 * with allocated buffer.
1001 *
 * Set a bit in dd->hash_flags so we can free it after the irq ends processing.
1003 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
1005 struct scatterlist *sg, unsigned int new_len)
1006 {
1007 unsigned int pages, len;
1008 void *buf;
1009
1010 len = new_len + ctx->bufcnt;
1011 pages = get_order(len);
1012
1013 buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1014 if (!buf) {
1015 dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
1016 ctx->error = true;
1017 return -ENOMEM;
1018 }
1019
1020 if (ctx->bufcnt)
1021 memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
1022
1023 memcpy_from_sglist(buf + ctx->bufcnt, sg, ctx->skip, new_len);
1024 sg_init_table(ctx->sgl, 1);
1025 sg_set_buf(ctx->sgl, buf, len);
1026 ctx->sg = ctx->sgl;
1027 ctx->sg_len = 1;
1028 ctx->bufcnt = 0;
1029 ctx->skip = 0;
1030 set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
1031
1032 return 0;
1033 }
1034
1035 /**
1036 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
1037 * @ctx: request context
1038 * @sg: source scatterlist request
1039 * @new_len: number of bytes to process from sg
1040 *
1041 * Allocate new scatterlist table, copy data for HASH into it. If there was
1042 * xmit_buf filled, prepare it first, then copy page, length and offset from
1043 * source sg into it, adjusting begin and/or end for skip offset and
1044 * hash_later value.
1045 *
1046 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
1047 * it after irq ends processing.
1048 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
1050 struct scatterlist *sg, unsigned int new_len)
1051 {
1052 unsigned int skip = ctx->skip, n = sg_nents(sg);
1053 struct scatterlist *tmp;
1054 unsigned int len;
1055
1056 if (ctx->bufcnt)
1057 n++;
1058
1059 ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
1060 if (!ctx->sg) {
1061 ctx->error = true;
1062 return -ENOMEM;
1063 }
1064
1065 sg_init_table(ctx->sg, n);
1066
1067 tmp = ctx->sg;
1068
1069 ctx->sg_len = 0;
1070
1071 if (ctx->bufcnt) {
1072 sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
1073 tmp = sg_next(tmp);
1074 ctx->sg_len++;
1075 }
1076
1077 while (sg && skip >= sg->length) {
1078 skip -= sg->length;
1079 sg = sg_next(sg);
1080 }
1081
1082 while (sg && new_len) {
1083 len = sg->length - skip;
1084 if (new_len < len)
1085 len = new_len;
1086
1087 new_len -= len;
1088 sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
1089 skip = 0;
1090 if (new_len <= 0)
1091 sg_mark_end(tmp);
1092
1093 tmp = sg_next(tmp);
1094 ctx->sg_len++;
1095 sg = sg_next(sg);
1096 }
1097
1098 set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
1099
1100 return 0;
1101 }
1102
1103 /**
1104 * s5p_hash_prepare_sgs() - prepare sg for processing
1105 * @ctx: request context
1106 * @sg: source scatterlist request
1107 * @new_len: number of bytes to process from sg
1108 * @final: final flag
1109 *
 * Check two conditions: (1) if the buffers in sg have BUFLEN-aligned lengths
 * of data, and (2) if the sg table has well-aligned elements (list_ok). If
 * one of these checks fails, then either (1) allocate a new buffer for the
 * data with s5p_hash_copy_sgs(), copy the data into this buffer and prepare
 * the request in sgl, or (2) allocate a new sg table and prepare its sg
 * elements.
1115 *
1116 * For digest or finup all conditions can be good, and we may not need any
1117 * fixes.
1118 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
1120 struct scatterlist *sg,
1121 unsigned int new_len, bool final)
1122 {
1123 unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
1124 bool aligned = true, list_ok = true;
1125 struct scatterlist *sg_tmp = sg;
1126
1127 if (!sg || !sg->length || !new_len)
1128 return 0;
1129
1130 if (skip || !final)
1131 list_ok = false;
1132
1133 while (nbytes > 0 && sg_tmp) {
1134 n++;
1135 if (skip >= sg_tmp->length) {
1136 skip -= sg_tmp->length;
1137 if (!sg_tmp->length) {
1138 aligned = false;
1139 break;
1140 }
1141 } else {
1142 if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
1143 aligned = false;
1144 break;
1145 }
1146
1147 if (nbytes < sg_tmp->length - skip) {
1148 list_ok = false;
1149 break;
1150 }
1151
1152 nbytes -= sg_tmp->length - skip;
1153 skip = 0;
1154 }
1155
1156 sg_tmp = sg_next(sg_tmp);
1157 }
1158
1159 if (!aligned)
1160 return s5p_hash_copy_sgs(ctx, sg, new_len);
1161 else if (!list_ok)
1162 return s5p_hash_copy_sg_lists(ctx, sg, new_len);
1163
1164 /*
1165 * Have aligned data from previous operation and/or current
1166 * Note: will enter here only if (digest or finup) and aligned
1167 */
1168 if (ctx->bufcnt) {
1169 ctx->sg_len = n;
1170 sg_init_table(ctx->sgl, 2);
1171 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
1172 sg_chain(ctx->sgl, 2, sg);
1173 ctx->sg = ctx->sgl;
1174 ctx->sg_len++;
1175 } else {
1176 ctx->sg = sg;
1177 ctx->sg_len = n;
1178 }
1179
1180 return 0;
1181 }
1182
1183 /**
1184 * s5p_hash_prepare_request() - prepare request for processing
1185 * @req: AHASH request
1186 * @update: true if UPDATE op
1187 *
 * Note 1: we can have the update flag _and_ the final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (= HASH_BLOCK_SIZE), or when
 *	req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or when it is
 *	a final op.
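 *
 * Example (update, not final): with ctx->bufcnt = 16 and req->nbytes = 200,
 * total = 216; xmit_len becomes 192 (handed to the HW via ctx->sg) and
 * hash_later = 24, i.e. the trailing 24 bytes stay in ctx->buffer for the
 * next operation.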
1192 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
1194 {
1195 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1196 bool final = ctx->finup;
1197 int xmit_len, hash_later, nbytes;
1198 int ret;
1199
1200 if (update)
1201 nbytes = req->nbytes;
1202 else
1203 nbytes = 0;
1204
1205 ctx->total = nbytes + ctx->bufcnt;
1206 if (!ctx->total)
1207 return 0;
1208
1209 if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
1210 /* bytes left from previous request, so fill up to BUFLEN */
1211 int len = BUFLEN - ctx->bufcnt % BUFLEN;
1212
1213 if (len > nbytes)
1214 len = nbytes;
1215
1216 memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, 0, len);
1217 ctx->bufcnt += len;
1218 nbytes -= len;
1219 ctx->skip = len;
1220 } else {
1221 ctx->skip = 0;
1222 }
1223
1224 if (ctx->bufcnt)
1225 memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
1226
1227 xmit_len = ctx->total;
1228 if (final) {
1229 hash_later = 0;
1230 } else {
1231 if (IS_ALIGNED(xmit_len, BUFLEN))
1232 xmit_len -= BUFLEN;
1233 else
1234 xmit_len -= xmit_len & (BUFLEN - 1);
1235
1236 hash_later = ctx->total - xmit_len;
1237 /* copy hash_later bytes from end of req->src */
1238 /* previous bytes are in xmit_buf, so no overwrite */
1239 memcpy_from_sglist(ctx->buffer, req->src,
1240 req->nbytes - hash_later, hash_later);
1241 }
1242
1243 if (xmit_len > BUFLEN) {
1244 ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
1245 final);
1246 if (ret)
1247 return ret;
1248 } else {
1249 /* have buffered data only */
1250 if (unlikely(!ctx->bufcnt)) {
1251 /* first update didn't fill up buffer */
1252 memcpy_from_sglist(ctx->dd->xmit_buf, req->src,
1253 0, xmit_len);
1254 }
1255
1256 sg_init_table(ctx->sgl, 1);
1257 sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
1258
1259 ctx->sg = ctx->sgl;
1260 ctx->sg_len = 1;
1261 }
1262
1263 ctx->bufcnt = hash_later;
1264 if (!final)
1265 ctx->total = xmit_len;
1266
1267 return 0;
1268 }
1269
1270 /**
1271 * s5p_hash_update_dma_stop() - unmap DMA
1272 * @dd: secss device
1273 *
1274 * Unmap scatterlist ctx->sg.
1275 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
1277 {
1278 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
1279
1280 dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
1281 clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
1282 }
1283
1284 /**
1285 * s5p_hash_finish() - copy calculated digest to crypto layer
1286 * @req: AHASH request
1287 */
static void s5p_hash_finish(struct ahash_request *req)
1289 {
1290 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1291 struct s5p_aes_dev *dd = ctx->dd;
1292
1293 if (ctx->digcnt)
1294 s5p_hash_copy_result(req);
1295
1296 dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
1297 }
1298
1299 /**
1300 * s5p_hash_finish_req() - finish request
1301 * @req: AHASH request
1302 * @err: error
1303 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
1305 {
1306 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1307 struct s5p_aes_dev *dd = ctx->dd;
1308 unsigned long flags;
1309
1310 if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
1311 free_pages((unsigned long)sg_virt(ctx->sg),
1312 get_order(ctx->sg->length));
1313
1314 if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
1315 kfree(ctx->sg);
1316
1317 ctx->sg = NULL;
1318 dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
1319 BIT(HASH_FLAGS_SGS_COPIED));
1320
1321 if (!err && !ctx->error) {
1322 s5p_hash_read_msg(req);
1323 if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
1324 s5p_hash_finish(req);
1325 } else {
1326 ctx->error = true;
1327 }
1328
1329 spin_lock_irqsave(&dd->hash_lock, flags);
1330 dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
1331 BIT(HASH_FLAGS_DMA_READY) |
1332 BIT(HASH_FLAGS_OUTPUT_READY));
1333 spin_unlock_irqrestore(&dd->hash_lock, flags);
1334
1335 if (req->base.complete)
1336 ahash_request_complete(req, err);
1337 }
1338
1339 /**
1340 * s5p_hash_handle_queue() - handle hash queue
1341 * @dd: device s5p_aes_dev
1342 * @req: AHASH request
1343 *
 * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not
 * set on the device, then process the first request from dd->hash_queue.
1346 *
1347 * Returns: see s5p_hash_final below.
1348 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
1350 struct ahash_request *req)
1351 {
1352 struct crypto_async_request *async_req, *backlog;
1353 struct s5p_hash_reqctx *ctx;
1354 unsigned long flags;
1355 int err = 0, ret = 0;
1356
1357 retry:
1358 spin_lock_irqsave(&dd->hash_lock, flags);
1359 if (req)
1360 ret = ahash_enqueue_request(&dd->hash_queue, req);
1361
1362 if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
1363 spin_unlock_irqrestore(&dd->hash_lock, flags);
1364 return ret;
1365 }
1366
1367 backlog = crypto_get_backlog(&dd->hash_queue);
1368 async_req = crypto_dequeue_request(&dd->hash_queue);
1369 if (async_req)
1370 set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
1371
1372 spin_unlock_irqrestore(&dd->hash_lock, flags);
1373
1374 if (!async_req)
1375 return ret;
1376
1377 if (backlog)
1378 crypto_request_complete(backlog, -EINPROGRESS);
1379
1380 req = ahash_request_cast(async_req);
1381 dd->hash_req = req;
1382 ctx = ahash_request_ctx(req);
1383
1384 err = s5p_hash_prepare_request(req, ctx->op_update);
1385 if (err || !ctx->total)
1386 goto out;
1387
1388 dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
1389 ctx->op_update, req->nbytes);
1390
1391 s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
1392 if (ctx->digcnt)
1393 s5p_hash_write_iv(req); /* restore hash IV */
1394
1395 if (ctx->op_update) { /* HASH_OP_UPDATE */
1396 err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
1397 if (err != -EINPROGRESS && ctx->finup && !ctx->error)
1398 /* no final() after finup() */
1399 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1400 } else { /* HASH_OP_FINAL */
1401 err = s5p_hash_xmit_dma(dd, ctx->total, true);
1402 }
1403 out:
1404 if (err != -EINPROGRESS) {
1405 /* hash_tasklet_cb will not finish it, so do it here */
1406 s5p_hash_finish_req(req, err);
1407 req = NULL;
1408
1409 /*
1410 * Execute next request immediately if there is anything
1411 * in queue.
1412 */
1413 goto retry;
1414 }
1415
1416 return ret;
1417 }
1418
1419 /**
1420 * s5p_hash_tasklet_cb() - hash tasklet
1421 * @data: ptr to s5p_aes_dev
1422 */
static void s5p_hash_tasklet_cb(unsigned long data)
1424 {
1425 struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
1426
1427 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
1428 s5p_hash_handle_queue(dd, NULL);
1429 return;
1430 }
1431
1432 if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
1433 if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
1434 &dd->hash_flags)) {
1435 s5p_hash_update_dma_stop(dd);
1436 }
1437
1438 if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
1439 &dd->hash_flags)) {
1440 /* hash or semi-hash ready */
1441 clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
1442 goto finish;
1443 }
1444 }
1445
1446 return;
1447
1448 finish:
	/* finish current request */
1450 s5p_hash_finish_req(dd->hash_req, 0);
1451
1452 /* If we are not busy, process next req */
1453 if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
1454 s5p_hash_handle_queue(dd, NULL);
1455 }
1456
1457 /**
1458 * s5p_hash_enqueue() - enqueue request
1459 * @req: AHASH request
1460 * @op: operation UPDATE (true) or FINAL (false)
1461 *
1462 * Returns: see s5p_hash_final below.
1463 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
1465 {
1466 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1467 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1468
1469 ctx->op_update = op;
1470
1471 return s5p_hash_handle_queue(tctx->dd, req);
1472 }
1473
1474 /**
1475 * s5p_hash_update() - process the hash input data
1476 * @req: AHASH request
1477 *
1478 * If request will fit in buffer, copy it and return immediately
1479 * else enqueue it with OP_UPDATE.
1480 *
1481 * Returns: see s5p_hash_final below.
1482 */
static int s5p_hash_update(struct ahash_request *req)
1484 {
1485 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1486
1487 if (!req->nbytes)
1488 return 0;
1489
1490 if (ctx->bufcnt + req->nbytes <= BUFLEN) {
1491 memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src,
1492 0, req->nbytes);
1493 ctx->bufcnt += req->nbytes;
1494 return 0;
1495 }
1496
1497 return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
1498 }
1499
1500 /**
1501 * s5p_hash_final() - close up hash and calculate digest
1502 * @req: AHASH request
1503 *
1504 * Note: in final req->src do not have any data, and req->nbytes can be
1505 * non-zero.
1506 *
1507 * If there were no input data processed yet and the buffered hash data is
1508 * less than BUFLEN (64) then calculate the final hash immediately by using
1509 * SW algorithm fallback.
1510 *
1511 * Otherwise enqueues the current AHASH request with OP_FINAL operation op
 * and finalize the hash message in HW. Note that if digcnt != 0 then there
 * was a previous update op, so there are always some buffered bytes in
 * ctx->buffer, which means that ctx->bufcnt != 0.
1515 *
1516 * Returns:
1517 * 0 if the request has been processed immediately,
1518 * -EINPROGRESS if the operation has been queued for later execution or is set
1519 * to processing by HW,
1520 * -EBUSY if queue is full and request should be resubmitted later,
1521 * other negative values denotes an error.
1522 */
static int s5p_hash_final(struct ahash_request *req)
1524 {
1525 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1526
1527 ctx->finup = true;
1528 if (ctx->error)
1529 return -EINVAL; /* uncompleted hash is not needed */
1530
1531 if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
1532 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1533
1534 return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
1535 ctx->bufcnt, req->result);
1536 }
1537
1538 return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
1539 }
1540
1541 /**
1542 * s5p_hash_finup() - process last req->src and calculate digest
1543 * @req: AHASH request containing the last update data
1544 *
1545 * Return values: see s5p_hash_final above.
1546 */
static int s5p_hash_finup(struct ahash_request *req)
1548 {
1549 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1550 int err1, err2;
1551
1552 ctx->finup = true;
1553
1554 err1 = s5p_hash_update(req);
1555 if (err1 == -EINPROGRESS || err1 == -EBUSY)
1556 return err1;
1557
1558 /*
	 * final() always has to be called to clean up resources, even if
	 * update() failed (except for -EINPROGRESS), or to calculate the
	 * digest for a small size
1562 */
1563 err2 = s5p_hash_final(req);
1564
1565 return err1 ?: err2;
1566 }
1567
1568 /**
 * s5p_hash_init() - initialize AHASH request context
1570 * @req: AHASH request
1571 *
1572 * Init async hash request context.
1573 */
static int s5p_hash_init(struct ahash_request *req)
1575 {
1576 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1577 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1578 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1579
1580 ctx->dd = tctx->dd;
1581 ctx->error = false;
1582 ctx->finup = false;
1583 ctx->bufcnt = 0;
1584 ctx->digcnt = 0;
1585 ctx->total = 0;
1586 ctx->skip = 0;
1587
1588 dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
1589 crypto_ahash_digestsize(tfm));
1590
1591 switch (crypto_ahash_digestsize(tfm)) {
1592 case MD5_DIGEST_SIZE:
1593 ctx->engine = SSS_HASH_ENGINE_MD5;
1594 ctx->nregs = HASH_MD5_MAX_REG;
1595 break;
1596 case SHA1_DIGEST_SIZE:
1597 ctx->engine = SSS_HASH_ENGINE_SHA1;
1598 ctx->nregs = HASH_SHA1_MAX_REG;
1599 break;
1600 case SHA256_DIGEST_SIZE:
1601 ctx->engine = SSS_HASH_ENGINE_SHA256;
1602 ctx->nregs = HASH_SHA256_MAX_REG;
1603 break;
1604 default:
1605 ctx->error = true;
1606 return -EINVAL;
1607 }
1608
1609 return 0;
1610 }
1611
1612 /**
1613 * s5p_hash_digest - calculate digest from req->src
1614 * @req: AHASH request
1615 *
1616 * Return values: see s5p_hash_final above.
1617 */
static int s5p_hash_digest(struct ahash_request *req)
1619 {
1620 return s5p_hash_init(req) ?: s5p_hash_finup(req);
1621 }
1622
1623 /**
1624 * s5p_hash_cra_init_alg - init crypto alg transformation
1625 * @tfm: crypto transformation
1626 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
1628 {
1629 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1630 const char *alg_name = crypto_tfm_alg_name(tfm);
1631
1632 tctx->dd = s5p_dev;
1633 /* Allocate a fallback and abort if it failed. */
1634 tctx->fallback = crypto_alloc_shash(alg_name, 0,
1635 CRYPTO_ALG_NEED_FALLBACK);
1636 if (IS_ERR(tctx->fallback)) {
1637 pr_err("fallback alloc fails for '%s'\n", alg_name);
1638 return PTR_ERR(tctx->fallback);
1639 }
1640
1641 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1642 sizeof(struct s5p_hash_reqctx) + BUFLEN);
1643
1644 return 0;
1645 }
1646
1647 /**
1648 * s5p_hash_cra_init - init crypto tfm
1649 * @tfm: crypto transformation
1650 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
1652 {
1653 return s5p_hash_cra_init_alg(tfm);
1654 }
1655
1656 /**
1657 * s5p_hash_cra_exit - exit crypto tfm
1658 * @tfm: crypto transformation
1659 *
1660 * free allocated fallback
1661 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
1663 {
1664 struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
1665
1666 crypto_free_shash(tctx->fallback);
1667 tctx->fallback = NULL;
1668 }
1669
1670 /**
1671 * s5p_hash_export - export hash state
1672 * @req: AHASH request
1673 * @out: buffer for exported state
1674 */
static int s5p_hash_export(struct ahash_request *req, void *out)
1676 {
1677 const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1678
1679 memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
1680
1681 return 0;
1682 }
1683
1684 /**
1685 * s5p_hash_import - import hash state
1686 * @req: AHASH request
1687 * @in: buffer with state to be imported from
1688 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
1690 {
1691 struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
1692 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1693 struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
1694 const struct s5p_hash_reqctx *ctx_in = in;
1695
1696 memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
1697 if (ctx_in->bufcnt > BUFLEN) {
1698 ctx->error = true;
1699 return -EINVAL;
1700 }
1701
1702 ctx->dd = tctx->dd;
1703 ctx->error = false;
1704
1705 return 0;
1706 }
1707
1708 static struct ahash_alg algs_sha1_md5_sha256[] = {
1709 {
1710 .init = s5p_hash_init,
1711 .update = s5p_hash_update,
1712 .final = s5p_hash_final,
1713 .finup = s5p_hash_finup,
1714 .digest = s5p_hash_digest,
1715 .export = s5p_hash_export,
1716 .import = s5p_hash_import,
1717 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1718 .halg.digestsize = SHA1_DIGEST_SIZE,
1719 .halg.base = {
1720 .cra_name = "sha1",
1721 .cra_driver_name = "exynos-sha1",
1722 .cra_priority = 100,
1723 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1724 CRYPTO_ALG_ASYNC |
1725 CRYPTO_ALG_NEED_FALLBACK,
1726 .cra_blocksize = HASH_BLOCK_SIZE,
1727 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1728 .cra_module = THIS_MODULE,
1729 .cra_init = s5p_hash_cra_init,
1730 .cra_exit = s5p_hash_cra_exit,
1731 }
1732 },
1733 {
1734 .init = s5p_hash_init,
1735 .update = s5p_hash_update,
1736 .final = s5p_hash_final,
1737 .finup = s5p_hash_finup,
1738 .digest = s5p_hash_digest,
1739 .export = s5p_hash_export,
1740 .import = s5p_hash_import,
1741 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1742 .halg.digestsize = MD5_DIGEST_SIZE,
1743 .halg.base = {
1744 .cra_name = "md5",
1745 .cra_driver_name = "exynos-md5",
1746 .cra_priority = 100,
1747 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1748 CRYPTO_ALG_ASYNC |
1749 CRYPTO_ALG_NEED_FALLBACK,
1750 .cra_blocksize = HASH_BLOCK_SIZE,
1751 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1752 .cra_module = THIS_MODULE,
1753 .cra_init = s5p_hash_cra_init,
1754 .cra_exit = s5p_hash_cra_exit,
1755 }
1756 },
1757 {
1758 .init = s5p_hash_init,
1759 .update = s5p_hash_update,
1760 .final = s5p_hash_final,
1761 .finup = s5p_hash_finup,
1762 .digest = s5p_hash_digest,
1763 .export = s5p_hash_export,
1764 .import = s5p_hash_import,
1765 .halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
1766 .halg.digestsize = SHA256_DIGEST_SIZE,
1767 .halg.base = {
1768 .cra_name = "sha256",
1769 .cra_driver_name = "exynos-sha256",
1770 .cra_priority = 100,
1771 .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1772 CRYPTO_ALG_ASYNC |
1773 CRYPTO_ALG_NEED_FALLBACK,
1774 .cra_blocksize = HASH_BLOCK_SIZE,
1775 .cra_ctxsize = sizeof(struct s5p_hash_ctx),
1776 .cra_module = THIS_MODULE,
1777 .cra_init = s5p_hash_cra_init,
1778 .cra_exit = s5p_hash_cra_exit,
1779 }
1780 }
1781
1782 };
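
/*
 * The ahash algorithms above are registered with the crypto API from the
 * driver's probe path (not part of this excerpt). Their per-request state,
 * set by crypto_ahash_set_reqsize() in s5p_hash_cra_init_alg(), is
 * sizeof(struct s5p_hash_reqctx) plus a BUFLEN staging buffer, which matches
 * .halg.statesize used by s5p_hash_export()/s5p_hash_import().
 */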
1783
static void s5p_set_aes(struct s5p_aes_dev *dev,
1785 const u8 *key, const u8 *iv, const u8 *ctr,
1786 unsigned int keylen)
1787 {
1788 void __iomem *keystart;
1789
1790 if (iv)
1791 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
1792 AES_BLOCK_SIZE);
1793
1794 if (ctr)
1795 memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
1796 AES_BLOCK_SIZE);
1797
1798 if (keylen == AES_KEYSIZE_256)
1799 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
1800 else if (keylen == AES_KEYSIZE_192)
1801 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
1802 else
1803 keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
1804
1805 memcpy_toio(keystart, key, keylen);
1806 }
1807
static bool s5p_is_sg_aligned(struct scatterlist *sg)
1809 {
1810 while (sg) {
1811 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
1812 return false;
1813 sg = sg_next(sg);
1814 }
1815
1816 return true;
1817 }
1818
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
1820 struct skcipher_request *req)
1821 {
1822 struct scatterlist *sg;
1823 int err;
1824
1825 dev->sg_src_cpy = NULL;
1826 sg = req->src;
1827 if (!s5p_is_sg_aligned(sg)) {
1828 dev_dbg(dev->dev,
1829 "At least one unaligned source scatter list, making a copy\n");
1830 err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
1831 if (err)
1832 return err;
1833
1834 sg = dev->sg_src_cpy;
1835 }
1836
1837 err = s5p_set_indata(dev, sg);
1838 if (err) {
1839 s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
1840 return err;
1841 }
1842
1843 return 0;
1844 }
1845
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
1847 struct skcipher_request *req)
1848 {
1849 struct scatterlist *sg;
1850 int err;
1851
1852 dev->sg_dst_cpy = NULL;
1853 sg = req->dst;
1854 if (!s5p_is_sg_aligned(sg)) {
1855 dev_dbg(dev->dev,
1856 "At least one unaligned dest scatter list, making a copy\n");
1857 err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
1858 if (err)
1859 return err;
1860
1861 sg = dev->sg_dst_cpy;
1862 }
1863
1864 err = s5p_set_outdata(dev, sg);
1865 if (err) {
1866 s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
1867 return err;
1868 }
1869
1870 return 0;
1871 }
1872
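/*
 * s5p_aes_crypt_start() - program the hardware and kick off one transfer
 *
 * Builds the AES_CONTROL word (chaining mode, direction, key size, byte
 * swapping), loads the key/IV/counter, sets up the source and destination
 * DMA descriptors and finally enables the BRDMA/BTDMA interrupts. If the
 * scatterlist setup fails, the request is completed immediately with the
 * error code.
 */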
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct skcipher_request *req = dev->req;
	u32 aes_control;
	unsigned long flags;
	int err;
	u8 *iv, *ctr;

	/* This sets bit [13:12] to 00, which selects 128-bit counter */
	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->iv;
		ctr = NULL;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = NULL;
		ctr = req->iv;
	} else {
		iv = NULL; /* AES_ECB */
		ctr = NULL;
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* Alternatively, byte swapping could be done on the DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}

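/*
 * s5p_tasklet_cb() - dequeue the next pending request and start it
 *
 * Runs in tasklet context. Pops one request from dev->queue (notifying any
 * backlogged request with -EINPROGRESS) and starts the hardware through
 * s5p_aes_crypt_start(). If the queue is empty, the device is marked idle.
 */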
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	dev->req = skcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = skcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

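/*
 * s5p_aes_handle_req() - enqueue a request and, if the device is idle,
 * schedule the tasklet to process the queue. A busy device simply keeps the
 * request queued for later processing.
 */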
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct skcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return err;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

	return err;
}

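/*
 * s5p_aes_crypt() - common entry point for the ECB/CBC/CTR handlers
 *
 * Zero-length requests complete immediately. For ECB and CBC the request
 * length must be a multiple of AES_BLOCK_SIZE; CTR requests may have any
 * length. The requested mode is stashed in the request context and the
 * request is handed to s5p_aes_handle_req().
 */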
static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!req->cryptlen)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
	    ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
		dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

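/*
 * s5p_aes_setkey() - validate the key length and cache the key
 *
 * Only 128/192/256-bit keys are accepted. The key is kept in the transform
 * context and written to the hardware by s5p_set_aes() just before each
 * transfer.
 */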
static int s5p_aes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CTR);
}

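/*
 * s5p_aes_init_tfm() - bind the transform to the single device instance and
 * reserve per-request context space for struct s5p_aes_reqctx.
 */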
static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dev = s5p_dev;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));

	return 0;
}

static struct skcipher_alg algs[] = {
	{
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ecb_encrypt,
		.decrypt = s5p_aes_ecb_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_cbc_encrypt,
		.decrypt = s5p_aes_cbc_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "ctr(aes)",
		.base.cra_driver_name = "ctr-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ctr_crypt,
		.decrypt = s5p_aes_ctr_crypt,
		.init = s5p_aes_init_tfm,
	},
};

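/*
 * s5p_aes_probe() - set up register space, clocks, interrupt, tasklets and
 * crypto algorithms for one SSS instance
 *
 * Only a single device instance is supported (s5p_dev). On Exynos variants
 * with CONFIG_CRYPTO_DEV_EXYNOS_HASH enabled, the MEM resource is extended
 * to cover the HASH registers as well; if that larger region cannot be
 * mapped (e.g. because the EXYNOS_RNG driver already claimed part of it),
 * the driver falls back to AES-only operation.
 */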
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/*
	 * Note: HASH and PRNG use the same registers in secss, so they must
	 * not overwrite each other. HASH is therefore dropped when
	 * CONFIG_EXYNOS_RNG is enabled. The HASH registers need a larger
	 * mapping than the resource, which currently describes only AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
	if (IS_ERR(pdata->clk))
		return dev_err_probe(dev, PTR_ERR(pdata->clk),
				     "failed to find secss clock %s\n",
				     variant->clk_names[0]);

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling clock %s failed, err %d\n",
			variant->clk_names[0], err);
		return err;
	}

	if (variant->clk_names[1]) {
		pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
		if (IS_ERR(pdata->pclk)) {
			err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
					    "failed to find clock %s\n",
					    variant->clk_names[1]);
			goto err_clk;
		}

		err = clk_prepare_enable(pdata->pclk);
		if (err < 0) {
			dev_err(dev, "Enabling clock %s failed, err %d\n",
				variant->clk_names[1], err);
			goto err_clk;
		}
	} else {
		pdata->pclk = NULL;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_skcipher(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->pclk);

err_clk:
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return err;
}

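/*
 * s5p_aes_remove() - tear down everything registered by s5p_aes_probe():
 * unregister the skcipher and (if used) ahash algorithms, kill the tasklets,
 * restore the trimmed MEM resource and disable the clocks.
 */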
static void s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_skcipher(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->pclk);

	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;
}

static struct platform_driver s5p_aes_crypto = {
	.probe	= s5p_aes_probe,
	.remove	= s5p_aes_remove,
	.driver	= {
		.name	= "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");