// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS HCU Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <crypto/engine.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/sm3.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string.h>

#include "ocs-hcu.h"

#define DRV_NAME "keembay-ocs-hcu"

/* Flag marking a final request. */
#define REQ_FINAL			BIT(0)
/* Flag marking an HMAC request. */
#define REQ_FLAGS_HMAC			BIT(1)
/* Flag set when HW HMAC is being used. */
#define REQ_FLAGS_HMAC_HW		BIT(2)
/* Flag set when SW HMAC is being used. */
#define REQ_FLAGS_HMAC_SW		BIT(3)

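/*
 * Note: REQ_FLAGS_HMAC_HW and REQ_FLAGS_HMAC_SW are mutually exclusive
 * refinements of REQ_FLAGS_HMAC: SW-assisted HMAC is selected as soon as an
 * update() is seen, while HW HMAC is used only for one-shot finup()/digest()
 * requests (see kmb_ocs_hcu_update() and kmb_ocs_hcu_fin_common()).
 */
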
/**
 * struct ocs_hcu_ctx - OCS HCU Transform context.
 * @hcu_dev:     The OCS HCU device used by the transformation.
 * @key:         The key (used only for HMAC transformations).
 * @key_len:     The length of the key.
 * @is_sm3_tfm:  Whether or not this is an SM3 transformation.
 * @is_hmac_tfm: Whether or not this is an HMAC transformation.
 */
struct ocs_hcu_ctx {
	struct ocs_hcu_dev *hcu_dev;
	u8 key[SHA512_BLOCK_SIZE];
	size_t key_len;
	bool is_sm3_tfm;
	bool is_hmac_tfm;
};

/**
 * struct ocs_hcu_rctx - Context for the request.
 * @hcu_dev:        OCS HCU device to be used to service the request.
 * @flags:          Flags tracking request status.
 * @algo:           Algorithm to use for the request.
 * @blk_sz:         Block size of the transformation / request.
 * @dig_sz:         Digest size of the transformation / request.
 * @dma_list:       OCS DMA linked list.
 * @hash_ctx:       OCS HCU hashing context.
 * @buffer:         Buffer to store: partial block of data and SW HMAC
 *                  artifacts (ipad, opad, etc.).
 * @buf_cnt:        Number of bytes currently stored in the buffer.
 * @buf_dma_addr:   The DMA address of @buffer (when mapped).
 * @buf_dma_count:  The number of bytes in @buffer currently DMA-mapped.
 * @sg:             Head of the scatterlist entries containing data.
 * @sg_data_total:  Total data in the SG list at any time.
 * @sg_data_offset: Offset into the data of the current individual SG node.
 * @sg_dma_nents:   Number of sg entries mapped in dma_list.
 * @nents:          Number of entries in the scatterlist.
 */
struct ocs_hcu_rctx {
	struct ocs_hcu_dev	*hcu_dev;
	u32			flags;
	enum ocs_hcu_algo	algo;
	size_t			blk_sz;
	size_t			dig_sz;
	struct ocs_hcu_dma_list	*dma_list;
	struct ocs_hcu_hash_ctx	hash_ctx;
	/*
	 * Buffer is double the block size because we need space for SW HMAC
	 * artifacts, i.e.:
	 * - ipad (1 block) + a possible partial block of data.
	 * - opad (1 block) + digest of H(k ^ ipad || m)
	 */
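	/* Statically sized for the largest block size (SHA-512): 2 * 128 bytes. */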
	u8			buffer[2 * SHA512_BLOCK_SIZE];
	size_t			buf_cnt;
	dma_addr_t		buf_dma_addr;
	size_t			buf_dma_count;
	struct scatterlist	*sg;
	unsigned int		sg_data_total;
	unsigned int		sg_data_offset;
	unsigned int		sg_dma_nents;
	unsigned int		nents;
};

/**
 * struct ocs_hcu_drv - Driver data
 * @dev_list:	The list of HCU devices.
 * @lock:	The lock protecting dev_list.
 */
struct ocs_hcu_drv {
	struct list_head dev_list;
	spinlock_t lock; /* Protects dev_list. */
};

static struct ocs_hcu_drv ocs_hcu = {
	.dev_list = LIST_HEAD_INIT(ocs_hcu.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(ocs_hcu.lock),
};

/*
 * Return the total amount of data in the request; that is: the data in the
 * request buffer + the data in the sg list.
 */
static inline unsigned int kmb_get_total_data(struct ocs_hcu_rctx *rctx)
{
	return rctx->sg_data_total + rctx->buf_cnt;
}

/* Move remaining content of scatter-gather list to context buffer. */
static int flush_sg_to_ocs_buffer(struct ocs_hcu_rctx *rctx)
{
	size_t count;

	if (rctx->sg_data_total > (sizeof(rctx->buffer) - rctx->buf_cnt)) {
		WARN(1, "%s: sg data does not fit in buffer\n", __func__);
		return -EINVAL;
	}

	while (rctx->sg_data_total) {
		if (!rctx->sg) {
			WARN(1, "%s: unexpected NULL sg\n", __func__);
			return -EINVAL;
		}
		/*
		 * If current sg has been fully processed, skip to the next
		 * one.
		 */
		if (rctx->sg_data_offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			rctx->sg_data_offset = 0;
			continue;
		}
		/*
		 * Determine the maximum data available to copy from the node.
		 * Minimum of the length left in the sg node, or the total data
		 * in the request.
		 */
		count = min(rctx->sg->length - rctx->sg_data_offset,
			    rctx->sg_data_total);
		/* Copy from scatter-list entry to context buffer. */
		scatterwalk_map_and_copy(&rctx->buffer[rctx->buf_cnt],
					 rctx->sg, rctx->sg_data_offset,
					 count, 0);

		rctx->sg_data_offset += count;
		rctx->sg_data_total -= count;
		rctx->buf_cnt += count;
	}

	return 0;
}

static struct ocs_hcu_dev *kmb_ocs_hcu_find_dev(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);

	/* If the HCU device for the request was previously set, return it. */
	if (tctx->hcu_dev)
		return tctx->hcu_dev;

	/*
	 * Otherwise, get the first HCU device available (there should be one
	 * and only one device).
	 */
	spin_lock_bh(&ocs_hcu.lock);
	tctx->hcu_dev = list_first_entry_or_null(&ocs_hcu.dev_list,
						 struct ocs_hcu_dev,
						 list);
	spin_unlock_bh(&ocs_hcu.lock);

	return tctx->hcu_dev;
}

/* Free OCS DMA linked list and DMA-able context buffer. */
static void kmb_ocs_hcu_dma_cleanup(struct ahash_request *req,
				    struct ocs_hcu_rctx *rctx)
{
	struct ocs_hcu_dev *hcu_dev = rctx->hcu_dev;
	struct device *dev = hcu_dev->dev;

	/* Unmap rctx->buffer (if mapped). */
	if (rctx->buf_dma_count) {
		dma_unmap_single(dev, rctx->buf_dma_addr, rctx->buf_dma_count,
				 DMA_TO_DEVICE);
		rctx->buf_dma_count = 0;
	}

	/* Unmap req->src (if mapped). */
	if (rctx->sg_dma_nents) {
		dma_unmap_sg(dev, req->src, rctx->nents, DMA_TO_DEVICE);
		rctx->sg_dma_nents = 0;
	}

	/* Free dma_list (if allocated). */
	if (rctx->dma_list) {
		ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);
		rctx->dma_list = NULL;
	}
}

/*
 * Prepare for DMA operation:
 * - DMA-map request context buffer (if needed)
 * - DMA-map SG list (only the entries to be processed, see note below)
 * - Allocate OCS HCU DMA linked list (number of elements = SG entries to
 *   process + context buffer (if not empty)).
 * - Add DMA-mapped request context buffer to OCS HCU DMA list.
 * - Add SG entries to DMA list.
 *
 * Note: if this is a final request, we process all the data in the SG list,
 * otherwise we can only process up to the maximum amount of block-aligned data
 * (the remainder will be put into the context buffer and processed in the next
 * request).
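 *
 * E.g., with a 64-byte block size, a non-final request with 150 bytes of
 * total data DMA-maps the 128 block-aligned bytes and leaves the 22-byte
 * remainder (150 % 64) for the context buffer.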
 */
static int kmb_ocs_dma_prepare(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
	struct device *dev = rctx->hcu_dev->dev;
	unsigned int remainder = 0;
	unsigned int total;
	int nents;
	size_t count;
	int rc;
	int i;

	/* This function should be called only when there is data to process. */
	total = kmb_get_total_data(rctx);
	if (!total)
		return -EINVAL;

	/*
	 * If this is not a final DMA (terminated DMA), the data passed to the
	 * HCU must be aligned to the block size; compute the remainder data to
	 * be processed in the next request.
	 */
	if (!(rctx->flags & REQ_FINAL))
		remainder = total % rctx->blk_sz;

	/* Determine the number of scatter gather list entries to process. */
	nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);

	if (nents < 0)
		return nents;

	/* If there are entries to process, map them. */
	if (nents) {
		rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
						DMA_TO_DEVICE);
		if (!rctx->sg_dma_nents) {
			dev_err(dev, "Failed to MAP SG\n");
			rc = -ENOMEM;
			goto cleanup;
		}

		/* Save the value of nents to pass to dma_unmap_sg(). */
		rctx->nents = nents;

		/*
		 * The value returned by dma_map_sg() can be < nents; so update
		 * nents accordingly.
		 */
		nents = rctx->sg_dma_nents;
	}

	/*
	 * If context buffer is not empty, map it and add extra DMA entry for
	 * it.
	 */
	if (rctx->buf_cnt) {
		rctx->buf_dma_addr = dma_map_single(dev, rctx->buffer,
						    rctx->buf_cnt,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, rctx->buf_dma_addr)) {
			dev_err(dev, "Failed to map request context buffer\n");
			rc = -ENOMEM;
			goto cleanup;
		}
		rctx->buf_dma_count = rctx->buf_cnt;
		/* Increase number of dma entries. */
		nents++;
	}

	/* Allocate OCS HCU DMA list. */
	rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);
	if (!rctx->dma_list) {
		rc = -ENOMEM;
		goto cleanup;
	}

	/* Add request context buffer (if previously DMA-mapped) */
	if (rctx->buf_dma_count) {
		rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,
					       rctx->buf_dma_addr,
					       rctx->buf_dma_count);
		if (rc)
			goto cleanup;
	}

	/* Add the SG nodes to be processed to the DMA linked list. */
	for_each_sg(req->src, rctx->sg, rctx->sg_dma_nents, i) {
		/*
		 * The number of bytes to add to the list entry is the minimum
		 * between:
		 * - The DMA length of the SG entry.
		 * - The data left to be processed.
		 */
		count = min(rctx->sg_data_total - remainder,
			    sg_dma_len(rctx->sg) - rctx->sg_data_offset);
		/*
		 * Do not create a zero length DMA descriptor. Check in case of
		 * zero length SG node.
		 */
		if (count == 0)
			continue;
		/* Add sg to HCU DMA list. */
		rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev,
					       rctx->dma_list,
					       rctx->sg->dma_address,
					       count);
		if (rc)
			goto cleanup;

		/* Update amount of data remaining in SG list. */
		rctx->sg_data_total -= count;

		/*
		 * If remaining data is equal to remainder (note: 'less than'
		 * case should never happen in practice), we are done: update
		 * offset and exit the loop.
		 */
		if (rctx->sg_data_total <= remainder) {
			WARN_ON(rctx->sg_data_total < remainder);
			rctx->sg_data_offset += count;
			break;
		}

		/*
		 * If we get here, it is because we need to process the next sg
		 * in the list; set offset within the sg to 0.
		 */
		rctx->sg_data_offset = 0;
	}

	return 0;
cleanup:
	dev_err(dev, "Failed to prepare DMA.\n");
	kmb_ocs_hcu_dma_cleanup(req, rctx);

	return rc;
}

static void kmb_ocs_hcu_secure_cleanup(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);

	/* Clear buffer of any data. */
	memzero_explicit(rctx->buffer, sizeof(rctx->buffer));
}

static int kmb_ocs_hcu_handle_queue(struct ahash_request *req)
{
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);

	if (!hcu_dev)
		return -ENOENT;

	return crypto_transfer_hash_request_to_engine(hcu_dev->engine, req);
}

static int prepare_ipad(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
	int i;

	WARN(rctx->buf_cnt, "%s: Context buffer is not empty\n", __func__);
	WARN(!(rctx->flags & REQ_FLAGS_HMAC_SW),
	     "%s: HMAC_SW flag is not set\n", __func__);
	/*
	 * Key length must be equal to block size. If key is shorter,
	 * we pad it with zero (note: key cannot be longer, since
	 * longer keys are hashed by kmb_ocs_hcu_setkey()).
	 */
	if (ctx->key_len > rctx->blk_sz) {
		WARN(1, "%s: Invalid key length in tfm context\n", __func__);
		return -EINVAL;
	}
	memzero_explicit(&ctx->key[ctx->key_len],
			 rctx->blk_sz - ctx->key_len);
	ctx->key_len = rctx->blk_sz;
	/*
	 * Prepare IPAD for HMAC. Only done for first block.
	 * HMAC(k,m) = H(k ^ opad || H(k ^ ipad || m))
	 * k ^ ipad will be first hashed block.
	 * k ^ opad will be calculated in the final request.
	 * Only needed if not using HW HMAC.
	 */
	for (i = 0; i < rctx->blk_sz; i++)
		rctx->buffer[i] = ctx->key[i] ^ HMAC_IPAD_VALUE;
	rctx->buf_cnt = rctx->blk_sz;
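	/* buffer[] now holds k ^ ipad; message data gets appended after it. */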

	return 0;
}

static int kmb_ocs_hcu_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
	struct ocs_hcu_ctx *tctx = crypto_ahash_ctx(tfm);
	int rc;
	int i;

	if (!hcu_dev) {
		rc = -ENOENT;
		goto error;
	}

	/*
	 * If hardware HMAC flag is set, perform HMAC in hardware.
	 *
	 * NOTE: this flag implies REQ_FINAL && kmb_get_total_data(rctx)
	 */
	if (rctx->flags & REQ_FLAGS_HMAC_HW) {
		/* Map input data into the HCU DMA linked list. */
		rc = kmb_ocs_dma_prepare(req);
		if (rc)
			goto error;

		rc = ocs_hcu_hmac(hcu_dev, rctx->algo, tctx->key, tctx->key_len,
				  rctx->dma_list, req->result, rctx->dig_sz);

		/* Unmap data and free DMA list regardless of return code. */
		kmb_ocs_hcu_dma_cleanup(req, rctx);

		/* Process previous return code. */
		if (rc)
			goto error;

		goto done;
	}

	/* Handle update request case. */
	if (!(rctx->flags & REQ_FINAL)) {
		/* Update should always have input data. */
		if (!kmb_get_total_data(rctx))
			return -EINVAL;

		/* Map input data into the HCU DMA linked list. */
		rc = kmb_ocs_dma_prepare(req);
		if (rc)
			goto error;

		/* Do hashing step. */
		rc = ocs_hcu_hash_update(hcu_dev, &rctx->hash_ctx,
					 rctx->dma_list);

		/* Unmap data and free DMA list regardless of return code. */
		kmb_ocs_hcu_dma_cleanup(req, rctx);

		/* Process previous return code. */
		if (rc)
			goto error;

		/*
		 * Reset request buffer count (data in the buffer was just
		 * processed).
		 */
		rctx->buf_cnt = 0;
		/*
		 * Move remaining sg data into the request buffer, so that it
		 * will be processed during the next request.
		 *
		 * NOTE: we have remaining data if kmb_get_total_data() was not
		 * a multiple of block size.
		 */
		rc = flush_sg_to_ocs_buffer(rctx);
		if (rc)
			goto error;

		goto done;
	}

	/* If we get here, this is a final request. */

	/* If there is data to process, use finup. */
	if (kmb_get_total_data(rctx)) {
		/* Map input data into the HCU DMA linked list. */
		rc = kmb_ocs_dma_prepare(req);
		if (rc)
			goto error;

		/* Do hashing step. */
		rc = ocs_hcu_hash_finup(hcu_dev, &rctx->hash_ctx,
					rctx->dma_list,
					req->result, rctx->dig_sz);
		/* Free DMA list regardless of return code. */
		kmb_ocs_hcu_dma_cleanup(req, rctx);

		/* Process previous return code. */
		if (rc)
			goto error;

	} else { /* Otherwise (if we have no data), use final. */
		rc = ocs_hcu_hash_final(hcu_dev, &rctx->hash_ctx, req->result,
					rctx->dig_sz);
		if (rc)
			goto error;
	}

	/*
	 * If we are finalizing a SW HMAC request, we just computed the result
	 * of: H(k ^ ipad || m).
	 *
	 * We now need to complete the HMAC calculation with the OPAD step,
	 * that is, we need to compute H(k ^ opad || digest), where digest is
	 * the digest we just obtained, i.e., H(k ^ ipad || m).
	 */
	if (rctx->flags & REQ_FLAGS_HMAC_SW) {
		/*
		 * Compute k ^ opad and store it in the request buffer (which
		 * is not used anymore at this point).
		 * Note: key has been padded / hashed already (so keylen ==
		 * blksz).
		 */
		WARN_ON(tctx->key_len != rctx->blk_sz);
		for (i = 0; i < rctx->blk_sz; i++)
			rctx->buffer[i] = tctx->key[i] ^ HMAC_OPAD_VALUE;
		/* Now append the digest to the rest of the buffer. */
		for (i = 0; i < rctx->dig_sz; i++)
			rctx->buffer[rctx->blk_sz + i] = req->result[i];
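		/*
		 * buffer[] now holds (k ^ opad) || H(k ^ ipad || m), i.e.
		 * blk_sz + dig_sz bytes to be hashed by the digest step below.
		 */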

		/* Now hash the buffer to obtain the final HMAC. */
		rc = ocs_hcu_digest(hcu_dev, rctx->algo, rctx->buffer,
				    rctx->blk_sz + rctx->dig_sz, req->result,
				    rctx->dig_sz);
		if (rc)
			goto error;
	}

	/* Perform secure clean-up. */
	kmb_ocs_hcu_secure_cleanup(req);
done:
	crypto_finalize_hash_request(hcu_dev->engine, req, 0);

	return 0;

error:
	kmb_ocs_hcu_secure_cleanup(req);
	return rc;
}

static int kmb_ocs_hcu_init(struct ahash_request *req)
{
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);

	if (!hcu_dev)
		return -ENOENT;

	/* Initialize entire request context to zero. */
	memset(rctx, 0, sizeof(*rctx));

	rctx->hcu_dev = hcu_dev;
	rctx->dig_sz = crypto_ahash_digestsize(tfm);

	switch (rctx->dig_sz) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
	case SHA224_DIGEST_SIZE:
		rctx->blk_sz = SHA224_BLOCK_SIZE;
		rctx->algo = OCS_HCU_ALGO_SHA224;
		break;
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
	case SHA256_DIGEST_SIZE:
		rctx->blk_sz = SHA256_BLOCK_SIZE;
		/*
		 * SHA256 and SM3 have the same digest size: use info from tfm
		 * context to find out which one we should use.
		 */
		rctx->algo = ctx->is_sm3_tfm ? OCS_HCU_ALGO_SM3 :
					       OCS_HCU_ALGO_SHA256;
		break;
	case SHA384_DIGEST_SIZE:
		rctx->blk_sz = SHA384_BLOCK_SIZE;
		rctx->algo = OCS_HCU_ALGO_SHA384;
		break;
	case SHA512_DIGEST_SIZE:
		rctx->blk_sz = SHA512_BLOCK_SIZE;
		rctx->algo = OCS_HCU_ALGO_SHA512;
		break;
	default:
		return -EINVAL;
	}

	/* Initialize intermediate data. */
	ocs_hcu_hash_init(&rctx->hash_ctx, rctx->algo);

	/* If this is an HMAC request, set HMAC flag. */
	if (ctx->is_hmac_tfm)
		rctx->flags |= REQ_FLAGS_HMAC;

	return 0;
}

static int kmb_ocs_hcu_update(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
	int rc;

	if (!req->nbytes)
		return 0;

	rctx->sg_data_total = req->nbytes;
	rctx->sg_data_offset = 0;
	rctx->sg = req->src;

	/*
	 * If we are doing HMAC, then we must use SW-assisted HMAC, since HW
	 * HMAC does not support context switching (thus it can only be used
	 * with finup() or digest()).
	 */
	if (rctx->flags & REQ_FLAGS_HMAC &&
	    !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
		rctx->flags |= REQ_FLAGS_HMAC_SW;
		rc = prepare_ipad(req);
		if (rc)
			return rc;
	}

	/*
	 * If remaining sg_data fits into ctx buffer, just copy it there; we'll
	 * process it at the next update() or final().
	 */
	if (rctx->sg_data_total <= (sizeof(rctx->buffer) - rctx->buf_cnt))
		return flush_sg_to_ocs_buffer(rctx);

	return kmb_ocs_hcu_handle_queue(req);
}

/* Common logic for kmb_ocs_hcu_final() and kmb_ocs_hcu_finup(). */
static int kmb_ocs_hcu_fin_common(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
	int rc;

	rctx->flags |= REQ_FINAL;

	/*
	 * If this is an HMAC request and, so far, we didn't have to switch to
	 * SW HMAC, check if we can use HW HMAC.
	 */
	if (rctx->flags & REQ_FLAGS_HMAC &&
	    !(rctx->flags & REQ_FLAGS_HMAC_SW)) {
		/*
		 * If we are here, it means we never processed any data so far,
		 * so we can use HW HMAC, but only if there is some data to
		 * process (since OCS HW HMAC does not support zero-length
		 * messages) and the key length is supported by the hardware
		 * (OCS HCU HW only supports length <= 64); if HW HMAC cannot
		 * be used, fall back to SW-assisted HMAC.
		 */
		if (kmb_get_total_data(rctx) &&
		    ctx->key_len <= OCS_HCU_HW_KEY_LEN) {
			rctx->flags |= REQ_FLAGS_HMAC_HW;
		} else {
			rctx->flags |= REQ_FLAGS_HMAC_SW;
			rc = prepare_ipad(req);
			if (rc)
				return rc;
		}
	}

	return kmb_ocs_hcu_handle_queue(req);
}

static int kmb_ocs_hcu_final(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);

	rctx->sg_data_total = 0;
	rctx->sg_data_offset = 0;
	rctx->sg = NULL;

	return kmb_ocs_hcu_fin_common(req);
}

static int kmb_ocs_hcu_finup(struct ahash_request *req)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);

	rctx->sg_data_total = req->nbytes;
	rctx->sg_data_offset = 0;
	rctx->sg = req->src;

	return kmb_ocs_hcu_fin_common(req);
}

static int kmb_ocs_hcu_digest(struct ahash_request *req)
{
	int rc = 0;
	struct ocs_hcu_dev *hcu_dev = kmb_ocs_hcu_find_dev(req);

	if (!hcu_dev)
		return -ENOENT;

	rc = kmb_ocs_hcu_init(req);
	if (rc)
		return rc;

	rc = kmb_ocs_hcu_finup(req);

	return rc;
}

static int kmb_ocs_hcu_export(struct ahash_request *req, void *out)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);

	/* Intermediate data is always stored and applied per request. */
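	/*
	 * Note: the whole request context (including the data buffer) is
	 * serialized, which is why each algorithm below declares .statesize
	 * as sizeof(struct ocs_hcu_rctx).
	 */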
	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int kmb_ocs_hcu_import(struct ahash_request *req, const void *in)
{
	struct ocs_hcu_rctx *rctx = ahash_request_ctx_dma(req);

	/* Intermediate data is always stored and applied per request. */
	memcpy(rctx, in, sizeof(*rctx));

	return 0;
}

static int kmb_ocs_hcu_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct ocs_hcu_ctx *ctx = crypto_ahash_ctx(tfm);
	size_t blk_sz = crypto_ahash_blocksize(tfm);
	struct crypto_ahash *ahash_tfm;
	struct ahash_request *req;
	struct crypto_wait wait;
	struct scatterlist sg;
	const char *alg_name;
	int rc;

	/*
	 * Key length must be equal to block size:
	 * - If key is shorter, we are done for now (the key will be padded
	 *   later on); this is to maximize the use of HW HMAC (which works
	 *   only for keys <= 64 bytes).
	 * - If key is longer, we hash it.
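	 *
	 * E.g., for hmac(sha256) (64-byte blocks), a 100-byte key is hashed
	 * down to a 32-byte key by the digest operation below.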
	 */
	if (keylen <= blk_sz) {
		memcpy(ctx->key, key, keylen);
		ctx->key_len = keylen;
		return 0;
	}

	switch (digestsize) {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
	case SHA224_DIGEST_SIZE:
		alg_name = "sha224-keembay-ocs";
		break;
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
	case SHA256_DIGEST_SIZE:
		alg_name = ctx->is_sm3_tfm ? "sm3-keembay-ocs" :
					     "sha256-keembay-ocs";
		break;
	case SHA384_DIGEST_SIZE:
		alg_name = "sha384-keembay-ocs";
		break;
	case SHA512_DIGEST_SIZE:
		alg_name = "sha512-keembay-ocs";
		break;
	default:
		return -EINVAL;
	}

	ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto err_free_ahash;
	}

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	sg_init_one(&sg, key, keylen);
	ahash_request_set_crypt(req, &sg, ctx->key, keylen);

	rc = crypto_wait_req(crypto_ahash_digest(req), &wait);
	if (rc == 0)
		ctx->key_len = digestsize;

	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);

	return rc;
}

/* Set request size and initialize tfm context. */
static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
{
	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct ocs_hcu_rctx));
}

static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	return 0;
}

static int kmb_ocs_hcu_sm3_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	ctx->is_sm3_tfm = true;

	return 0;
}

static int kmb_ocs_hcu_hmac_sm3_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	ctx->is_sm3_tfm = true;
	ctx->is_hmac_tfm = true;

	return 0;
}

static int kmb_ocs_hcu_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	__cra_init(tfm, ctx);

	ctx->is_hmac_tfm = true;

	return 0;
}

/* Function called when 'tfm' is de-initialized. */
static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct ocs_hcu_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Clear the key. */
	memzero_explicit(ctx->key, sizeof(ctx->key));
}

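/*
 * Illustrative only (not part of this driver): the algorithms below are
 * reached through the generic crypto API, e.g.:
 *
 *	tfm = crypto_alloc_ahash("hmac(sha512)", 0, 0);
 *	rc = crypto_ahash_setkey(tfm, key, keylen);
 *
 * followed by the usual ahash init/update/final (or digest) requests.
 */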
static struct ahash_engine_alg ocs_hcu_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "sha224-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.setkey		= kmb_ocs_hcu_setkey,
	.base.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha224)",
			.cra_driver_name	= "hmac-sha224-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sha256-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.setkey		= kmb_ocs_hcu_setkey,
	.base.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha256)",
			.cra_driver_name	= "hmac-sha256-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.halg = {
		.digestsize	= SM3_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sm3",
			.cra_driver_name	= "sm3-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SM3_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sm3_cra_init,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.setkey		= kmb_ocs_hcu_setkey,
	.base.halg = {
		.digestsize	= SM3_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sm3)",
			.cra_driver_name	= "hmac-sm3-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SM3_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_sm3_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "sha384-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.setkey		= kmb_ocs_hcu_setkey,
	.base.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha384)",
			.cra_driver_name	= "hmac-sha384-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "sha512-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_sha_cra_init,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
{
	.base.init		= kmb_ocs_hcu_init,
	.base.update		= kmb_ocs_hcu_update,
	.base.final		= kmb_ocs_hcu_final,
	.base.finup		= kmb_ocs_hcu_finup,
	.base.digest		= kmb_ocs_hcu_digest,
	.base.export		= kmb_ocs_hcu_export,
	.base.import		= kmb_ocs_hcu_import,
	.base.setkey		= kmb_ocs_hcu_setkey,
	.base.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct ocs_hcu_rctx),
		.base	= {
			.cra_name		= "hmac(sha512)",
			.cra_driver_name	= "hmac-sha512-keembay-ocs",
			.cra_priority		= 255,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct ocs_hcu_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= kmb_ocs_hcu_hmac_cra_init,
			.cra_exit		= kmb_ocs_hcu_hmac_cra_exit,
		}
	},
	.op.do_one_request = kmb_ocs_hcu_do_one_request,
},
};

/* Device tree driver match. */
static const struct of_device_id kmb_ocs_hcu_of_match[] = {
	{
		.compatible = "intel,keembay-ocs-hcu",
	},
	{}
};
MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match);

static void kmb_ocs_hcu_remove(struct platform_device *pdev)
{
	struct ocs_hcu_dev *hcu_dev = platform_get_drvdata(pdev);

	crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));

	crypto_engine_exit(hcu_dev->engine);

	spin_lock_bh(&ocs_hcu.lock);
	list_del(&hcu_dev->list);
	spin_unlock_bh(&ocs_hcu.lock);
}

static int kmb_ocs_hcu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocs_hcu_dev *hcu_dev;
	int rc;

	hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
	if (!hcu_dev)
		return -ENOMEM;

	hcu_dev->dev = dev;

	platform_set_drvdata(pdev, hcu_dev);
	rc = dma_set_mask_and_coherent(&pdev->dev, OCS_HCU_DMA_BIT_MASK);
	if (rc)
		return rc;

	hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hcu_dev->io_base))
		return PTR_ERR(hcu_dev->io_base);

	init_completion(&hcu_dev->irq_done);

	/* Get and request IRQ. */
	hcu_dev->irq = platform_get_irq(pdev, 0);
	if (hcu_dev->irq < 0)
		return hcu_dev->irq;

	rc = devm_request_threaded_irq(&pdev->dev, hcu_dev->irq,
				       ocs_hcu_irq_handler, NULL, 0,
				       "keembay-ocs-hcu", hcu_dev);
	if (rc < 0) {
		dev_err(dev, "Could not request IRQ.\n");
		return rc;
	}

	INIT_LIST_HEAD(&hcu_dev->list);

	spin_lock_bh(&ocs_hcu.lock);
	list_add_tail(&hcu_dev->list, &ocs_hcu.dev_list);
	spin_unlock_bh(&ocs_hcu.lock);

	/* Initialize crypto engine */
	hcu_dev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hcu_dev->engine) {
		rc = -ENOMEM;
		goto list_del;
	}

	rc = crypto_engine_start(hcu_dev->engine);
	if (rc) {
		dev_err(dev, "Could not start engine.\n");
		goto cleanup;
	}

	/* Security infrastructure guarantees OCS clock is enabled. */

	rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
	if (rc) {
		dev_err(dev, "Could not register algorithms.\n");
		goto cleanup;
	}

	return 0;

cleanup:
	crypto_engine_exit(hcu_dev->engine);
list_del:
	spin_lock_bh(&ocs_hcu.lock);
	list_del(&hcu_dev->list);
	spin_unlock_bh(&ocs_hcu.lock);

	return rc;
}

/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
	.probe = kmb_ocs_hcu_probe,
	.remove = kmb_ocs_hcu_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = kmb_ocs_hcu_of_match,
	},
};

module_platform_driver(kmb_ocs_hcu_driver);

MODULE_LICENSE("GPL");