drivers/crypto/atmel-sha.c: lines matching "resume" and "offset" (freetext search results)

1 // SPDX-License-Identifier: GPL-2.0
7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
10 * Some ideas are from the omap-sham.c driver.
30 #include <linux/dma-mapping.h>
39 #include "atmel-sha-regs.h"
40 #include "atmel-authenc.h"
101 unsigned int offset; /* offset in current sg */ member
146 atmel_sha_fn_t resume; member
169 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr) in atmel_sha_reg_name() argument
171 switch (offset) { in atmel_sha_reg_name()
212 snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2); in atmel_sha_reg_name()
233 16u + ((offset - SHA_REG_DIGEST(0)) >> 2)); in atmel_sha_reg_name()
236 (offset - SHA_REG_DIGEST(0)) >> 2); in atmel_sha_reg_name()
243 snprintf(tmp, sz, "0x%02x", offset); in atmel_sha_reg_name()
252 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) in atmel_sha_read() argument
254 u32 value = readl_relaxed(dd->io_base + offset); in atmel_sha_read()
257 if (dd->flags & SHA_FLAGS_DUMP_REG) { in atmel_sha_read()
260 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, in atmel_sha_read()
261 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false)); in atmel_sha_read()
269 u32 offset, u32 value) in atmel_sha_write() argument
272 if (dd->flags & SHA_FLAGS_DUMP_REG) { in atmel_sha_write()
275 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, in atmel_sha_write()
276 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true)); in atmel_sha_write()
280 writel_relaxed(value, dd->io_base + offset); in atmel_sha_write()
285 struct ahash_request *req = dd->req; in atmel_sha_complete()
287 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | in atmel_sha_complete()
291 clk_disable(dd->iclk); in atmel_sha_complete()
293 if ((dd->is_async || dd->force_complete) && req->base.complete) in atmel_sha_complete()
294 req->base.complete(&req->base, err); in atmel_sha_complete()
297 tasklet_schedule(&dd->queue_task); in atmel_sha_complete()
306 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { in atmel_sha_append_sg()
307 count = min(ctx->sg->length - ctx->offset, ctx->total); in atmel_sha_append_sg()
308 count = min(count, ctx->buflen - ctx->bufcnt); in atmel_sha_append_sg()
317 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { in atmel_sha_append_sg()
318 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
325 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, in atmel_sha_append_sg()
326 ctx->offset, count, 0); in atmel_sha_append_sg()
328 ctx->bufcnt += count; in atmel_sha_append_sg()
329 ctx->offset += count; in atmel_sha_append_sg()
330 ctx->total -= count; in atmel_sha_append_sg()
332 if (ctx->offset == ctx->sg->length) { in atmel_sha_append_sg()
333 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
334 if (ctx->sg) in atmel_sha_append_sg()
335 ctx->offset = 0; in atmel_sha_append_sg()
337 ctx->total = 0; in atmel_sha_append_sg()
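
/*
 * Illustrative sketch (editor's addition, not part of atmel-sha.c): the same
 * offset/bufcnt/total bookkeeping as atmel_sha_append_sg() above, modelled on
 * plain memory segments instead of a scatterlist so it can be run standalone.
 * All names here are hypothetical.
 */
#include <stddef.h>
#include <string.h>

struct seg { const unsigned char *data; size_t len; };

static size_t append_segs(unsigned char *buf, size_t buflen, size_t *bufcnt,
			  const struct seg *sg, size_t nsegs,
			  size_t *idx, size_t *offset, size_t *total)
{
	size_t copied = 0;

	while (*bufcnt < buflen && *total && *idx < nsegs) {
		size_t count = sg[*idx].len - *offset;

		if (count > *total)
			count = *total;
		if (count > buflen - *bufcnt)
			count = buflen - *bufcnt;

		memcpy(buf + *bufcnt, sg[*idx].data + *offset, count);
		*bufcnt += count;
		*offset += count;
		*total -= count;
		copied += count;

		if (*offset == sg[*idx].len) {	/* segment consumed: advance */
			(*idx)++;
			*offset = 0;
		}
	}
	return copied;
}
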
348 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
353 * - if message length < 56 bytes then padlen = 56 - message length
354 * - else padlen = 64 + 56 - message length
357 * - if message length < 112 bytes then padlen = 112 - message length
358 * - else padlen = 128 + 112 - message length
366 size[0] = ctx->digcnt[0]; in atmel_sha_fill_padding()
367 size[1] = ctx->digcnt[1]; in atmel_sha_fill_padding()
369 size[0] += ctx->bufcnt; in atmel_sha_fill_padding()
370 if (size[0] < ctx->bufcnt) in atmel_sha_fill_padding()
380 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_fill_padding()
383 index = ctx->bufcnt & 0x7f; in atmel_sha_fill_padding()
384 padlen = (index < 112) ? (112 - index) : ((128+112) - index); in atmel_sha_fill_padding()
385 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
386 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
387 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in atmel_sha_fill_padding()
388 ctx->bufcnt += padlen + 16; in atmel_sha_fill_padding()
389 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
393 index = ctx->bufcnt & 0x3f; in atmel_sha_fill_padding()
394 padlen = (index < 56) ? (56 - index) : ((64+56) - index); in atmel_sha_fill_padding()
395 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
396 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
397 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in atmel_sha_fill_padding()
398 ctx->bufcnt += padlen + 8; in atmel_sha_fill_padding()
399 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
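
/*
 * Sketch (editor's addition): the padlen rules from the comment above as a
 * standalone helper. SHA-384/512 use a 128-byte block with a 16-byte length
 * field; SHA-1/224/256 use 64 and 8 bytes. Worked example: bufcnt = 120 with
 * a 128-byte block gives index = 120, so padlen = (128 + 112) - 120 = 120,
 * and 120 + 120 + 16 = 256 bytes, a whole number of blocks.
 */
#include <stddef.h>

static size_t sha_padlen(size_t bufcnt, int block128)
{
	if (block128) {
		size_t index = bufcnt & 0x7f;	/* bufcnt % 128 */
		return (index < 112) ? 112 - index : (128 + 112) - index;
	} else {
		size_t index = bufcnt & 0x3f;	/* bufcnt % 64 */
		return (index < 56) ? 56 - index : (64 + 56) - index;
	}
}
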
410 if (!tctx->dd) { in atmel_sha_find_dev()
415 tctx->dd = dd; in atmel_sha_find_dev()
417 dd = tctx->dd; in atmel_sha_find_dev()
432 ctx->dd = dd; in atmel_sha_init()
434 ctx->flags = 0; in atmel_sha_init()
436 dev_dbg(dd->dev, "init: digest size: %d\n", in atmel_sha_init()
441 ctx->flags |= SHA_FLAGS_SHA1; in atmel_sha_init()
442 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_init()
445 ctx->flags |= SHA_FLAGS_SHA224; in atmel_sha_init()
446 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_init()
449 ctx->flags |= SHA_FLAGS_SHA256; in atmel_sha_init()
450 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_init()
453 ctx->flags |= SHA_FLAGS_SHA384; in atmel_sha_init()
454 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_init()
457 ctx->flags |= SHA_FLAGS_SHA512; in atmel_sha_init()
458 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_init()
461 return -EINVAL; in atmel_sha_init()
465 ctx->bufcnt = 0; in atmel_sha_init()
466 ctx->digcnt[0] = 0; in atmel_sha_init()
467 ctx->digcnt[1] = 0; in atmel_sha_init()
468 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_init()
475 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_write_ctrl()
480 if (!dd->caps.has_dma) in atmel_sha_write_ctrl()
483 if (dd->caps.has_dualbuff) in atmel_sha_write_ctrl()
489 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_write_ctrl()
520 if (!(ctx->digcnt[0] || ctx->digcnt[1])) { in atmel_sha_write_ctrl()
522 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) { in atmel_sha_write_ctrl()
523 const u32 *hash = (const u32 *)ctx->digest; in atmel_sha_write_ctrl()
531 ctx->flags &= ~SHA_FLAGS_RESTORE; in atmel_sha_write_ctrl()
549 atmel_sha_fn_t resume) in atmel_sha_wait_for_data_ready() argument
554 return resume(dd); in atmel_sha_wait_for_data_ready()
556 dd->resume = resume; in atmel_sha_wait_for_data_ready()
558 return -EINPROGRESS; in atmel_sha_wait_for_data_ready()
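
/*
 * Sketch (editor's addition): the "resume now or resume from IRQ" pattern of
 * atmel_sha_wait_for_data_ready(), paraphrased from the lines above. If
 * DATRDY is already set the continuation runs synchronously; otherwise it is
 * parked in dd->resume and the DATRDY interrupt is unmasked, so the IRQ and
 * tasklet path invoke it later. Register names follow atmel-sha-regs.h.
 */
static int wait_ready_sketch(struct atmel_sha_dev *dd, atmel_sha_fn_t resume)
{
	u32 isr = atmel_sha_read(dd, SHA_ISR);

	if (isr & SHA_INT_DATARDY)		/* already ready: no sleep */
		return resume(dd);

	dd->resume = resume;			/* park the continuation */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	return -EINPROGRESS;			/* caller unwinds; IRQ resumes */
}
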
564 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_cpu()
568 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_cpu()
569 ctx->digcnt[1], ctx->digcnt[0], length, final); in atmel_sha_xmit_cpu()
573 /* should be non-zero before the next lines so that clocks can be disabled later */ in atmel_sha_xmit_cpu()
574 ctx->digcnt[0] += length; in atmel_sha_xmit_cpu()
575 if (ctx->digcnt[0] < length) in atmel_sha_xmit_cpu()
576 ctx->digcnt[1]++; in atmel_sha_xmit_cpu()
579 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_cpu()
583 dd->flags |= SHA_FLAGS_CPU; in atmel_sha_xmit_cpu()
588 return -EINPROGRESS; in atmel_sha_xmit_cpu()
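
/*
 * Sketch (editor's addition): digcnt[] is a 128-bit byte counter kept in two
 * u64 halves. Unsigned overflow wraps, so after the addition the low half is
 * smaller than the addend exactly when a carry occurred. Example:
 * digcnt[0] = 0xffffffffffffffff, length = 1 -> digcnt[0] = 0, and 0 < 1,
 * so digcnt[1] takes the carry.
 */
#include <stdint.h>

static void digcnt_add(uint64_t digcnt[2], uint64_t length)
{
	digcnt[0] += length;
	if (digcnt[0] < length)	/* wrapped: propagate carry */
		digcnt[1]++;
}
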
594 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_pdc()
597 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_pdc()
598 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_pdc()
611 /* should be non-zero before the next lines so that clocks can be disabled later */ in atmel_sha_xmit_pdc()
612 ctx->digcnt[0] += length1; in atmel_sha_xmit_pdc()
613 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_pdc()
614 ctx->digcnt[1]++; in atmel_sha_xmit_pdc()
617 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_pdc()
619 dd->flags |= SHA_FLAGS_DMA_ACTIVE; in atmel_sha_xmit_pdc()
624 return -EINPROGRESS; in atmel_sha_xmit_pdc()
631 dd->is_async = true; in atmel_sha_dma_callback()
633 /* dma_lch_in - completed - wait DATRDY */ in atmel_sha_dma_callback()
640 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_dma()
644 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_dma()
645 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_dma()
647 dd->dma_lch_in.dma_conf.src_maxburst = 16; in atmel_sha_xmit_dma()
648 dd->dma_lch_in.dma_conf.dst_maxburst = 16; in atmel_sha_xmit_dma()
650 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); in atmel_sha_xmit_dma()
658 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, in atmel_sha_xmit_dma()
664 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, in atmel_sha_xmit_dma()
668 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_xmit_dma()
670 in_desc->callback = atmel_sha_dma_callback; in atmel_sha_xmit_dma()
671 in_desc->callback_param = dd; in atmel_sha_xmit_dma()
675 /* should be non-zero before the next lines so that clocks can be disabled later */ in atmel_sha_xmit_dma()
676 ctx->digcnt[0] += length1; in atmel_sha_xmit_dma()
677 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_dma()
678 ctx->digcnt[1]++; in atmel_sha_xmit_dma()
681 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_dma()
683 dd->flags |= SHA_FLAGS_DMA_ACTIVE; in atmel_sha_xmit_dma()
687 dma_async_issue_pending(dd->dma_lch_in.chan); in atmel_sha_xmit_dma()
689 return -EINPROGRESS; in atmel_sha_xmit_dma()
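
/*
 * Sketch (editor's addition): the canonical dmaengine slave-TX sequence
 * behind atmel_sha_xmit_dma(): configure the channel, prepare a slave-sg
 * descriptor, attach a completion callback, submit, then kick the engine.
 * Error handling is reduced to the minimum; names other than the dmaengine
 * API are hypothetical.
 */
static int dma_tx_sketch(struct dma_chan *chan, struct dma_slave_config *cfg,
			 struct scatterlist *sg, unsigned int nents,
			 dma_async_tx_callback done, void *param)
{
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, cfg))
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = done;		/* runs in tasklet context */
	desc->callback_param = param;

	dmaengine_submit(desc);		/* queue on the channel */
	dma_async_issue_pending(chan);	/* start (or continue) the queue */
	return -EINPROGRESS;
}
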
695 if (dd->caps.has_dma) in atmel_sha_xmit_start()
705 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_cpu()
710 bufcnt = ctx->bufcnt; in atmel_sha_update_cpu()
711 ctx->bufcnt = 0; in atmel_sha_update_cpu()
713 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); in atmel_sha_update_cpu()
720 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_xmit_dma_map()
721 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_xmit_dma_map()
722 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_xmit_dma_map()
723 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen + in atmel_sha_xmit_dma_map()
724 ctx->block_size); in atmel_sha_xmit_dma_map()
725 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_xmit_dma_map()
728 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_xmit_dma_map()
731 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); in atmel_sha_xmit_dma_map()
736 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_slow()
742 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_slow()
744 dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n", in atmel_sha_update_dma_slow()
745 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); in atmel_sha_update_dma_slow()
750 if (final || (ctx->bufcnt == ctx->buflen)) { in atmel_sha_update_dma_slow()
751 count = ctx->bufcnt; in atmel_sha_update_dma_slow()
752 ctx->bufcnt = 0; in atmel_sha_update_dma_slow()
761 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_start()
766 if (!ctx->total) in atmel_sha_update_dma_start()
769 if (ctx->bufcnt || ctx->offset) in atmel_sha_update_dma_start()
772 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n", in atmel_sha_update_dma_start()
773 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); in atmel_sha_update_dma_start()
775 sg = ctx->sg; in atmel_sha_update_dma_start()
777 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in atmel_sha_update_dma_start()
780 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) in atmel_sha_update_dma_start()
781 /* size is not ctx->block_size aligned */ in atmel_sha_update_dma_start()
784 length = min(ctx->total, sg->length); in atmel_sha_update_dma_start()
787 if (!(ctx->flags & SHA_FLAGS_FINUP)) { in atmel_sha_update_dma_start()
788 /* not last sg must be ctx->block_size aligned */ in atmel_sha_update_dma_start()
789 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
790 length -= tail; in atmel_sha_update_dma_start()
794 ctx->total -= length; in atmel_sha_update_dma_start()
795 ctx->offset = length; /* offset where to start slow */ in atmel_sha_update_dma_start()
797 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_start()
801 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
802 length -= tail; in atmel_sha_update_dma_start()
803 ctx->total += tail; in atmel_sha_update_dma_start()
804 ctx->offset = length; /* offset where to start slow */ in atmel_sha_update_dma_start()
806 sg = ctx->sg; in atmel_sha_update_dma_start()
811 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_update_dma_start()
812 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_start()
813 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_update_dma_start()
814 dev_err(dd->dev, "dma %zu bytes error\n", in atmel_sha_update_dma_start()
815 ctx->buflen + ctx->block_size); in atmel_sha_update_dma_start()
816 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
820 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_update_dma_start()
821 count = ctx->bufcnt; in atmel_sha_update_dma_start()
822 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
823 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, in atmel_sha_update_dma_start()
826 ctx->sg = sg; in atmel_sha_update_dma_start()
827 if (!dma_map_sg(dd->dev, ctx->sg, 1, in atmel_sha_update_dma_start()
829 dev_err(dd->dev, "dma_map_sg error\n"); in atmel_sha_update_dma_start()
830 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
833 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
835 count = ctx->bufcnt; in atmel_sha_update_dma_start()
836 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
837 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), in atmel_sha_update_dma_start()
838 length, ctx->dma_addr, count, final); in atmel_sha_update_dma_start()
842 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in atmel_sha_update_dma_start()
843 dev_err(dd->dev, "dma_map_sg error\n"); in atmel_sha_update_dma_start()
844 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
847 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
850 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, in atmel_sha_update_dma_start()
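
/*
 * Sketch (editor's addition): the fast/slow split used above. Only a
 * block-aligned prefix goes out via DMA; the unaligned tail is pushed back
 * into ctx->total and picked up by the buffered slow path at ctx->offset.
 * block_size is a power of two, so "& (bs - 1)" is "% bs".
 */
#include <stddef.h>

static size_t dma_split(size_t length, size_t block_size, size_t *tail)
{
	*tail = length & (block_size - 1);	/* bytes the DMA must not see */
	return length - *tail;			/* block-aligned DMA length */
}
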
856 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_stop()
858 if (ctx->flags & SHA_FLAGS_SG) { in atmel_sha_update_dma_stop()
859 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
860 if (ctx->sg->length == ctx->offset) { in atmel_sha_update_dma_stop()
861 ctx->sg = sg_next(ctx->sg); in atmel_sha_update_dma_stop()
862 if (ctx->sg) in atmel_sha_update_dma_stop()
863 ctx->offset = 0; in atmel_sha_update_dma_stop()
865 if (ctx->flags & SHA_FLAGS_PAD) { in atmel_sha_update_dma_stop()
866 dma_unmap_single(dd->dev, ctx->dma_addr, in atmel_sha_update_dma_stop()
867 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
870 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + in atmel_sha_update_dma_stop()
871 ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
877 struct ahash_request *req = dd->req; in atmel_sha_update_req()
881 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", in atmel_sha_update_req()
882 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
884 if (ctx->flags & SHA_FLAGS_CPU) in atmel_sha_update_req()
890 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n", in atmel_sha_update_req()
891 err, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
898 struct ahash_request *req = dd->req; in atmel_sha_final_req()
903 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { in atmel_sha_final_req()
905 count = ctx->bufcnt; in atmel_sha_final_req()
906 ctx->bufcnt = 0; in atmel_sha_final_req()
912 count = ctx->bufcnt; in atmel_sha_final_req()
913 ctx->bufcnt = 0; in atmel_sha_final_req()
914 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); in atmel_sha_final_req()
917 dev_dbg(dd->dev, "final_req: err: %d\n", err); in atmel_sha_final_req()
925 u32 *hash = (u32 *)ctx->digest; in atmel_sha_copy_hash()
928 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_copy_hash()
949 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
950 ctx->flags |= SHA_FLAGS_RESTORE; in atmel_sha_copy_hash()
957 if (!req->result) in atmel_sha_copy_ready_hash()
960 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_copy_ready_hash()
963 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
967 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
971 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
975 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
979 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
987 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish()
989 if (ctx->digcnt[0] || ctx->digcnt[1]) in atmel_sha_finish()
992 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1], in atmel_sha_finish()
993 ctx->digcnt[0], ctx->bufcnt); in atmel_sha_finish()
1001 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish_req()
1005 if (SHA_FLAGS_FINAL & dd->flags) in atmel_sha_finish_req()
1008 ctx->flags |= SHA_FLAGS_ERROR; in atmel_sha_finish_req()
1019 err = clk_enable(dd->iclk); in atmel_sha_hw_init()
1023 if (!(SHA_FLAGS_INIT & dd->flags)) { in atmel_sha_hw_init()
1025 dd->flags |= SHA_FLAGS_INIT; in atmel_sha_hw_init()
1044 dd->hw_version = atmel_sha_get_version(dd); in atmel_sha_hw_version_init()
1046 dev_info(dd->dev, in atmel_sha_hw_version_init()
1047 "version: 0x%x\n", dd->hw_version); in atmel_sha_hw_version_init()
1049 clk_disable(dd->iclk); in atmel_sha_hw_version_init()
1063 spin_lock_irqsave(&dd->lock, flags); in atmel_sha_handle_queue()
1065 ret = ahash_enqueue_request(&dd->queue, req); in atmel_sha_handle_queue()
1067 if (SHA_FLAGS_BUSY & dd->flags) { in atmel_sha_handle_queue()
1068 spin_unlock_irqrestore(&dd->lock, flags); in atmel_sha_handle_queue()
1072 backlog = crypto_get_backlog(&dd->queue); in atmel_sha_handle_queue()
1073 async_req = crypto_dequeue_request(&dd->queue); in atmel_sha_handle_queue()
1075 dd->flags |= SHA_FLAGS_BUSY; in atmel_sha_handle_queue()
1077 spin_unlock_irqrestore(&dd->lock, flags); in atmel_sha_handle_queue()
1083 backlog->complete(backlog, -EINPROGRESS); in atmel_sha_handle_queue()
1085 ctx = crypto_tfm_ctx(async_req->tfm); in atmel_sha_handle_queue()
1087 dd->req = ahash_request_cast(async_req); in atmel_sha_handle_queue()
1088 start_async = (dd->req != req); in atmel_sha_handle_queue()
1089 dd->is_async = start_async; in atmel_sha_handle_queue()
1090 dd->force_complete = false; in atmel_sha_handle_queue()
1092 /* WARNING: ctx->start() MAY change dd->is_async. */ in atmel_sha_handle_queue()
1093 err = ctx->start(dd); in atmel_sha_handle_queue()
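
/*
 * Sketch (editor's addition): the enqueue/dequeue discipline of
 * atmel_sha_handle_queue(). The spinlock only guards the queue and the BUSY
 * flag; the request itself runs outside the lock, and a backlogged request
 * is notified with -EINPROGRESS before the next one starts. The queue
 * helpers are the kernel crypto API calls used by this driver.
 */
static int queue_sketch(struct atmel_sha_dev *dd, struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dd->lock, flags);
	ret = ahash_enqueue_request(&dd->queue, req);
	if (dd->flags & SHA_FLAGS_BUSY) {	/* someone else will drain */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	/* ... start the dequeued request via ctx->start(dd) ... */
	return ret;
}
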
1101 struct ahash_request *req = dd->req; in atmel_sha_start()
1105 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", in atmel_sha_start()
1106 ctx->op, req->nbytes); in atmel_sha_start()
1114 * -EINPROGRESS: the hardware is busy and the SHA driver will resume in atmel_sha_start()
1131 dd->resume = atmel_sha_done; in atmel_sha_start()
1132 if (ctx->op == SHA_OP_UPDATE) { in atmel_sha_start()
1134 if (!err && (ctx->flags & SHA_FLAGS_FINUP)) in atmel_sha_start()
1137 } else if (ctx->op == SHA_OP_FINAL) { in atmel_sha_start()
1145 dev_dbg(dd->dev, "exit, err: %d\n", err); in atmel_sha_start()
1153 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in atmel_sha_enqueue()
1154 struct atmel_sha_dev *dd = tctx->dd; in atmel_sha_enqueue()
1156 ctx->op = op; in atmel_sha_enqueue()
1165 if (!req->nbytes) in atmel_sha_update()
1168 ctx->total = req->nbytes; in atmel_sha_update()
1169 ctx->sg = req->src; in atmel_sha_update()
1170 ctx->offset = 0; in atmel_sha_update()
1172 if (ctx->flags & SHA_FLAGS_FINUP) { in atmel_sha_update()
1173 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) in atmel_sha_update()
1175 ctx->flags |= SHA_FLAGS_CPU; in atmel_sha_update()
1176 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { in atmel_sha_update()
1187 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_final()
1189 if (ctx->flags & SHA_FLAGS_ERROR) in atmel_sha_final()
1192 if (ctx->flags & SHA_FLAGS_PAD) in atmel_sha_final()
1204 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_finup()
1207 if (err1 == -EINPROGRESS || in atmel_sha_finup()
1208 (err1 == -EBUSY && (ahash_request_flags(req) & in atmel_sha_finup()
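
/*
 * Sketch (editor's addition): the finup() contract implemented above. FINUP
 * is set first so the update path knows to chain into final; if update went
 * asynchronous, finup returns immediately and final is triggered from the
 * completion path instead of being called here.
 */
static int finup_sketch(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS ||
	    (err1 == -EBUSY &&
	     (ahash_request_flags(req) & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err1;	/* final runs later, from the async path */

	err2 = atmel_sha_final(req);
	return err1 ?: err2;
}
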
1249 ctx->start = atmel_sha_start; in atmel_sha_cra_init()
1256 alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; in atmel_sha_alg_init()
1257 alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; in atmel_sha_alg_init()
1258 alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx); in atmel_sha_alg_init()
1259 alg->halg.base.cra_module = THIS_MODULE; in atmel_sha_alg_init()
1260 alg->halg.base.cra_init = atmel_sha_cra_init; in atmel_sha_alg_init()
1262 alg->halg.statesize = sizeof(struct atmel_sha_reqctx); in atmel_sha_alg_init()
1264 alg->init = atmel_sha_init; in atmel_sha_alg_init()
1265 alg->update = atmel_sha_update; in atmel_sha_alg_init()
1266 alg->final = atmel_sha_final; in atmel_sha_alg_init()
1267 alg->finup = atmel_sha_finup; in atmel_sha_alg_init()
1268 alg->digest = atmel_sha_digest; in atmel_sha_alg_init()
1269 alg->export = atmel_sha_export; in atmel_sha_alg_init()
1270 alg->import = atmel_sha_import; in atmel_sha_alg_init()
1276 .halg.base.cra_driver_name = "atmel-sha1",
1283 .halg.base.cra_driver_name = "atmel-sha256",
1292 .halg.base.cra_driver_name = "atmel-sha224",
1301 .halg.base.cra_driver_name = "atmel-sha384",
1309 .halg.base.cra_driver_name = "atmel-sha512",
1328 if (SHA_FLAGS_CPU & dd->flags) { in atmel_sha_done()
1329 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { in atmel_sha_done()
1330 dd->flags &= ~SHA_FLAGS_OUTPUT_READY; in atmel_sha_done()
1333 } else if (SHA_FLAGS_DMA_READY & dd->flags) { in atmel_sha_done()
1334 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { in atmel_sha_done()
1335 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; in atmel_sha_done()
1338 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { in atmel_sha_done()
1339 /* hash or semi-hash ready */ in atmel_sha_done()
1340 dd->flags &= ~(SHA_FLAGS_DMA_READY | in atmel_sha_done()
1343 if (err != -EINPROGRESS) in atmel_sha_done()
1351 atmel_sha_finish_req(dd->req, err); in atmel_sha_done()
1360 dd->is_async = true; in atmel_sha_done_task()
1361 (void)dd->resume(dd); in atmel_sha_done_task()
1372 if (SHA_FLAGS_BUSY & sha_dd->flags) { in atmel_sha_irq()
1373 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; in atmel_sha_irq()
1374 if (!(SHA_FLAGS_CPU & sha_dd->flags)) in atmel_sha_irq()
1375 sha_dd->flags |= SHA_FLAGS_DMA_READY; in atmel_sha_irq()
1376 tasklet_schedule(&sha_dd->done_task); in atmel_sha_irq()
1378 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); in atmel_sha_irq()
1393 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_check_aligned()
1394 struct ahash_request *req = dd->req; in atmel_sha_dma_check_aligned()
1396 size_t bs = ctx->block_size; in atmel_sha_dma_check_aligned()
1400 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in atmel_sha_dma_check_aligned()
1407 if (len <= sg->length) { in atmel_sha_dma_check_aligned()
1408 dma->nents = nents + 1; in atmel_sha_dma_check_aligned()
1409 dma->last_sg_length = sg->length; in atmel_sha_dma_check_aligned()
1410 sg->length = ALIGN(len, sizeof(u32)); in atmel_sha_dma_check_aligned()
1415 if (!IS_ALIGNED(sg->length, bs)) in atmel_sha_dma_check_aligned()
1418 len -= sg->length; in atmel_sha_dma_check_aligned()
1427 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_callback2()
1431 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_callback2()
1433 sg = dma->sg; in atmel_sha_dma_callback2()
1434 for (nents = 0; nents < dma->nents - 1; ++nents) in atmel_sha_dma_callback2()
1436 sg->length = dma->last_sg_length; in atmel_sha_dma_callback2()
1438 dd->is_async = true; in atmel_sha_dma_callback2()
1439 (void)atmel_sha_wait_for_data_ready(dd, dd->resume); in atmel_sha_dma_callback2()
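
/*
 * Sketch (editor's addition): atmel_sha_dma_check_aligned() rounds the last
 * segment's length up to a u32 multiple so the DMA transfer stays legal,
 * remembering the true length; the completion callback above walks back to
 * that segment and restores it. Condensed into the two halves (helper names
 * are hypothetical):
 */
static void sg_align_for_dma(struct atmel_sha_dma *dma,
			     struct scatterlist *last, size_t len)
{
	dma->last_sg_length = last->length;	/* remember real length */
	last->length = ALIGN(len, sizeof(u32));	/* round up for DMA */
}

static void sg_restore_after_dma(struct atmel_sha_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	unsigned int i;

	for (i = 0; i < dma->nents - 1; ++i)	/* walk to the last segment */
		sg = sg_next(sg);
	sg->length = dma->last_sg_length;
}
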
1445 atmel_sha_fn_t resume) in atmel_sha_dma_start() argument
1447 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_start()
1448 struct dma_slave_config *config = &dma->dma_conf; in atmel_sha_dma_start()
1449 struct dma_chan *chan = dma->chan; in atmel_sha_dma_start()
1455 dd->resume = resume; in atmel_sha_dma_start()
1458 * dma->nents has already been initialized by in atmel_sha_dma_start()
1461 dma->sg = src; in atmel_sha_dma_start()
1462 sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_start()
1464 err = -ENOMEM; in atmel_sha_dma_start()
1468 config->src_maxburst = 16; in atmel_sha_dma_start()
1469 config->dst_maxburst = 16; in atmel_sha_dma_start()
1474 desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV, in atmel_sha_dma_start()
1477 err = -ENOMEM; in atmel_sha_dma_start()
1481 desc->callback = atmel_sha_dma_callback2; in atmel_sha_dma_start()
1482 desc->callback_param = dd; in atmel_sha_dma_start()
1490 return -EINPROGRESS; in atmel_sha_dma_start()
1493 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_start()
1503 struct ahash_request *req = dd->req; in atmel_sha_cpu_transfer()
1505 const u32 *words = (const u32 *)ctx->buffer; in atmel_sha_cpu_transfer()
1509 din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1; in atmel_sha_cpu_transfer()
1512 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)); in atmel_sha_cpu_transfer()
1516 ctx->offset += ctx->bufcnt; in atmel_sha_cpu_transfer()
1517 ctx->total -= ctx->bufcnt; in atmel_sha_cpu_transfer()
1519 if (!ctx->total) in atmel_sha_cpu_transfer()
1524 * Fill ctx->buffer now with the next data to be written into in atmel_sha_cpu_transfer()
1530 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); in atmel_sha_cpu_transfer()
1531 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, in atmel_sha_cpu_transfer()
1532 ctx->offset, ctx->bufcnt, 0); in atmel_sha_cpu_transfer()
1538 dd->resume = atmel_sha_cpu_transfer; in atmel_sha_cpu_transfer()
1540 return -EINPROGRESS; in atmel_sha_cpu_transfer()
1544 if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY))) in atmel_sha_cpu_transfer()
1545 return dd->cpu_transfer_complete(dd); in atmel_sha_cpu_transfer()
1547 return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete); in atmel_sha_cpu_transfer()
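
/*
 * Sketch (editor's addition): the PIO loop implied by din_inc/num_words
 * above; the actual loop body is not among the matched lines, so this is a
 * hedged reconstruction. When SHA_FLAGS_IDATAR0 is set every word goes to
 * IDATAR0 (the register DMA would target); otherwise the destination index
 * increments so the block lands in IDATAR0..IDATARn.
 */
static void pio_write_sketch(struct atmel_sha_dev *dd, const u32 *words,
			     size_t num_words, int din_inc)
{
	size_t i, din;

	for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
		atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
}
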
1555 atmel_sha_fn_t resume) in atmel_sha_cpu_start() argument
1557 struct ahash_request *req = dd->req; in atmel_sha_cpu_start()
1561 return resume(dd); in atmel_sha_cpu_start()
1563 ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY); in atmel_sha_cpu_start()
1566 ctx->flags |= SHA_FLAGS_IDATAR0; in atmel_sha_cpu_start()
1569 ctx->flags |= SHA_FLAGS_WAIT_DATARDY; in atmel_sha_cpu_start()
1571 ctx->sg = sg; in atmel_sha_cpu_start()
1572 ctx->total = len; in atmel_sha_cpu_start()
1573 ctx->offset = 0; in atmel_sha_cpu_start()
1576 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); in atmel_sha_cpu_start()
1577 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, in atmel_sha_cpu_start()
1578 ctx->offset, ctx->bufcnt, 0); in atmel_sha_cpu_start()
1580 dd->cpu_transfer_complete = resume; in atmel_sha_cpu_start()
1587 atmel_sha_fn_t resume) in atmel_sha_cpu_hash() argument
1589 struct ahash_request *req = dd->req; in atmel_sha_cpu_hash()
1594 if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding)) in atmel_sha_cpu_hash()
1595 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_cpu_hash()
1597 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); in atmel_sha_cpu_hash()
1603 sg_init_one(&dd->tmp, data, datalen); in atmel_sha_cpu_hash()
1604 return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume); in atmel_sha_cpu_hash()
1624 kfree(hkey->keydup); in atmel_sha_hmac_key_release()
1634 if (keylen > sizeof(hkey->buffer)) { in atmel_sha_hmac_key_set()
1635 hkey->keydup = kmemdup(key, keylen, GFP_KERNEL); in atmel_sha_hmac_key_set()
1636 if (!hkey->keydup) in atmel_sha_hmac_key_set()
1637 return -ENOMEM; in atmel_sha_hmac_key_set()
1640 memcpy(hkey->buffer, key, keylen); in atmel_sha_hmac_key_set()
1643 hkey->valid = true; in atmel_sha_hmac_key_set()
1644 hkey->keylen = keylen; in atmel_sha_hmac_key_set()
1652 if (!hkey->valid) in atmel_sha_hmac_key_get()
1655 *keylen = hkey->keylen; in atmel_sha_hmac_key_get()
1656 *key = (hkey->keydup) ? hkey->keydup : hkey->buffer; in atmel_sha_hmac_key_get()
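
/*
 * Sketch (editor's addition): the small-key optimization behind
 * atmel_sha_hmac_key_set()/get(). Short keys live in a fixed inline buffer,
 * long keys in a kmemdup() copy that the release path must kfree(); the
 * getter hands back whichever storage is in use. Struct layout here is an
 * assumption for illustration.
 */
struct hkey_sketch {
	bool		valid;
	unsigned int	keylen;
	u8		buffer[SHA512_BLOCK_SIZE];	/* short keys, inline */
	u8		*keydup;			/* long keys, heap */
};

static int hkey_set_sketch(struct hkey_sketch *hkey, const u8 *key,
			   unsigned int keylen)
{
	if (keylen > sizeof(hkey->buffer)) {
		hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
		if (!hkey->keydup)
			return -ENOMEM;
	} else {
		memcpy(hkey->buffer, key, keylen);
	}
	hkey->valid = true;
	hkey->keylen = keylen;
	return 0;
}
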
1667 atmel_sha_fn_t resume; member
1671 atmel_sha_fn_t resume);
1685 atmel_sha_fn_t resume) in atmel_sha_hmac_setup() argument
1687 struct ahash_request *req = dd->req; in atmel_sha_hmac_setup()
1695 hmac->resume = resume; in atmel_sha_hmac_setup()
1696 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_hmac_setup()
1698 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_hmac_setup()
1699 ctx->hash_size = SHA1_DIGEST_SIZE; in atmel_sha_hmac_setup()
1703 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_hmac_setup()
1704 ctx->hash_size = SHA256_DIGEST_SIZE; in atmel_sha_hmac_setup()
1708 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_hmac_setup()
1709 ctx->hash_size = SHA256_DIGEST_SIZE; in atmel_sha_hmac_setup()
1713 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_hmac_setup()
1714 ctx->hash_size = SHA512_DIGEST_SIZE; in atmel_sha_hmac_setup()
1718 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_hmac_setup()
1719 ctx->hash_size = SHA512_DIGEST_SIZE; in atmel_sha_hmac_setup()
1723 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_hmac_setup()
1725 bs = ctx->block_size; in atmel_sha_hmac_setup()
1727 if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen))) in atmel_sha_hmac_setup()
1728 return resume(dd); in atmel_sha_hmac_setup()
1735 memcpy((u8 *)hmac->ipad, key, keylen); in atmel_sha_hmac_setup()
1736 memset((u8 *)hmac->ipad + keylen, 0, bs - keylen); in atmel_sha_hmac_setup()
1749 struct ahash_request *req = dd->req; in atmel_sha_hmac_prehash_key_done()
1754 size_t bs = ctx->block_size; in atmel_sha_hmac_prehash_key_done()
1759 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_prehash_key_done()
1760 memset((u8 *)hmac->ipad + ds, 0, bs - ds); in atmel_sha_hmac_prehash_key_done()
1766 struct ahash_request *req = dd->req; in atmel_sha_hmac_compute_ipad_hash()
1770 size_t bs = ctx->block_size; in atmel_sha_hmac_compute_ipad_hash()
1773 memcpy(hmac->opad, hmac->ipad, bs); in atmel_sha_hmac_compute_ipad_hash()
1775 hmac->ipad[i] ^= 0x36363636; in atmel_sha_hmac_compute_ipad_hash()
1776 hmac->opad[i] ^= 0x5c5c5c5c; in atmel_sha_hmac_compute_ipad_hash()
1779 return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false, in atmel_sha_hmac_compute_ipad_hash()
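
/*
 * Sketch (editor's addition): standard HMAC key preprocessing (RFC 2104),
 * which the word-wise 0x36363636/0x5c5c5c5c XORs above implement: pad the
 * key (already shortened by hashing if longer than a block) to the block
 * size, then derive ipad = key ^ 0x36.. and opad = key ^ 0x5c.. byte by
 * byte. Standalone C; assumes keylen <= block_size.
 */
#include <stddef.h>
#include <string.h>

static void hmac_pads(const unsigned char *key, size_t keylen,
		      size_t block_size, unsigned char *ipad,
		      unsigned char *opad)
{
	size_t i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, block_size - keylen);	/* zero-pad the key */
	memcpy(opad, ipad, block_size);

	for (i = 0; i < block_size; ++i) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}
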
1785 struct ahash_request *req = dd->req; in atmel_sha_hmac_compute_opad_hash()
1789 size_t bs = ctx->block_size; in atmel_sha_hmac_compute_opad_hash()
1790 size_t hs = ctx->hash_size; in atmel_sha_hmac_compute_opad_hash()
1794 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_compute_opad_hash()
1795 return atmel_sha_cpu_hash(dd, hmac->opad, bs, false, in atmel_sha_hmac_compute_opad_hash()
1801 struct ahash_request *req = dd->req; in atmel_sha_hmac_setup_done()
1805 size_t hs = ctx->hash_size; in atmel_sha_hmac_setup_done()
1809 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_setup_done()
1810 atmel_sha_hmac_key_release(&hmac->hkey); in atmel_sha_hmac_setup_done()
1811 return hmac->resume(dd); in atmel_sha_hmac_setup_done()
1816 struct ahash_request *req = dd->req; in atmel_sha_hmac_start()
1824 switch (ctx->op) { in atmel_sha_hmac_start()
1830 dd->resume = atmel_sha_done; in atmel_sha_hmac_start()
1835 dd->resume = atmel_sha_hmac_final; in atmel_sha_hmac_start()
1844 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_hmac_start()
1855 return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen); in atmel_sha_hmac_setkey()
1871 struct ahash_request *req = dd->req; in atmel_sha_hmac_init_done()
1875 size_t bs = ctx->block_size; in atmel_sha_hmac_init_done()
1876 size_t hs = ctx->hash_size; in atmel_sha_hmac_init_done()
1878 ctx->bufcnt = 0; in atmel_sha_hmac_init_done()
1879 ctx->digcnt[0] = bs; in atmel_sha_hmac_init_done()
1880 ctx->digcnt[1] = 0; in atmel_sha_hmac_init_done()
1881 ctx->flags |= SHA_FLAGS_RESTORE; in atmel_sha_hmac_init_done()
1882 memcpy(ctx->digest, hmac->ipad, hs); in atmel_sha_hmac_init_done()
1888 struct ahash_request *req = dd->req; in atmel_sha_hmac_final()
1892 u32 *digest = (u32 *)ctx->digest; in atmel_sha_hmac_final()
1894 size_t bs = ctx->block_size; in atmel_sha_hmac_final()
1895 size_t hs = ctx->hash_size; in atmel_sha_hmac_final()
1908 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_hmac_final()
1911 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); in atmel_sha_hmac_final()
1917 sg_init_one(&dd->tmp, digest, ds); in atmel_sha_hmac_final()
1918 return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true, in atmel_sha_hmac_final()
1925 * req->result might not be sizeof(u32) aligned, so copy the in atmel_sha_hmac_final_done()
1926 * digest into ctx->digest[] before memcpy() the data into in atmel_sha_hmac_final_done()
1927 * req->result. in atmel_sha_hmac_final_done()
1929 atmel_sha_copy_hash(dd->req); in atmel_sha_hmac_final_done()
1930 atmel_sha_copy_ready_hash(dd->req); in atmel_sha_hmac_final_done()
1947 struct ahash_request *req = dd->req; in atmel_sha_hmac_digest2()
1951 size_t hs = ctx->hash_size; in atmel_sha_hmac_digest2()
1957 if (!req->nbytes) in atmel_sha_hmac_digest2()
1958 return atmel_sha_complete(dd, -EINVAL); // TODO: in atmel_sha_hmac_digest2()
1961 if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD && in atmel_sha_hmac_digest2()
1962 atmel_sha_dma_check_aligned(dd, req->src, req->nbytes)) in atmel_sha_hmac_digest2()
1968 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); in atmel_sha_hmac_digest2()
1972 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_hmac_digest2()
1976 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; in atmel_sha_hmac_digest2()
1983 atmel_sha_write(dd, SHA_MSR, req->nbytes); in atmel_sha_hmac_digest2()
1984 atmel_sha_write(dd, SHA_BCR, req->nbytes); in atmel_sha_hmac_digest2()
1990 return atmel_sha_dma_start(dd, req->src, req->nbytes, in atmel_sha_hmac_digest2()
1993 return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true, in atmel_sha_hmac_digest2()
2003 hmac->base.start = atmel_sha_hmac_start; in atmel_sha_hmac_cra_init()
2004 atmel_sha_hmac_key_init(&hmac->hkey); in atmel_sha_hmac_cra_init()
2013 atmel_sha_hmac_key_release(&hmac->hkey); in atmel_sha_hmac_cra_exit()
2018 alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; in atmel_sha_hmac_alg_init()
2019 alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; in atmel_sha_hmac_alg_init()
2020 alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx); in atmel_sha_hmac_alg_init()
2021 alg->halg.base.cra_module = THIS_MODULE; in atmel_sha_hmac_alg_init()
2022 alg->halg.base.cra_init = atmel_sha_hmac_cra_init; in atmel_sha_hmac_alg_init()
2023 alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit; in atmel_sha_hmac_alg_init()
2025 alg->halg.statesize = sizeof(struct atmel_sha_reqctx); in atmel_sha_hmac_alg_init()
2027 alg->init = atmel_sha_hmac_init; in atmel_sha_hmac_alg_init()
2028 alg->update = atmel_sha_update; in atmel_sha_hmac_alg_init()
2029 alg->final = atmel_sha_final; in atmel_sha_hmac_alg_init()
2030 alg->digest = atmel_sha_hmac_digest; in atmel_sha_hmac_alg_init()
2031 alg->setkey = atmel_sha_hmac_setkey; in atmel_sha_hmac_alg_init()
2032 alg->export = atmel_sha_export; in atmel_sha_hmac_alg_init()
2033 alg->import = atmel_sha_import; in atmel_sha_hmac_alg_init()
2039 .halg.base.cra_driver_name = "atmel-hmac-sha1",
2046 .halg.base.cra_driver_name = "atmel-hmac-sha224",
2053 .halg.base.cra_driver_name = "atmel-hmac-sha256",
2060 .halg.base.cra_driver_name = "atmel-hmac-sha384",
2067 .halg.base.cra_driver_name = "atmel-hmac-sha512",
2105 struct ahash_request *req = areq->data; in atmel_sha_authenc_complete()
2108 authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); in atmel_sha_authenc_complete()
2113 struct ahash_request *req = dd->req; in atmel_sha_authenc_start()
2118 * Force atmel_sha_complete() to call req->base.complete(), ie in atmel_sha_authenc_start()
2119 * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). in atmel_sha_authenc_start()
2121 dd->force_complete = true; in atmel_sha_authenc_start()
2124 return authctx->cb(authctx->aes_dev, err, dd->is_async); in atmel_sha_authenc_start()
2148 int err = -EINVAL; in atmel_sha_authenc_spawn()
2152 name = "atmel-hmac-sha1"; in atmel_sha_authenc_spawn()
2156 name = "atmel-hmac-sha224"; in atmel_sha_authenc_spawn()
2160 name = "atmel-hmac-sha256"; in atmel_sha_authenc_spawn()
2164 name = "atmel-hmac-sha384"; in atmel_sha_authenc_spawn()
2168 name = "atmel-hmac-sha512"; in atmel_sha_authenc_spawn()
2181 tctx->start = atmel_sha_authenc_start; in atmel_sha_authenc_spawn()
2182 tctx->flags = mode; in atmel_sha_authenc_spawn()
2186 err = -ENOMEM; in atmel_sha_authenc_spawn()
2189 auth->tfm = tfm; in atmel_sha_authenc_spawn()
2203 crypto_free_ahash(auth->tfm); in atmel_sha_authenc_free()
2211 struct crypto_ahash *tfm = auth->tfm; in atmel_sha_authenc_setkey()
2225 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_schedule()
2226 struct crypto_ahash *tfm = auth->tfm; in atmel_sha_authenc_schedule()
2236 return cb(aes_dev, -ENODEV, false); in atmel_sha_authenc_schedule()
2239 ctx->dd = dd; in atmel_sha_authenc_schedule()
2240 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_authenc_schedule()
2241 authctx->cb = cb; in atmel_sha_authenc_schedule()
2242 authctx->aes_dev = aes_dev; in atmel_sha_authenc_schedule()
2257 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_init()
2260 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_init()
2263 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_authenc_init()
2265 authctx->cb = cb; in atmel_sha_authenc_init()
2266 authctx->aes_dev = aes_dev; in atmel_sha_authenc_init()
2267 authctx->assoc = assoc; in atmel_sha_authenc_init()
2268 authctx->assoclen = assoclen; in atmel_sha_authenc_init()
2269 authctx->textlen = textlen; in atmel_sha_authenc_init()
2271 ctx->flags = hmac->base.flags; in atmel_sha_authenc_init()
2278 struct ahash_request *req = dd->req; in atmel_sha_authenc_init2()
2280 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_init2()
2283 size_t hs = ctx->hash_size; in atmel_sha_authenc_init2()
2289 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); in atmel_sha_authenc_init2()
2293 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_authenc_init2()
2298 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; in atmel_sha_authenc_init2()
2301 msg_size = authctx->assoclen + authctx->textlen; in atmel_sha_authenc_init2()
2308 return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, in atmel_sha_authenc_init2()
2315 struct ahash_request *req = dd->req; in atmel_sha_authenc_init_done()
2318 return authctx->cb(authctx->aes_dev, 0, dd->is_async); in atmel_sha_authenc_init_done()
2327 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_final()
2328 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_final()
2330 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_authenc_final()
2332 authctx->digestlen = SHA1_DIGEST_SIZE; in atmel_sha_authenc_final()
2336 authctx->digestlen = SHA224_DIGEST_SIZE; in atmel_sha_authenc_final()
2340 authctx->digestlen = SHA256_DIGEST_SIZE; in atmel_sha_authenc_final()
2344 authctx->digestlen = SHA384_DIGEST_SIZE; in atmel_sha_authenc_final()
2348 authctx->digestlen = SHA512_DIGEST_SIZE; in atmel_sha_authenc_final()
2352 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_authenc_final()
2354 if (authctx->digestlen > digestlen) in atmel_sha_authenc_final()
2355 authctx->digestlen = digestlen; in atmel_sha_authenc_final()
2357 authctx->cb = cb; in atmel_sha_authenc_final()
2358 authctx->aes_dev = aes_dev; in atmel_sha_authenc_final()
2359 authctx->digest = digest; in atmel_sha_authenc_final()
2367 struct ahash_request *req = dd->req; in atmel_sha_authenc_final_done()
2369 size_t i, num_words = authctx->digestlen / sizeof(u32); in atmel_sha_authenc_final_done()
2372 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_authenc_final_done()
2380 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_abort()
2381 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_abort()
2383 /* Prevent atmel_sha_complete() from calling req->base.complete(). */ in atmel_sha_authenc_abort()
2384 dd->is_async = false; in atmel_sha_authenc_abort()
2385 dd->force_complete = false; in atmel_sha_authenc_abort()
2397 if (dd->caps.has_hmac) in atmel_sha_unregister_algs()
2404 if (dd->caps.has_sha224) in atmel_sha_unregister_algs()
2407 if (dd->caps.has_sha_384_512) { in atmel_sha_unregister_algs()
2425 if (dd->caps.has_sha224) { in atmel_sha_register_algs()
2433 if (dd->caps.has_sha_384_512) { in atmel_sha_register_algs()
2443 if (dd->caps.has_hmac) { in atmel_sha_register_algs()
2475 dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); in atmel_sha_dma_init()
2476 if (IS_ERR(dd->dma_lch_in.chan)) { in atmel_sha_dma_init()
2477 dev_err(dd->dev, "DMA channel is not available\n"); in atmel_sha_dma_init()
2478 return PTR_ERR(dd->dma_lch_in.chan); in atmel_sha_dma_init()
2481 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + in atmel_sha_dma_init()
2483 dd->dma_lch_in.dma_conf.src_maxburst = 1; in atmel_sha_dma_init()
2484 dd->dma_lch_in.dma_conf.src_addr_width = in atmel_sha_dma_init()
2486 dd->dma_lch_in.dma_conf.dst_maxburst = 1; in atmel_sha_dma_init()
2487 dd->dma_lch_in.dma_conf.dst_addr_width = in atmel_sha_dma_init()
2489 dd->dma_lch_in.dma_conf.device_fc = false; in atmel_sha_dma_init()
2496 dma_release_channel(dd->dma_lch_in.chan); in atmel_sha_dma_cleanup()
2502 dd->caps.has_dma = 0; in atmel_sha_get_cap()
2503 dd->caps.has_dualbuff = 0; in atmel_sha_get_cap()
2504 dd->caps.has_sha224 = 0; in atmel_sha_get_cap()
2505 dd->caps.has_sha_384_512 = 0; in atmel_sha_get_cap()
2506 dd->caps.has_uihv = 0; in atmel_sha_get_cap()
2507 dd->caps.has_hmac = 0; in atmel_sha_get_cap()
2510 switch (dd->hw_version & 0xff0) { in atmel_sha_get_cap()
2512 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2513 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2514 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2515 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2516 dd->caps.has_uihv = 1; in atmel_sha_get_cap()
2517 dd->caps.has_hmac = 1; in atmel_sha_get_cap()
2520 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2521 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2522 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2523 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2524 dd->caps.has_uihv = 1; in atmel_sha_get_cap()
2527 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2528 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2529 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2530 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2533 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2534 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2535 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2540 dev_warn(dd->dev, in atmel_sha_get_cap()
2548 { .compatible = "atmel,at91sam9g46-sha" },
2558 struct device *dev = &pdev->dev; in atmel_sha_probe()
2562 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); in atmel_sha_probe()
2564 return -ENOMEM; in atmel_sha_probe()
2566 sha_dd->dev = dev; in atmel_sha_probe()
2570 INIT_LIST_HEAD(&sha_dd->list); in atmel_sha_probe()
2571 spin_lock_init(&sha_dd->lock); in atmel_sha_probe()
2573 tasklet_init(&sha_dd->done_task, atmel_sha_done_task, in atmel_sha_probe()
2575 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, in atmel_sha_probe()
2578 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); in atmel_sha_probe()
2584 err = -ENODEV; in atmel_sha_probe()
2587 sha_dd->phys_base = sha_res->start; in atmel_sha_probe()
2590 sha_dd->irq = platform_get_irq(pdev, 0); in atmel_sha_probe()
2591 if (sha_dd->irq < 0) { in atmel_sha_probe()
2592 err = sha_dd->irq; in atmel_sha_probe()
2596 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, in atmel_sha_probe()
2597 IRQF_SHARED, "atmel-sha", sha_dd); in atmel_sha_probe()
2604 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); in atmel_sha_probe()
2605 if (IS_ERR(sha_dd->iclk)) { in atmel_sha_probe()
2607 err = PTR_ERR(sha_dd->iclk); in atmel_sha_probe()
2611 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); in atmel_sha_probe()
2612 if (IS_ERR(sha_dd->io_base)) { in atmel_sha_probe()
2614 err = PTR_ERR(sha_dd->io_base); in atmel_sha_probe()
2618 err = clk_prepare(sha_dd->iclk); in atmel_sha_probe()
2628 if (sha_dd->caps.has_dma) { in atmel_sha_probe()
2634 dma_chan_name(sha_dd->dma_lch_in.chan)); in atmel_sha_probe()
2638 list_add_tail(&sha_dd->list, &atmel_sha.dev_list); in atmel_sha_probe()
2646 sha_dd->caps.has_sha224 ? "/SHA224" : "", in atmel_sha_probe()
2647 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : ""); in atmel_sha_probe()
2653 list_del(&sha_dd->list); in atmel_sha_probe()
2655 if (sha_dd->caps.has_dma) in atmel_sha_probe()
2658 clk_unprepare(sha_dd->iclk); in atmel_sha_probe()
2660 tasklet_kill(&sha_dd->queue_task); in atmel_sha_probe()
2661 tasklet_kill(&sha_dd->done_task); in atmel_sha_probe()
2672 return -ENODEV; in atmel_sha_remove()
2674 list_del(&sha_dd->list); in atmel_sha_remove()
2679 tasklet_kill(&sha_dd->queue_task); in atmel_sha_remove()
2680 tasklet_kill(&sha_dd->done_task); in atmel_sha_remove()
2682 if (sha_dd->caps.has_dma) in atmel_sha_remove()
2685 clk_unprepare(sha_dd->iclk); in atmel_sha_remove()
2703 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");