Lines Matching +full:stm32f756 +full:- +full:hash (drivers/crypto/stm32/stm32-hash.c; each match shows its source line number, the matched text, and the enclosing function)
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
10 #include <crypto/internal/hash.h>
18 #include <linux/dma-mapping.h>
150 /* hash state */
226 return readl_relaxed(hdev->io_base + offset); in stm32_hash_read()
232 writel_relaxed(value, hdev->io_base + offset); in stm32_hash_write()
240 if (!hdev->pdata->has_sr) in stm32_hash_wait_busy()
241 return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status, in stm32_hash_wait_busy()
244 return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status, in stm32_hash_wait_busy()
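The two poll paths above exist because older IP revisions (the Ux500 one, where has_sr is false) lack a status register, so readiness is polled on HASH_STR instead of HASH_SR. A minimal sketch of the polling idiom, assuming a HASH_SR_BUSY bit definition and a 100 ms budget (both illustrative):

#include <linux/iopoll.h>

static int example_wait_not_busy(void __iomem *io_base)
{
        u32 status;

        /* Re-read HASH_SR every 10 us until BUSY clears; 100 ms budget. */
        return readl_relaxed_poll_timeout(io_base + HASH_SR, status,
                                          !(status & HASH_SR_BUSY),
                                          10, 100 * USEC_PER_MSEC);
}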
260 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); in stm32_hash_write_key()
263 int keylen = ctx->keylen; in stm32_hash_write_key()
264 void *key = ctx->key; in stm32_hash_write_key()
271 keylen -= 4; in stm32_hash_write_key()
279 return -EINPROGRESS; in stm32_hash_write_key()
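The keylen -= 4 step implies the key is streamed into the data input register one 32-bit word at a time. A hedged sketch of that loop (HASH_DIN is from this listing; the rest is illustrative and assumes a word-aligned key buffer):

static void example_stream_key(void __iomem *io_base, const u32 *key,
                               int keylen)
{
        while (keylen > 0) {
                writel_relaxed(*key++, io_base + HASH_DIN);
                keylen -= 4;
        }
}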
287 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_write_ctrl()
288 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); in stm32_hash_write_ctrl()
290 struct stm32_hash_state *state = &rctx->state; in stm32_hash_write_ctrl()
291 u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT; in stm32_hash_write_ctrl()
295 if (!(hdev->flags & HASH_FLAGS_INIT)) { in stm32_hash_write_ctrl()
296 if (hdev->pdata->ux500) { in stm32_hash_write_ctrl()
299 if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS) in stm32_hash_write_ctrl()
303 reg |= alg << hdev->pdata->alg_shift; in stm32_hash_write_ctrl()
306 reg |= (rctx->data_type << HASH_CR_DATATYPE_POS); in stm32_hash_write_ctrl()
308 if (state->flags & HASH_FLAGS_HMAC) { in stm32_hash_write_ctrl()
309 hdev->flags |= HASH_FLAGS_HMAC; in stm32_hash_write_ctrl()
311 if (ctx->keylen > crypto_ahash_blocksize(tfm)) in stm32_hash_write_ctrl()
315 if (!hdev->polled) in stm32_hash_write_ctrl()
320 hdev->flags |= HASH_FLAGS_INIT; in stm32_hash_write_ctrl()
326 rctx->state.blocklen -= sizeof(u32); in stm32_hash_write_ctrl()
328 dev_dbg(hdev->dev, "Write Control %x\n", reg); in stm32_hash_write_ctrl()
334 struct stm32_hash_state *state = &rctx->state; in stm32_hash_append_sg()
337 while ((state->bufcnt < state->blocklen) && rctx->total) { in stm32_hash_append_sg()
338 count = min(rctx->sg->length - rctx->offset, rctx->total); in stm32_hash_append_sg()
339 count = min_t(size_t, count, state->blocklen - state->bufcnt); in stm32_hash_append_sg()
342 if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) { in stm32_hash_append_sg()
343 rctx->sg = sg_next(rctx->sg); in stm32_hash_append_sg()
350 scatterwalk_map_and_copy(state->buffer + state->bufcnt, in stm32_hash_append_sg()
351 rctx->sg, rctx->offset, count, 0); in stm32_hash_append_sg()
353 state->bufcnt += count; in stm32_hash_append_sg()
354 rctx->offset += count; in stm32_hash_append_sg()
355 rctx->total -= count; in stm32_hash_append_sg()
357 if (rctx->offset == rctx->sg->length) { in stm32_hash_append_sg()
358 rctx->sg = sg_next(rctx->sg); in stm32_hash_append_sg()
359 if (rctx->sg) in stm32_hash_append_sg()
360 rctx->offset = 0; in stm32_hash_append_sg()
362 rctx->total = 0; in stm32_hash_append_sg()
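The walk above copies min(segment remainder, request total, free block space) bytes per iteration and steps rctx->sg forward once a segment drains. The same accumulation logic as a self-contained sketch over plain (ptr, len) segments (all names illustrative):

#include <stddef.h>
#include <string.h>

struct seg { const unsigned char *ptr; size_t len; };

/* Copy into buf until one block is staged or the input runs out. */
static size_t append_segs(unsigned char *buf, size_t bufcnt, size_t blocklen,
                          const struct seg *sg, size_t nsegs,
                          size_t *offset, size_t *total)
{
        size_t i = 0;

        while (bufcnt < blocklen && *total && i < nsegs) {
                size_t count = sg[i].len - *offset;

                if (count > *total)
                        count = *total;
                if (count > blocklen - bufcnt)
                        count = blocklen - bufcnt;

                memcpy(buf + bufcnt, sg[i].ptr + *offset, count);
                bufcnt += count;
                *offset += count;
                *total -= count;

                if (*offset == sg[i].len) {     /* segment drained */
                        i++;
                        *offset = 0;
                }
        }
        return bufcnt;
}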
370 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_xmit_cpu()
371 struct stm32_hash_state *state = &rctx->state; in stm32_hash_xmit_cpu()
377 hdev->flags |= HASH_FLAGS_FINAL; in stm32_hash_xmit_cpu()
380 if (!(hdev->flags & HASH_FLAGS_INIT) && !length && in stm32_hash_xmit_cpu()
381 hdev->pdata->broken_emptymsg) { in stm32_hash_xmit_cpu()
382 state->flags |= HASH_FLAGS_EMPTY; in stm32_hash_xmit_cpu()
389 dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n", in stm32_hash_xmit_cpu()
392 hdev->flags |= HASH_FLAGS_CPU; in stm32_hash_xmit_cpu()
397 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
399 if ((hdev->flags & HASH_FLAGS_HMAC) && in stm32_hash_xmit_cpu()
400 (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { in stm32_hash_xmit_cpu()
401 hdev->flags |= HASH_FLAGS_HMAC_KEY; in stm32_hash_xmit_cpu()
404 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
412 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
418 if (hdev->flags & HASH_FLAGS_HMAC) { in stm32_hash_xmit_cpu()
420 return -ETIMEDOUT; in stm32_hash_xmit_cpu()
423 return -EINPROGRESS; in stm32_hash_xmit_cpu()
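Condensing the control flow above: CPU mode pushes 32-bit words into HASH_DIN, inserting the HMAC key phase the first time through, and ends a final block by triggering digest calculation. A sketch, where HASH_STR_DCAL is assumed to be the "start digest" bit as in STM32 HASH reference material:

static int example_xmit_words(void __iomem *io_base, const u32 *buf,
                              unsigned int len32, bool final)
{
        unsigned int i;

        for (i = 0; i < len32; i++)
                writel_relaxed(buf[i], io_base + HASH_DIN);

        if (final)
                /* NBLW (valid bits of the last word) must already be set. */
                writel_relaxed(HASH_STR_DCAL, io_base + HASH_STR);

        return 0;
}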
431 struct stm32_hash_state *state = &rctx->state; in hash_swap_reg()
433 switch ((state->flags & HASH_FLAGS_ALGO_MASK) >> in hash_swap_reg()
439 if (state->flags & HASH_FLAGS_HMAC) in hash_swap_reg()
447 if (state->flags & HASH_FLAGS_HMAC) in hash_swap_reg()
457 if (state->flags & HASH_FLAGS_HMAC) in hash_swap_reg()
464 return -EINVAL; in hash_swap_reg()
470 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_update_cpu()
471 struct stm32_hash_state *state = &rctx->state; in stm32_hash_update_cpu()
472 u32 *preg = state->hw_context; in stm32_hash_update_cpu()
476 dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags); in stm32_hash_update_cpu()
478 final = state->flags & HASH_FLAGS_FINAL; in stm32_hash_update_cpu()
480 while ((rctx->total >= state->blocklen) || in stm32_hash_update_cpu()
481 (state->bufcnt + rctx->total >= state->blocklen)) { in stm32_hash_update_cpu()
483 bufcnt = state->bufcnt; in stm32_hash_update_cpu()
484 state->bufcnt = 0; in stm32_hash_update_cpu()
485 err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0); in stm32_hash_update_cpu()
493 bufcnt = state->bufcnt; in stm32_hash_update_cpu()
494 state->bufcnt = 0; in stm32_hash_update_cpu()
495 return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1); in stm32_hash_update_cpu()
498 if (!(hdev->flags & HASH_FLAGS_INIT)) in stm32_hash_update_cpu()
502 return -ETIMEDOUT; in stm32_hash_update_cpu()
506 if (!hdev->pdata->ux500) in stm32_hash_update_cpu()
513 state->flags |= HASH_FLAGS_INIT; in stm32_hash_update_cpu()
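The tail of stm32_hash_update_cpu() above is the suspend half of CPU mode: when no final block is pending, the hardware context (interrupt mask, STR, CR, plus the CSR array whose length hash_swap_reg() computes) is read back into state->hw_context so the request can resume later. A sketch of that save; HASH_CSR(x) is assumed to be an indexed offset macro and the register order here is illustrative:

static void example_save_hw_context(void __iomem *io_base, u32 *preg,
                                    int swap_reg)
{
        int i;

        *preg++ = readl_relaxed(io_base + HASH_IMR);
        *preg++ = readl_relaxed(io_base + HASH_STR);
        *preg++ = readl_relaxed(io_base + HASH_CR);
        for (i = 0; i < swap_reg; i++)
                *preg++ = readl_relaxed(io_base + HASH_CSR(i));
}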
526 in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, in stm32_hash_xmit_dma()
530 dev_err(hdev->dev, "dmaengine_prep_slave error\n"); in stm32_hash_xmit_dma()
531 return -ENOMEM; in stm32_hash_xmit_dma()
534 reinit_completion(&hdev->dma_completion); in stm32_hash_xmit_dma()
535 in_desc->callback = stm32_hash_dma_callback; in stm32_hash_xmit_dma()
536 in_desc->callback_param = hdev; in stm32_hash_xmit_dma()
538 hdev->flags |= HASH_FLAGS_FINAL; in stm32_hash_xmit_dma()
539 hdev->flags |= HASH_FLAGS_DMA_ACTIVE; in stm32_hash_xmit_dma()
543 if (hdev->pdata->has_mdmat) { in stm32_hash_xmit_dma()
558 return -ENOMEM; in stm32_hash_xmit_dma()
560 dma_async_issue_pending(hdev->dma_lch); in stm32_hash_xmit_dma()
562 if (!wait_for_completion_timeout(&hdev->dma_completion, in stm32_hash_xmit_dma()
564 err = -ETIMEDOUT; in stm32_hash_xmit_dma()
566 if (dma_async_is_tx_complete(hdev->dma_lch, cookie, in stm32_hash_xmit_dma()
568 err = -ETIMEDOUT; in stm32_hash_xmit_dma()
571 dev_err(hdev->dev, "DMA Error %i\n", err); in stm32_hash_xmit_dma()
572 dmaengine_terminate_all(hdev->dma_lch); in stm32_hash_xmit_dma()
576 return -EINPROGRESS; in stm32_hash_xmit_dma()
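The DMA path is the standard dmaengine submit sequence: prepare a slave-sg descriptor toward the peripheral, attach a completion callback, submit, issue pending, then wait with a timeout. Reduced to its core steps (error unwinding trimmed, timeout budget illustrative):

#include <linux/dmaengine.h>
#include <linux/completion.h>
#include <linux/jiffies.h>

static void example_dma_done(void *data)
{
        complete(data);         /* mirrors stm32_hash_dma_callback() */
}

static int example_dma_submit(struct dma_chan *chan, struct scatterlist *sg,
                              struct completion *done)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        reinit_completion(done);
        desc->callback = example_dma_done;
        desc->callback_param = done;

        cookie = dmaengine_submit(desc);
        if (dma_submit_error(cookie))
                return -ENOMEM;

        dma_async_issue_pending(chan);

        if (!wait_for_completion_timeout(done, msecs_to_jiffies(100)))
                return -ETIMEDOUT;

        return 0;
}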
583 complete(&hdev->dma_completion); in stm32_hash_dma_callback()
588 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_hmac_dma_send()
589 struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); in stm32_hash_hmac_dma_send()
593 if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) { in stm32_hash_hmac_dma_send()
596 return -ETIMEDOUT; in stm32_hash_hmac_dma_send()
598 if (!(hdev->flags & HASH_FLAGS_HMAC_KEY)) in stm32_hash_hmac_dma_send()
599 sg_init_one(&rctx->sg_key, ctx->key, in stm32_hash_hmac_dma_send()
600 ALIGN(ctx->keylen, sizeof(u32))); in stm32_hash_hmac_dma_send()
602 rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1, in stm32_hash_hmac_dma_send()
604 if (rctx->dma_ct == 0) { in stm32_hash_hmac_dma_send()
605 dev_err(hdev->dev, "dma_map_sg error\n"); in stm32_hash_hmac_dma_send()
606 return -ENOMEM; in stm32_hash_hmac_dma_send()
609 err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0); in stm32_hash_hmac_dma_send()
611 dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE); in stm32_hash_hmac_dma_send()
626 dma_conf.dst_addr = hdev->phys_base + HASH_DIN; in stm32_hash_dma_init()
632 chan = dma_request_chan(hdev->dev, "in"); in stm32_hash_dma_init()
636 hdev->dma_lch = chan; in stm32_hash_dma_init()
638 err = dmaengine_slave_config(hdev->dma_lch, &dma_conf); in stm32_hash_dma_init()
640 dma_release_channel(hdev->dma_lch); in stm32_hash_dma_init()
641 hdev->dma_lch = NULL; in stm32_hash_dma_init()
642 dev_err(hdev->dev, "Couldn't configure DMA slave.\n"); in stm32_hash_dma_init()
646 init_completion(&hdev->dma_completion); in stm32_hash_dma_init()
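The channel setup the matched lines describe points the slave config at the HASH_DIN FIFO and requests the channel by its "in" name from the device tree. A sketch under those assumptions (dst_maxburst is assumed):

#include <linux/dmaengine.h>

static struct dma_chan *example_hash_dma_init(struct device *dev,
                                              phys_addr_t phys_base)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = phys_base + HASH_DIN,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 2,            /* assumed burst size */
        };
        struct dma_chan *chan;
        int err;

        chan = dma_request_chan(dev, "in");
        if (IS_ERR(chan))
                return chan;

        err = dmaengine_slave_config(chan, &cfg);
        if (err) {
                dma_release_channel(chan);
                dev_err(dev, "Couldn't configure DMA slave.\n");
                return ERR_PTR(err);
        }
        return chan;
}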
653 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_dma_send()
654 u32 *buffer = (void *)rctx->state.buffer; in stm32_hash_dma_send()
660 rctx->sg = hdev->req->src; in stm32_hash_dma_send()
661 rctx->total = hdev->req->nbytes; in stm32_hash_dma_send()
663 rctx->nents = sg_nents(rctx->sg); in stm32_hash_dma_send()
664 if (rctx->nents < 0) in stm32_hash_dma_send()
665 return -EINVAL; in stm32_hash_dma_send()
669 if (hdev->flags & HASH_FLAGS_HMAC) { in stm32_hash_dma_send()
671 if (err != -EINPROGRESS) in stm32_hash_dma_send()
675 for_each_sg(rctx->sg, tsg, rctx->nents, i) { in stm32_hash_dma_send()
677 len = sg->length; in stm32_hash_dma_send()
679 if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) { in stm32_hash_dma_send()
680 sg->length = rctx->total - bufcnt; in stm32_hash_dma_send()
682 if (hdev->dma_mode == 1) { in stm32_hash_dma_send()
683 len = (ALIGN(sg->length, 16) - 16); in stm32_hash_dma_send()
686 rctx->sg, rctx->nents, in stm32_hash_dma_send()
687 rctx->state.buffer, sg->length - len, in stm32_hash_dma_send()
688 rctx->total - sg->length + len); in stm32_hash_dma_send()
690 sg->length = len; in stm32_hash_dma_send()
692 if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { in stm32_hash_dma_send()
693 len = sg->length; in stm32_hash_dma_send()
694 sg->length = ALIGN(sg->length, in stm32_hash_dma_send()
700 rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, in stm32_hash_dma_send()
702 if (rctx->dma_ct == 0) { in stm32_hash_dma_send()
703 dev_err(hdev->dev, "dma_map_sg error\n"); in stm32_hash_dma_send()
704 return -ENOMEM; in stm32_hash_dma_send()
710 dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); in stm32_hash_dma_send()
712 if (err == -ENOMEM) in stm32_hash_dma_send()
718 if (hdev->dma_mode == 1) { in stm32_hash_dma_send()
720 return -ETIMEDOUT; in stm32_hash_dma_send()
728 DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); in stm32_hash_dma_send()
729 writesl(hdev->io_base + HASH_DIN, buffer, in stm32_hash_dma_send()
736 err = -EINPROGRESS; in stm32_hash_dma_send()
739 if (hdev->flags & HASH_FLAGS_HMAC) { in stm32_hash_dma_send()
741 return -ETIMEDOUT; in stm32_hash_dma_send()
753 if (!ctx->hdev) { in stm32_hash_find_dev()
758 ctx->hdev = hdev; in stm32_hash_find_dev()
760 hdev = ctx->hdev; in stm32_hash_find_dev()
776 if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen) in stm32_hash_dma_aligned_data()
779 if (sg_nents(req->src) > 1) { in stm32_hash_dma_aligned_data()
780 if (hdev->dma_mode == 1) in stm32_hash_dma_aligned_data()
782 for_each_sg(req->src, sg, sg_nents(req->src), i) { in stm32_hash_dma_aligned_data()
783 if ((!IS_ALIGNED(sg->length, sizeof(u32))) && in stm32_hash_dma_aligned_data()
789 if (req->src->offset % 4) in stm32_hash_dma_aligned_data()
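Putting the eligibility checks above together: DMA is used only for requests larger than one block, every scatterlist element except the last must be a 4-byte multiple in length, multi-element lists are rejected outright in no-append mode (dma_mode == 1), and the source offset must be word aligned. A condensed sketch:

#include <linux/scatterlist.h>

static bool example_dma_eligible(struct scatterlist *src,
                                 unsigned int nbytes, unsigned int blocklen,
                                 int dma_mode)
{
        struct scatterlist *sg;
        int i;

        if (nbytes <= blocklen)
                return false;           /* small request: CPU path */

        if (sg_nents(src) > 1) {
                if (dma_mode == 1)
                        return false;   /* no-append mode: single sg only */
                for_each_sg(src, sg, sg_nents(src), i)
                        if (!IS_ALIGNED(sg->length, sizeof(u32)) &&
                            !sg_is_last(sg))
                                return false;
        }

        return IS_ALIGNED(src->offset, sizeof(u32));
}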
801 struct stm32_hash_state *state = &rctx->state; in stm32_hash_init()
802 bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE; in stm32_hash_init()
804 rctx->hdev = hdev; in stm32_hash_init()
806 state->flags = HASH_FLAGS_CPU; in stm32_hash_init()
809 state->flags |= HASH_FLAGS_SHA3_MODE; in stm32_hash_init()
811 rctx->digcnt = crypto_ahash_digestsize(tfm); in stm32_hash_init()
812 switch (rctx->digcnt) { in stm32_hash_init()
814 state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
817 if (hdev->pdata->ux500) in stm32_hash_init()
818 state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
820 state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
824 state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
826 state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
830 state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
832 if (hdev->pdata->ux500) in stm32_hash_init()
833 state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
835 state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
840 state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
842 state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
846 state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
848 state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT; in stm32_hash_init()
851 return -EINVAL; in stm32_hash_init()
854 rctx->state.bufcnt = 0; in stm32_hash_init()
855 rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32); in stm32_hash_init()
856 if (rctx->state.blocklen > HASH_BUFLEN) { in stm32_hash_init()
857 dev_err(hdev->dev, "Error, block too large"); in stm32_hash_init()
858 return -EINVAL; in stm32_hash_init()
860 rctx->total = 0; in stm32_hash_init()
861 rctx->offset = 0; in stm32_hash_init()
862 rctx->data_type = HASH_DATA_8_BITS; in stm32_hash_init()
864 if (ctx->flags & HASH_FLAGS_HMAC) in stm32_hash_init()
865 state->flags |= HASH_FLAGS_HMAC; in stm32_hash_init()
867 dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags); in stm32_hash_init()
874 struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); in stm32_hash_update_req()
875 struct stm32_hash_state *state = &rctx->state; in stm32_hash_update_req()
877 if (!(state->flags & HASH_FLAGS_CPU)) in stm32_hash_update_req()
885 struct ahash_request *req = hdev->req; in stm32_hash_final_req()
887 struct stm32_hash_state *state = &rctx->state; in stm32_hash_final_req()
888 int buflen = state->bufcnt; in stm32_hash_final_req()
890 if (state->flags & HASH_FLAGS_FINUP) in stm32_hash_final_req()
893 state->bufcnt = 0; in stm32_hash_final_req()
895 return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1); in stm32_hash_final_req()
903 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_emptymsg_fallback()
906 dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n", in stm32_hash_emptymsg_fallback()
907 ctx->keylen); in stm32_hash_emptymsg_fallback()
909 if (!ctx->xtfm) { in stm32_hash_emptymsg_fallback()
910 dev_err(hdev->dev, "no fallback engine\n"); in stm32_hash_emptymsg_fallback()
914 if (ctx->keylen) { in stm32_hash_emptymsg_fallback()
915 ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen); in stm32_hash_emptymsg_fallback()
917 dev_err(hdev->dev, "failed to set key ret=%d\n", ret); in stm32_hash_emptymsg_fallback()
922 ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest); in stm32_hash_emptymsg_fallback()
924 dev_err(hdev->dev, "shash digest error\n"); in stm32_hash_emptymsg_fallback()
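broken_emptymsg marks IP revisions whose engine cannot digest a zero-length message, so such requests are redirected to a software shash. The fallback call in isolation (crypto_shash_setkey() and crypto_shash_tfm_digest() are the real crypto API entry points used above):

#include <crypto/hash.h>

static int example_empty_digest(struct crypto_shash *xtfm,
                                const u8 *key, unsigned int keylen,
                                u8 *digest)
{
        int ret;

        if (keylen) {
                ret = crypto_shash_setkey(xtfm, key, keylen);
                if (ret)
                        return ret;
        }

        /* The data pointer may be NULL when the length is zero. */
        return crypto_shash_tfm_digest(xtfm, NULL, 0, digest);
}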
931 struct stm32_hash_state *state = &rctx->state; in stm32_hash_copy_hash()
932 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_copy_hash()
933 __be32 *hash = (void *)rctx->digest; in stm32_hash_copy_hash()
936 if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY)) in stm32_hash_copy_hash()
942 if (hdev->pdata->ux500) in stm32_hash_copy_hash()
943 hash[i] = cpu_to_be32(stm32_hash_read(hdev, in stm32_hash_copy_hash()
946 hash[i] = cpu_to_be32(stm32_hash_read(hdev, in stm32_hash_copy_hash()
956 reg = stm32_hash_read(rctx->hdev, HASH_SR); in stm32_hash_finish()
958 stm32_hash_write(rctx->hdev, HASH_SR, reg); in stm32_hash_finish()
960 if (!req->result) in stm32_hash_finish()
961 return -EINVAL; in stm32_hash_finish()
963 memcpy(req->result, rctx->digest, rctx->digcnt); in stm32_hash_finish()
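The read-back implied above byte-swaps each result word to big-endian before it is copied to req->result; the Ux500 branch reads from different offsets. A sketch of the common case, with HASH_HREG(x) assumed as the indexed result-register macro:

static void example_copy_hash(void __iomem *io_base, __be32 *hash,
                              unsigned int words)
{
        unsigned int i;

        for (i = 0; i < words; i++)
                hash[i] = cpu_to_be32(readl_relaxed(io_base + HASH_HREG(i)));
}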
971 struct stm32_hash_dev *hdev = rctx->hdev; in stm32_hash_finish_req()
973 if (!err && (HASH_FLAGS_FINAL & hdev->flags)) { in stm32_hash_finish_req()
978 pm_runtime_mark_last_busy(hdev->dev); in stm32_hash_finish_req()
979 pm_runtime_put_autosuspend(hdev->dev); in stm32_hash_finish_req()
981 crypto_finalize_hash_request(hdev->engine, req, err); in stm32_hash_finish_req()
987 return crypto_transfer_hash_request_to_engine(hdev->engine, req); in stm32_hash_handle_queue()
997 struct stm32_hash_state *state = &rctx->state; in stm32_hash_one_request()
1002 return -ENODEV; in stm32_hash_one_request()
1004 dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n", in stm32_hash_one_request()
1005 rctx->op, req->nbytes); in stm32_hash_one_request()
1007 pm_runtime_get_sync(hdev->dev); in stm32_hash_one_request()
1009 hdev->req = req; in stm32_hash_one_request()
1010 hdev->flags = 0; in stm32_hash_one_request()
1013 if (state->flags & HASH_FLAGS_INIT) { in stm32_hash_one_request()
1014 u32 *preg = rctx->state.hw_context; in stm32_hash_one_request()
1018 if (!hdev->pdata->ux500) in stm32_hash_one_request()
1028 hdev->flags |= HASH_FLAGS_INIT; in stm32_hash_one_request()
1030 if (state->flags & HASH_FLAGS_HMAC) in stm32_hash_one_request()
1031 hdev->flags |= HASH_FLAGS_HMAC | in stm32_hash_one_request()
1035 if (rctx->op == HASH_OP_UPDATE) in stm32_hash_one_request()
1037 else if (rctx->op == HASH_OP_FINAL) in stm32_hash_one_request()
1041 if (err == -EINPROGRESS && hdev->polled) { in stm32_hash_one_request()
1043 err = -ETIMEDOUT; in stm32_hash_one_request()
1045 hdev->flags |= HASH_FLAGS_OUTPUT_READY; in stm32_hash_one_request()
1050 if (err != -EINPROGRESS) in stm32_hash_one_request()
1060 struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in stm32_hash_enqueue()
1061 struct stm32_hash_dev *hdev = ctx->hdev; in stm32_hash_enqueue()
1063 rctx->op = op; in stm32_hash_enqueue()
1071 struct stm32_hash_state *state = &rctx->state; in stm32_hash_update()
1073 if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU)) in stm32_hash_update()
1076 rctx->total = req->nbytes; in stm32_hash_update()
1077 rctx->sg = req->src; in stm32_hash_update()
1078 rctx->offset = 0; in stm32_hash_update()
1080 if ((state->bufcnt + rctx->total < state->blocklen)) { in stm32_hash_update()
1091 struct stm32_hash_state *state = &rctx->state; in stm32_hash_final()
1093 state->flags |= HASH_FLAGS_FINAL; in stm32_hash_final()
1103 struct stm32_hash_state *state = &rctx->state; in stm32_hash_finup()
1105 if (!req->nbytes) in stm32_hash_finup()
1108 state->flags |= HASH_FLAGS_FINUP; in stm32_hash_finup()
1109 rctx->total = req->nbytes; in stm32_hash_finup()
1110 rctx->sg = req->src; in stm32_hash_finup()
1111 rctx->offset = 0; in stm32_hash_finup()
1113 if (hdev->dma_lch && stm32_hash_dma_aligned_data(req)) in stm32_hash_finup()
1114 state->flags &= ~HASH_FLAGS_CPU; in stm32_hash_finup()
1129 memcpy(out, &rctx->state, sizeof(rctx->state)); in stm32_hash_export()
1139 memcpy(&rctx->state, in, sizeof(rctx->state)); in stm32_hash_import()
1150 memcpy(ctx->key, key, keylen); in stm32_hash_setkey()
1151 ctx->keylen = keylen; in stm32_hash_setkey()
1153 return -ENOMEM; in stm32_hash_setkey()
1167 if (!hdev->pdata->ux500) in stm32_hash_init_fallback()
1172 dev_err(hdev->dev, "failed to allocate %s fallback\n", in stm32_hash_init_fallback()
1176 dev_info(hdev->dev, "allocated %s fallback\n", name); in stm32_hash_init_fallback()
1177 ctx->xtfm = xtfm; in stm32_hash_init_fallback()
1189 ctx->keylen = 0; in stm32_hash_cra_init_algs()
1192 ctx->flags |= algs_flags; in stm32_hash_cra_init_algs()
1223 if (ctx->xtfm) in stm32_hash_cra_exit()
1224 crypto_free_shash(ctx->xtfm); in stm32_hash_cra_exit()
1231 if (HASH_FLAGS_CPU & hdev->flags) { in stm32_hash_irq_thread()
1232 if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { in stm32_hash_irq_thread()
1233 hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; in stm32_hash_irq_thread()
1236 } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { in stm32_hash_irq_thread()
1237 hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; in stm32_hash_irq_thread()
1245 stm32_hash_finish_req(hdev->req, 0); in stm32_hash_irq_thread()
1257 hdev->flags |= HASH_FLAGS_OUTPUT_READY; in stm32_hash_irq_handler()
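The hard handler and threaded handler above split the work in the standard threaded-IRQ way: the hard half latches OUTPUT_READY, the threaded half finalizes the request out of interrupt context. A sketch of how such a pair is typically installed (the call is the generic kernel API; the flag choice is an assumption):

static int example_request_irq(struct device *dev, int irq, void *hdev)
{
        return devm_request_threaded_irq(dev, irq,
                                         stm32_hash_irq_handler,
                                         stm32_hash_irq_thread,
                                         IRQF_ONESHOT, dev_name(dev), hdev);
}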
1280 .cra_driver_name = "stm32-md5",
1309 .cra_driver_name = "stm32-hmac-md5",
1340 .cra_driver_name = "stm32-sha1",
1369 .cra_driver_name = "stm32-hmac-sha1",
1400 .cra_driver_name = "stm32-sha224",
1429 .cra_driver_name = "stm32-hmac-sha224",
1460 .cra_driver_name = "stm32-sha256",
1489 .cra_driver_name = "stm32-hmac-sha256",
1520 .cra_driver_name = "stm32-sha384",
1549 .cra_driver_name = "stm32-hmac-sha384",
1577 .cra_driver_name = "stm32-sha512",
1606 .cra_driver_name = "stm32-hmac-sha512",
1636 .cra_name = "sha3-224",
1637 .cra_driver_name = "stm32-sha3-224",
1665 .cra_name = "hmac(sha3-224)",
1666 .cra_driver_name = "stm32-hmac-sha3-224",
1693 .cra_name = "sha3-256",
1694 .cra_driver_name = "stm32-sha3-256",
1722 .cra_name = "hmac(sha3-256)",
1723 .cra_driver_name = "stm32-hmac-sha3-256",
1750 .cra_name = "sha3-384",
1751 .cra_driver_name = "stm32-sha3-384",
1779 .cra_name = "hmac(sha3-384)",
1780 .cra_driver_name = "stm32-hmac-sha3-384",
1807 .cra_name = "sha3-512",
1808 .cra_driver_name = "stm32-sha3-512",
1836 .cra_name = "hmac(sha3-512)",
1837 .cra_driver_name = "stm32-hmac-sha3-512",
1859 for (i = 0; i < hdev->pdata->algs_info_size; i++) { in stm32_hash_register_algs()
1860 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) { in stm32_hash_register_algs()
1862 &hdev->pdata->algs_info[i].algs_list[j]); in stm32_hash_register_algs()
1870 dev_err(hdev->dev, "Algo %d : %d failed\n", i, j); in stm32_hash_register_algs()
1871 for (; i--; ) { in stm32_hash_register_algs()
1872 for (; j--;) in stm32_hash_register_algs()
1874 &hdev->pdata->algs_info[i].algs_list[j]); in stm32_hash_register_algs()
1884 for (i = 0; i < hdev->pdata->algs_info_size; i++) { in stm32_hash_unregister_algs()
1885 for (j = 0; j < hdev->pdata->algs_info[i].size; j++) in stm32_hash_unregister_algs()
1887 &hdev->pdata->algs_info[i].algs_list[j]); in stm32_hash_unregister_algs()
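Once the loops above have registered these ahash algorithms, other kernel code reaches them through the generic crypto API by cra_name, with the core selecting this driver by priority. A usage sketch with standard API calls ("sha256" picked for illustration; data must sit in the linear kernel map for scatterlist use):

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, out, len);

        /* Block until the (possibly asynchronous) request completes. */
        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return ret;
}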
1990 { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
1991 { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
1992 { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
1993 { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
2002 hdev->pdata = of_device_get_match_data(dev); in stm32_hash_get_of_match()
2003 if (!hdev->pdata) { in stm32_hash_get_of_match()
2005 return -EINVAL; in stm32_hash_get_of_match()
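Each compatible string resolves to a per-SoC capability block consumed throughout the listing (has_sr, ux500, alg_shift, has_mdmat, broken_emptymsg). A hedged sketch of what the STM32F7 entry looks like; the field set mirrors the accessors above but the values are illustrative:

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
        .alg_shift       = HASH_CR_ALGO_POS,
        .has_sr          = true,
        .has_mdmat       = true,
        .broken_emptymsg = false,
        .ux500           = false,
};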
2014 struct device *dev = &pdev->dev; in stm32_hash_probe()
2020 return -ENOMEM; in stm32_hash_probe()
2022 hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in stm32_hash_probe()
2023 if (IS_ERR(hdev->io_base)) in stm32_hash_probe()
2024 return PTR_ERR(hdev->io_base); in stm32_hash_probe()
2026 hdev->phys_base = res->start; in stm32_hash_probe()
2033 if (irq < 0 && irq != -ENXIO) in stm32_hash_probe()
2048 hdev->polled = true; in stm32_hash_probe()
2051 hdev->clk = devm_clk_get(&pdev->dev, NULL); in stm32_hash_probe()
2052 if (IS_ERR(hdev->clk)) in stm32_hash_probe()
2053 return dev_err_probe(dev, PTR_ERR(hdev->clk), in stm32_hash_probe()
2054 "failed to get clock for hash\n"); in stm32_hash_probe()
2056 ret = clk_prepare_enable(hdev->clk); in stm32_hash_probe()
2058 dev_err(dev, "failed to enable hash clock (%d)\n", ret); in stm32_hash_probe()
2069 hdev->rst = devm_reset_control_get(&pdev->dev, NULL); in stm32_hash_probe()
2070 if (IS_ERR(hdev->rst)) { in stm32_hash_probe()
2071 if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) { in stm32_hash_probe()
2072 ret = -EPROBE_DEFER; in stm32_hash_probe()
2076 reset_control_assert(hdev->rst); in stm32_hash_probe()
2078 reset_control_deassert(hdev->rst); in stm32_hash_probe()
2081 hdev->dev = dev; in stm32_hash_probe()
2089 case -ENOENT: in stm32_hash_probe()
2090 case -ENODEV: in stm32_hash_probe()
2099 list_add_tail(&hdev->list, &stm32_hash.dev_list); in stm32_hash_probe()
2103 hdev->engine = crypto_engine_alloc_init(dev, 1); in stm32_hash_probe()
2104 if (!hdev->engine) { in stm32_hash_probe()
2105 ret = -ENOMEM; in stm32_hash_probe()
2109 ret = crypto_engine_start(hdev->engine); in stm32_hash_probe()
2113 if (hdev->pdata->ux500) in stm32_hash_probe()
2115 hdev->dma_mode = 0; in stm32_hash_probe()
2117 hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK; in stm32_hash_probe()
2124 dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n", in stm32_hash_probe()
2125 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode); in stm32_hash_probe()
2133 crypto_engine_exit(hdev->engine); in stm32_hash_probe()
2136 list_del(&hdev->list); in stm32_hash_probe()
2139 if (hdev->dma_lch) in stm32_hash_probe()
2140 dma_release_channel(hdev->dma_lch); in stm32_hash_probe()
2145 clk_disable_unprepare(hdev->clk); in stm32_hash_probe()
2155 ret = pm_runtime_get_sync(hdev->dev); in stm32_hash_remove()
2159 crypto_engine_exit(hdev->engine); in stm32_hash_remove()
2162 list_del(&hdev->list); in stm32_hash_remove()
2165 if (hdev->dma_lch) in stm32_hash_remove()
2166 dma_release_channel(hdev->dma_lch); in stm32_hash_remove()
2168 pm_runtime_disable(hdev->dev); in stm32_hash_remove()
2169 pm_runtime_put_noidle(hdev->dev); in stm32_hash_remove()
2172 clk_disable_unprepare(hdev->clk); in stm32_hash_remove()
2180 clk_disable_unprepare(hdev->clk); in stm32_hash_runtime_suspend()
2190 ret = clk_prepare_enable(hdev->clk); in stm32_hash_runtime_resume()
2192 dev_err(hdev->dev, "Failed to prepare_enable clock\n"); in stm32_hash_runtime_resume()
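The runtime-PM pair above only gates the hash clock. A sketch of dev_pm_ops wiring for callbacks of this shape, using the standard SET_*_PM_OPS macros (whether this driver also forces suspend/resume at system sleep is an assumption):

static const struct dev_pm_ops stm32_hash_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
                           stm32_hash_runtime_resume, NULL)
};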
2211 .name = "stm32-hash",