Lines matching full:cryp
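
(These matches appear to come from the SHA side of the MediaTek crypto driver in mainline Linux; every function receives the shared struct mtk_cryp device handle found by the search. The leading number on each line is the line number in the source file, and the trailing note names the enclosing symbol and whether the match is a struct member, a function argument, or a local. The annotations and code sketches interleaved below are reconstructions from these matches, not the verbatim driver.)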
105 struct mtk_cryp *cryp; member
124 static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
127 static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset) in mtk_sha_read() argument
129 return readl_relaxed(cryp->base + offset); in mtk_sha_read()
132 static inline void mtk_sha_write(struct mtk_cryp *cryp, in mtk_sha_write() argument
135 writel_relaxed(value, cryp->base + offset); in mtk_sha_write()
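
Lines 127-135 define the register accessors: thin inlines over relaxed MMIO at an offset from the mapped base. A minimal sketch of the pattern (any struct fields beyond base are assumed):

    #include <linux/io.h>

    struct mtk_cryp {
            void __iomem *base;             /* mapped register window */
            /* ... remaining fields elided ... */
    };

    static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
    {
            /* _relaxed: no memory barrier; fine for independent
             * status/config register reads on this device
             */
            return readl_relaxed(cryp->base + offset);
    }

    static inline void mtk_sha_write(struct mtk_cryp *cryp,
                                     u32 offset, u32 value)
    {
            writel_relaxed(value, cryp->base + offset);
    }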
155 struct mtk_cryp *cryp = NULL; in mtk_sha_find_dev() local
159 if (!tctx->cryp) { in mtk_sha_find_dev()
161 cryp = tmp; in mtk_sha_find_dev()
164 tctx->cryp = cryp; in mtk_sha_find_dev()
166 cryp = tctx->cryp; in mtk_sha_find_dev()
173 tctx->id = cryp->rec; in mtk_sha_find_dev()
174 cryp->rec = !cryp->rec; in mtk_sha_find_dev()
178 return cryp; in mtk_sha_find_dev()
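
Lines 155-178 are the device-lookup helper: the first call caches a struct mtk_cryp in the transform context, and every call assigns one of the two SHA records by toggling cryp->rec, alternating requests across both rings. A hedged reconstruction (the driver-global lock and the context type are assumed):

    static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
    {
            struct mtk_cryp *cryp = NULL, *tmp;

            spin_lock_bh(&mtk_sha.lock);            /* assumed global lock */
            if (!tctx->cryp) {
                    /* first use: take the first registered device */
                    list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
                            cryp = tmp;
                            break;
                    }
                    tctx->cryp = cryp;
            } else {
                    cryp = tctx->cryp;
            }

            /* round-robin: alternate between the two SHA records */
            tctx->id = cryp->rec;
            cryp->rec = !cryp->rec;
            spin_unlock_bh(&mtk_sha.lock);

            return cryp;
    }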
320 static int mtk_sha_info_update(struct mtk_cryp *cryp, in mtk_sha_info_update() argument
338 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), in mtk_sha_info_update()
340 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) { in mtk_sha_info_update()
341 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); in mtk_sha_info_update()
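
Lines 338-341 show the mandatory DMA-API idiom: a dma_map_single() result must pass dma_mapping_error() before it is used. Condensed, with the direction assumed bidirectional since the engine reads the command block and writes hash state back into it:

    ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
                                 DMA_BIDIRECTIONAL);
    if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
            dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
            return -EINVAL;
    }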
421 static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha, in mtk_sha_xmit() argument
426 struct mtk_ring *ring = cryp->ring[sha->id]; in mtk_sha_xmit()
430 err = mtk_sha_info_update(cryp, sha, len1, len2); in mtk_sha_xmit()
462 mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); in mtk_sha_xmit()
463 mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); in mtk_sha_xmit()
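
Once the ring descriptors are built, lines 462-463 kick the engine by publishing the prepared-descriptor counts for the result and command rings. The descriptor writes must be visible to the device before the counts land, so a write barrier conventionally precedes the two MMIO writes:

    /* make the descriptor contents visible before kicking the engine */
    wmb();

    mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
    mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));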
468 static int mtk_sha_dma_map(struct mtk_cryp *cryp, in mtk_sha_dma_map() argument
473 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, in mtk_sha_dma_map()
475 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { in mtk_sha_dma_map()
476 dev_err(cryp->dev, "dma map error\n"); in mtk_sha_dma_map()
482 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0); in mtk_sha_dma_map()
485 static int mtk_sha_update_slow(struct mtk_cryp *cryp, in mtk_sha_update_slow() argument
496 dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt); in mtk_sha_update_slow()
507 return mtk_sha_dma_map(cryp, sha, ctx, count); in mtk_sha_update_slow()
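
Lines 468-507 are the buffered slow path: when the remaining input is too small or awkwardly laid out for direct scatterlist DMA, it is copied into the per-request bounce buffer, which is then mapped as one linear region. A sketch of mtk_sha_dma_map() under that reading (SHA_BUF_SIZE and SHA_FLAGS_SG are assumed names):

    static int mtk_sha_dma_map(struct mtk_cryp *cryp,
                               struct mtk_sha_rec *sha,
                               struct mtk_sha_reqctx *ctx, size_t count)
    {
            ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
                                           SHA_BUF_SIZE, DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
                    dev_err(cryp->dev, "dma map error\n");
                    return -EINVAL;
            }

            ctx->flags &= ~SHA_FLAGS_SG;    /* data came from the bounce buffer */

            return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
    }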
512 static int mtk_sha_update_start(struct mtk_cryp *cryp, in mtk_sha_update_start() argument
523 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
528 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
532 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
562 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, in mtk_sha_update_start()
564 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { in mtk_sha_update_start()
565 dev_err(cryp->dev, "dma map bytes error\n"); in mtk_sha_update_start()
575 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, in mtk_sha_update_start()
580 if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in mtk_sha_update_start()
581 dev_err(cryp->dev, "dma_map_sg error\n"); in mtk_sha_update_start()
586 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), in mtk_sha_update_start()
591 if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in mtk_sha_update_start()
592 dev_err(cryp->dev, "dma_map_sg error\n"); in mtk_sha_update_start()
598 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), in mtk_sha_update_start()
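
Lines 580-598 are the direct path: a single scatterlist entry is mapped with dma_map_sg(), which signals failure by returning 0 (no separate dma_mapping_error() step), and the bus address is read back with sg_dma_address(). Sketch, flag name assumed as above:

    if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
            dev_err(cryp->dev, "dma_map_sg error\n");
            return -EINVAL;
    }

    ctx->flags |= SHA_FLAGS_SG;     /* unmap with dma_unmap_sg() later */

    return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), count, 0, 0);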
602 static int mtk_sha_final_req(struct mtk_cryp *cryp, in mtk_sha_final_req() argument
614 return mtk_sha_dma_map(cryp, sha, ctx, count); in mtk_sha_final_req()
635 static void mtk_sha_finish_req(struct mtk_cryp *cryp, in mtk_sha_finish_req() argument
650 static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id, in mtk_sha_handle_queue() argument
653 struct mtk_sha_rec *sha = cryp->sha[id]; in mtk_sha_handle_queue()
688 err = mtk_sha_update_start(cryp, sha); in mtk_sha_handle_queue()
691 err = mtk_sha_final_req(cryp, sha); in mtk_sha_handle_queue()
693 err = mtk_sha_final_req(cryp, sha); in mtk_sha_handle_queue()
698 mtk_sha_finish_req(cryp, sha, err); in mtk_sha_handle_queue()
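
Lines 650-698 dispatch a queued request on record id: an update either hands work to the hardware (returning -EINPROGRESS) or completes synchronously, in which case a finup request falls through to the final step; anything that never reached the engine is completed on the spot. A sketch of that routing (op codes and the finup flag are assumed names):

    if (ctx->op == SHA_OP_UPDATE) {
            err = mtk_sha_update_start(cryp, sha);
            if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
                    /* update finished synchronously: run final now */
                    err = mtk_sha_final_req(cryp, sha);
    } else if (ctx->op == SHA_OP_FINAL) {
            err = mtk_sha_final_req(cryp, sha);
    }

    if (err != -EINPROGRESS)
            /* the request never reached the engine */
            mtk_sha_finish_req(cryp, sha, err);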
710 return mtk_sha_handle_queue(tctx->cryp, tctx->id, req); in mtk_sha_enqueue()
713 static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha) in mtk_sha_unmap() argument
717 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), in mtk_sha_unmap()
721 dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE); in mtk_sha_unmap()
728 dma_unmap_single(cryp->dev, ctx->dma_addr, in mtk_sha_unmap()
732 dma_unmap_single(cryp->dev, ctx->dma_addr, in mtk_sha_unmap()
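
Lines 713-732 release the mappings symmetrically: the transform-info block is always unmapped, while the data side is undone with dma_unmap_sg() or dma_unmap_single() depending on which path created it. Sketch (sha->req, the flag, and the buffer size are assumed names):

    static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
    {
            struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);

            dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
                             DMA_BIDIRECTIONAL);

            if (ctx->flags & SHA_FLAGS_SG)
                    dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
            else
                    dma_unmap_single(cryp->dev, ctx->dma_addr,
                                     SHA_BUF_SIZE, DMA_TO_DEVICE);
    }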
736 static void mtk_sha_complete(struct mtk_cryp *cryp, in mtk_sha_complete() argument
741 err = mtk_sha_update_start(cryp, sha); in mtk_sha_complete()
743 mtk_sha_finish_req(cryp, sha, err); in mtk_sha_complete()
849 struct mtk_cryp *cryp = NULL; in mtk_sha_cra_init_alg() local
851 cryp = mtk_sha_find_dev(tctx); in mtk_sha_cra_init_alg()
852 if (!cryp) in mtk_sha_cra_init_alg()
1170 mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL); in mtk_sha_queue_task()
1176 struct mtk_cryp *cryp = sha->cryp; in mtk_sha_done_task() local
1178 mtk_sha_unmap(cryp, sha); in mtk_sha_done_task()
1179 mtk_sha_complete(cryp, sha); in mtk_sha_done_task()
1185 struct mtk_cryp *cryp = sha->cryp; in mtk_sha_irq() local
1186 u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id)); in mtk_sha_irq()
1188 mtk_sha_write(cryp, RDR_STAT(sha->id), val); in mtk_sha_irq()
1191 mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST); in mtk_sha_irq()
1192 mtk_sha_write(cryp, RDR_THRESH(sha->id), in mtk_sha_irq()
1197 dev_warn(cryp->dev, "SHA interrupt when no active requests.\n"); in mtk_sha_irq()
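
Lines 1185-1197 show the hard-IRQ half for a result ring: read the ring status, acknowledge it by writing the value back (write-one-to-clear), reset the processed-descriptor counter, re-arm the threshold, and defer the real work (the unmap plus completion seen at lines 1178-1179) to the done tasklet. Sketch, with the busy flag and threshold constants assumed:

    static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
    {
            struct mtk_sha_rec *sha = dev_id;
            struct mtk_cryp *cryp = sha->cryp;
            u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));

            /* write-one-to-clear acknowledge of the ring status */
            mtk_sha_write(cryp, RDR_STAT(sha->id), val);

            if (likely(sha->flags & SHA_FLAGS_BUSY)) {
                    mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
                    mtk_sha_write(cryp, RDR_THRESH(sha->id),
                                  MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
                    /* heavy lifting runs in softirq context */
                    tasklet_schedule(&sha->done_task);
            } else {
                    dev_warn(cryp->dev,
                             "SHA interrupt when no active requests.\n");
            }

            return IRQ_HANDLED;
    }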
1206 static int mtk_sha_record_init(struct mtk_cryp *cryp) in mtk_sha_record_init() argument
1208 struct mtk_sha_rec **sha = cryp->sha; in mtk_sha_record_init()
1216 sha[i]->cryp = cryp; in mtk_sha_record_init()
1231 cryp->rec = 1; in mtk_sha_record_init()
1241 static void mtk_sha_record_free(struct mtk_cryp *cryp) in mtk_sha_record_free() argument
1246 tasklet_kill(&cryp->sha[i]->done_task); in mtk_sha_record_free()
1247 tasklet_kill(&cryp->sha[i]->queue_task); in mtk_sha_record_free()
1249 kfree(cryp->sha[i]); in mtk_sha_record_free()
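
Lines 1241-1249 tear the records down in the only safe order: each tasklet is killed before the memory it runs against is freed. Sketch (MTK_REC_NUM is an assumed name for the record count, two given the round-robin above):

    static void mtk_sha_record_free(struct mtk_cryp *cryp)
    {
            int i;

            for (i = 0; i < MTK_REC_NUM; i++) {
                    /* guarantee no tasklet still references sha[i] */
                    tasklet_kill(&cryp->sha[i]->done_task);
                    tasklet_kill(&cryp->sha[i]->queue_task);

                    kfree(cryp->sha[i]);
            }
    }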
1293 int mtk_hash_alg_register(struct mtk_cryp *cryp) in mtk_hash_alg_register() argument
1297 INIT_LIST_HEAD(&cryp->sha_list); in mtk_hash_alg_register()
1300 err = mtk_sha_record_init(cryp); in mtk_hash_alg_register()
1304 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq, in mtk_hash_alg_register()
1305 0, "mtk-sha", cryp->sha[0]); in mtk_hash_alg_register()
1307 dev_err(cryp->dev, "unable to request sha irq0.\n"); in mtk_hash_alg_register()
1311 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq, in mtk_hash_alg_register()
1312 0, "mtk-sha", cryp->sha[1]); in mtk_hash_alg_register()
1314 dev_err(cryp->dev, "unable to request sha irq1.\n"); in mtk_hash_alg_register()
1319 mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2); in mtk_hash_alg_register()
1320 mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3); in mtk_hash_alg_register()
1323 list_add_tail(&cryp->sha_list, &mtk_sha.dev_list); in mtk_hash_alg_register()
1334 list_del(&cryp->sha_list); in mtk_hash_alg_register()
1337 mtk_sha_record_free(cryp); in mtk_hash_alg_register()
1340 dev_err(cryp->dev, "mtk-sha initialization failed.\n"); in mtk_hash_alg_register()
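
Lines 1293-1340 give the registration order: init the list entry, allocate the records, request one IRQ per ring (each devm_request_irq() passes its own record as dev_id, which is exactly what the handler above casts back), enable the ring interrupts, publish the device on the global list, then register the algorithms; errors unwind in reverse. A condensed sketch of the unwind shape (label names assumed):

    err = mtk_sha_record_init(cryp);
    if (err)
            goto err_record;

    err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
                           0, "mtk-sha", cryp->sha[0]);
    if (err) {
            dev_err(cryp->dev, "unable to request sha irq0.\n");
            goto err_res;
    }

    /* ... second IRQ, AIC_ENABLE_SET writes, list_add_tail(),
     * algorithm registration ...
     */
    return 0;

    err_res:
            mtk_sha_record_free(cryp);
    err_record:
            dev_err(cryp->dev, "mtk-sha initialization failed.\n");
            return err;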
1344 void mtk_hash_alg_release(struct mtk_cryp *cryp) in mtk_hash_alg_release() argument
1347 list_del(&cryp->sha_list); in mtk_hash_alg_release()
1351 mtk_sha_record_free(cryp); in mtk_hash_alg_release()
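
The release path at lines 1344-1351 mirrors registration in reverse: the device leaves the global list first (so mtk_sha_find_dev() can no longer hand it out), the algorithms are unregistered, and the records are freed last. Sketch (the lock and the algorithm array are assumed names):

    void mtk_hash_alg_release(struct mtk_cryp *cryp)
    {
            int i;

            spin_lock_bh(&mtk_sha.lock);
            list_del(&cryp->sha_list);
            spin_unlock_bh(&mtk_sha.lock);

            for (i = 0; i < ARRAY_SIZE(algs); i++)
                    crypto_unregister_ahash(&algs[i]);

            mtk_sha_record_free(cryp);
    }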