Lines Matching defs:creq
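The matches below all appear to come from the Marvell CESA hash driver (hash.c under drivers/crypto/marvell/ in mainline Linux), and every hit dereferences the same per-request context. For orientation, here is a minimal sketch of struct mv_cesa_ahash_req reconstructed purely from the usages in the listing; the field types, their ordering, and the array bounds are assumptions inferred from those lines, not copied from the driver's headers.

    /* Hypothetical reconstruction; see the cited source lines in the listing below. */
    struct mv_cesa_ahash_req {
            struct mv_cesa_req base;             /* line 301: basereq = &creq->base */
            union {
                    struct mv_cesa_ahash_dma_req dma;   /* line 98 */
                    struct mv_cesa_ahash_std_req std;   /* line 164 */
            } req;
            struct mv_cesa_op_ctx op_tmpl;       /* line 451: creq->op_tmpl = *tmpl */
            u8 cache[CESA_MAX_HASH_BLOCK_SIZE];  /* line 472 bounds cache_ptr + nbytes */
            unsigned int cache_ptr;              /* bytes buffered but not yet hashed */
            u64 len;                             /* total message length; shifted by 3 at lines 149/153 */
            int src_nents;                       /* line 763: sg_nents_for_len() result */
            bool last_req;                       /* line 820: set by final()/finup() */
            bool algo_le;                        /* line 453: MD5 encodes its length little-endian */
            u32 state[8];                        /* lines 1040-1047 initialize eight SHA256 words */
    };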
30 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
31 unsigned int len = req->nbytes + creq->cache_ptr;
33 if (!creq->last_req)
38 iter->src.op_offset = creq->cache_ptr;
96 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
98 mv_cesa_ahash_dma_free_padding(&creq->req.dma);
103 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
105 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
106 mv_cesa_ahash_dma_free_cache(&creq->req.dma);
107 mv_cesa_dma_cleanup(&creq->base);
112 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
113 struct mv_cesa_engine *engine = creq->base.engine;
115 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
123 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
125 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
129 static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
133 index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
139 static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
145 padlen = mv_cesa_ahash_pad_len(creq);
148 if (creq->algo_le) {
149 __le64 bits = cpu_to_le64(creq->len << 3);
153 __be64 bits = cpu_to_be64(creq->len << 3);
163 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
164 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
165 struct mv_cesa_engine *engine = creq->base.engine;
173 mv_cesa_adjust_op(engine, &creq->op_tmpl);
175 memcpy(engine->sram_pool, &creq->op_tmpl,
176 sizeof(creq->op_tmpl));
178 memcpy_toio(engine->sram, &creq->op_tmpl,
179 sizeof(creq->op_tmpl));
184 writel_relaxed(creq->state[i],
188 if (creq->cache_ptr) {
191 creq->cache, creq->cache_ptr);
194 creq->cache, creq->cache_ptr);
197 len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
200 if (!creq->last_req) {
205 if (len - creq->cache_ptr)
207 engine, req->src, creq->src_nents,
208 CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
209 len - creq->cache_ptr, sreq->offset);
211 op = &creq->op_tmpl;
215 if (creq->last_req && sreq->offset == req->nbytes &&
216 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
226 creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
227 mv_cesa_set_mac_op_total_len(op, creq->len);
229 int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;
235 memcpy(creq->cache,
240 memcpy_fromio(creq->cache,
246 i = mv_cesa_ahash_pad_req(creq, creq->cache);
251 creq->cache, i);
255 creq->cache, i);
278 creq->cache_ptr = new_cache_ptr;
289 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
290 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
292 if (sreq->offset < (req->nbytes - creq->cache_ptr))
300 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
301 struct mv_cesa_req *basereq = &creq->base;
308 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
309 struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
316 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
317 struct mv_cesa_req *base = &creq->base;
325 for (i = 0; i < ARRAY_SIZE(creq->state); i++)
326 writel_relaxed(creq->state[i], engine->regs +
336 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
338 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
347 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
349 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
350 return mv_cesa_dma_process(&creq->base, status);
358 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
359 struct mv_cesa_engine *engine = creq->base.engine;
365 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
366 (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
374 data = creq->base.chain.last->op->ctx.hash.hash;
378 creq->state[i] = readl_relaxed(engine->regs +
380 if (creq->last_req) {
385 if (creq->algo_le) {
389 result[i] = cpu_to_le32(creq->state[i]);
394 result[i] = cpu_to_be32(creq->state[i]);
404 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
406 creq->base.engine = engine;
408 if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
417 struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
419 if (creq->last_req)
424 if (creq->cache_ptr)
425 sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
426 creq->cache,
427 creq->cache_ptr,
428 ahashreq->nbytes - creq->cache_ptr);
441 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
443 memset(creq, 0, sizeof(*creq));
451 creq->op_tmpl = *tmpl;
452 creq->len = 0;
453 creq->algo_le = algo_le;
469 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
472 if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
473 !creq->last_req) {
479 sg_pcopy_to_buffer(req->src, creq->src_nents,
480 creq->cache + creq->cache_ptr,
483 creq->cache_ptr += req->nbytes;
519 struct mv_cesa_ahash_req *creq,
522 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
525 if (!creq->cache_ptr)
532 memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
537 creq->cache_ptr,
545 struct mv_cesa_ahash_req *creq,
548 struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
557 if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
558 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
563 mv_cesa_set_mac_op_total_len(op, creq->len);
587 trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);
600 op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
621 return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
627 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
630 struct mv_cesa_req *basereq = &creq->base;
641 if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
644 if (creq->src_nents) {
645 ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
660 ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
683 &creq->op_tmpl,
700 if (creq->last_req)
701 op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
704 op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
727 if (!creq->last_req)
728 creq->cache_ptr = req->nbytes + creq->cache_ptr -
731 creq->cache_ptr = 0;
751 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
761 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
763 creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
764 if (creq->src_nents < 0) {
766 return creq->src_nents;
782 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
797 ret = mv_cesa_queue_req(&req->base, &creq->base);
807 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
809 creq->len += req->nbytes;
816 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
817 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
819 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
820 creq->last_req = true;
828 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
829 struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;
831 creq->len += req->nbytes;
832 mv_cesa_set_mac_op_total_len(tmpl, creq->len);
833 creq->last_req = true;
842 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
848 *len = creq->len;
849 memcpy(hash, creq->state, digsize);
851 memcpy(cache, creq->cache, creq->cache_ptr);
860 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
872 mv_cesa_update_op_cfg(&creq->op_tmpl,
876 creq->len = len;
877 memcpy(creq->state, hash, digsize);
878 creq->cache_ptr = 0;
884 memcpy(creq->cache, cache, cache_ptr);
885 creq->cache_ptr = cache_ptr;
892 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
899 creq->state[0] = MD5_H0;
900 creq->state[1] = MD5_H1;
901 creq->state[2] = MD5_H2;
902 creq->state[3] = MD5_H3;
962 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
969 creq->state[0] = SHA1_H0;
970 creq->state[1] = SHA1_H1;
971 creq->state[2] = SHA1_H2;
972 creq->state[3] = SHA1_H3;
973 creq->state[4] = SHA1_H4;
1033 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
1040 creq->state[0] = SHA256_H0;
1041 creq->state[1] = SHA256_H1;
1042 creq->state[2] = SHA256_H2;
1043 creq->state[3] = SHA256_H3;
1044 creq->state[4] = SHA256_H4;
1045 creq->state[5] = SHA256_H5;
1046 creq->state[6] = SHA256_H6;
1047 creq->state[7] = SHA256_H7;
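The padding helpers at source lines 129-153 implement the classic Merkle-Damgard trailer: pad with 0x80 and zeros up to 56 bytes mod 64, then append the message length in bits, little-endian for MD5 (the algo_le branch at line 148) and big-endian for the SHA family (line 153). A standalone sketch of that scheme, with the 64-byte block size and the function name assumed rather than taken from the listing:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical userspace analogue of mv_cesa_ahash_pad_len()/_pad_req().
     * buf must hold up to 72 bytes (a full 64-byte pad block plus the length). */
    static size_t hash_pad(uint8_t *buf, uint64_t msg_len, int algo_le)
    {
            size_t index = msg_len & 63;            /* cf. CESA_HASH_BLOCK_SIZE_MSK, line 133 */
            size_t padlen = index < 56 ? 56 - index : 120 - index;
            uint64_t bits = msg_len << 3;           /* cf. creq->len << 3, lines 149/153 */
            int i;

            buf[0] = 0x80;                          /* mandatory first padding byte */
            memset(buf + 1, 0, padlen - 1);

            for (i = 0; i < 8; i++)                 /* append the 64-bit bit count */
                    buf[padlen + i] = algo_le ? bits >> (8 * i)       /* MD5: LE */
                                              : bits >> (56 - 8 * i); /* SHA: BE */

            return padlen + 8;                      /* trailer size, cf. line 229 */
    }

The same quantity drives both hardware paths visible in the listing: the standard (PIO) path computes trailerlen = mv_cesa_ahash_pad_len(creq) + 8 at line 229 to decide how much of the trailer still fits in SRAM, while the DMA path at line 587 writes the trailer into a dedicated padding buffer via mv_cesa_ahash_pad_req().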