// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8

struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
	u32 larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };

/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };

static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);

struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};

/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the original digest: the digest after "setkey" if HMAC,
	 * the initial digest if HASH
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC with a key larger than the mode block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};

static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);

static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}

static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%p to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}

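/*
 * Seed the per-request state from the per-tfm context: for HMAC, copy in
 * the precomputed ipad digest and the initial length value; for a plain
 * hash, copy in the algorithm's larval (initial) digest. Keeping this in
 * the request context lets several requests on one tfm run concurrently.
 */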
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /*hash*/
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}

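/*
 * DMA-map the request state buffers (intermediate digest, running byte
 * count and, for HMAC, the opad digest). Failures unwind the mappings
 * already made via the labels at the bottom.
 */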
static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest %d B at va=%p for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%p to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%p for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%p to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%p for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%p to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}

static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}

static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%p) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}

static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	dev_dbg(dev, "req=%p\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%p\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%p\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

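/*
 * Append the descriptor that streams the final digest out of the hash
 * engine into state->digest_result_dma_addr, with the byte order fixed
 * up per hash mode by cc_set_endianity(). Returns the next free index.
 */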
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}

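/*
 * Append the descriptors for the outer HMAC hash, i.e. H(K ^ opad ||
 * inner digest) per RFC 2104: write the inner digest back to the context
 * buffer, load the precomputed opad state and the digest length constant,
 * then feed the inner digest through the hash engine as data.
 */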
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}

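/*
 * One-shot digest entry point. Builds the whole descriptor chain in one
 * go: load the initial state (ipad digest for HMAC, larval digest
 * otherwise), load the running length, hash the source data, then for
 * HMAC run the outer hash before the final result is written out.
 */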
static int cc_hash_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_digest_addr;
	int idx = 0;
	int rc = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
		nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	/* If HMAC then load hash IPAD xor key, if HASH then load initial
	 * digest
	 */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     ctx->inter_digestsize, NS_BIT);
	} else {
		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
							   ctx->hash_mode);
		set_din_sram(&desc[idx], larval_digest_addr,
			     ctx->inter_digestsize);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);

	if (is_hmac) {
		set_din_type(&desc[idx], DMA_DLLI,
			     state->digest_bytes_len_dma_addr,
			     ctx->hash_len, NS_BIT);
	} else {
		set_din_const(&desc[idx], 0, ctx->hash_len);
		if (nbytes)
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
		else
			set_cipher_do(&desc[idx], DO_PAD);
	}
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	if (is_hmac) {
		/* HW last hash block padding (aka. "DO_PAD") */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      ctx->hash_len, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
		set_cipher_do(&desc[idx], DO_PAD);
		idx++;

		idx = cc_fin_hmac(desc, req, idx);
	}

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

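/*
 * Reload a saved intermediate state (digest plus running byte count)
 * into the hash engine and queue the buffered/source data; common
 * prologue for the update and finup/final descriptor chains.
 */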
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}

static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

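/*
 * Common tail for finup (update == true: hash the remaining req->src
 * data first) and final (update == false: only flush buffered data).
 * Restores the intermediate state, lets the HW apply last-block padding
 * (DO_PAD), and emits the (H)MAC result.
 */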
static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}

static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}

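/*
 * HMAC setkey, following RFC 2104: a key longer than the block size is
 * replaced by its hash and zero-padded to a full block, a shorter key is
 * zero-padded as-is. The two synchronous requests below then derive the
 * ipad/opad intermediate digests (the states after hashing K ^ ipad and
 * K ^ opad) that the digest/update flows later load as initial state.
 */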
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx_dma(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d\n", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* A zero keylen selects the plain HASH flow; any non-zero keylen
	 * selects the HMAC flow
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kfree_sensitive(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kfree_sensitive(ctx->key_params.key);

	return rc;
}

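/*
 * AES-XCBC-MAC setkey per RFC 3566: derive the three subkeys K1, K2 and
 * K3 by ECB-encrypting the constants 0x01..01, 0x02..02 and 0x03..03
 * with the user-supplied key, storing them at their respective offsets
 * in the opad_tmp_keys buffer.
 */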
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kfree_sensitive(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kfree_sensitive(ctx->key_params.key);

	return rc;
}

static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	return 0;
}

static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}

static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest %zu B at va=%p for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%p to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%p for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%p to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;
	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}

static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}

static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit\n");
	cc_free_ctx(ctx);
}

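/*
 * XCBC/CMAC update: hand full blocks to the AES MAC flow and save the
 * resulting MAC state back into the request context; xcbc_count records
 * that at least one update ran, which cc_mac_final() relies on.
 */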
static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, "data size does not require HW update %x\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

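/*
 * MAC finalization. If updates ran and the data was block-aligned
 * (rem_cnt == 0), the last block was already absorbed as an intermediate
 * block; the descriptors below therefore ECB-decrypt the MAC state with
 * the key at the K1 offset, which recovers prev_state XOR M[n] so the
 * last block can be re-processed as a proper final block.
 */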
static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

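/*
 * import()/export() serialize the request state in this layout:
 *   u32 magic (CC_EXPORT_MAGIC) | digest[inter_digestsize] |
 *   byte count[hash_len] | u32 buffered-byte count | buffered bytes
 * CC_STATE_SIZE() further down is sized to match, and each template's
 * .statesize below uses it.
 */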
static int cc_hash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	u8 *curr_buff = cc_hash_buf(state);
	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	memcpy(out, state->digest_bytes_len, ctx->hash_len);
	out += ctx->hash_len;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	return 0;
}

static int cc_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	u32 tmp;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC)
		return -EINVAL;
	in += sizeof(u32);

	cc_init_req(dev, state, ctx);

	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	memcpy(state->digest_bytes_len, in, ctx->hash_len);
	in += ctx->hash_len;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > CC_MAX_HASH_BLCK_SIZE)
		return -EINVAL;
	in += sizeof(u32);

	state->buf_cnt[0] = tmp;
	memcpy(state->buffers[0], in, tmp);

	return 0;
}

struct cc_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char mac_name[CRYPTO_MAX_ALG_NAME];
	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	bool is_mac;
	bool synchronize;
	struct ahash_alg template_ahash;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	u32 min_hw_rev;
	enum cc_std_body std_body;
};

#define CC_STATE_SIZE(_x) \
	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))

/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
	//Asynchronous hash template
	{
		.name = "sha1",
		.driver_name = "sha1-ccree",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-ccree",
		.blocksize = SHA1_BLOCK_SIZE,
		.is_mac = true,
		.synchronize = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-ccree",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-ccree",
		.blocksize = SHA256_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-ccree",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-ccree",
		.blocksize = SHA224_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-ccree",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-ccree",
		.blocksize = SHA384_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-ccree",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-ccree",
		.blocksize = SHA512_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "md5",
		.driver_name = "md5-ccree",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-ccree",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sm3",
		.driver_name = "sm3-ccree",
		.blocksize = SM3_BLOCK_SIZE,
		.is_mac = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SM3_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SM3,
		.hw_mode = DRV_HASH_HW_SM3,
		.inter_digestsize = SM3_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
	},
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_xcbc_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_cmac_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};

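/**
 * cc_alloc_hash_alg() - Allocate a cc_hash_alg instance from a template.
 *
 * @template: The hash template to instantiate
 * @dev: Associated device, used for devm allocation
 * @keyed: true to create the keyed (HMAC/XCBC/CMAC) variant of the
 *         algorithm, false for the plain hash
 *
 * Return: The allocated algorithm, or an ERR_PTR() value on failure.
 */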
static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
					     struct device *dev, bool keyed)
{
	struct cc_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CC_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = cc_cra_exit;

	alg->cra_init = cc_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}

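/**
 * cc_init_copy_sram() - Copy an array of constants into SRAM and advance
 * the running SRAM offset past the copied data.
 *
 * @drvdata: Associated device driver context
 * @data: The constant words to copy
 * @size: Size of @data in bytes
 * @sram_buff_ofs: In/out running SRAM offset
 *
 * Return: 0 on success, or the error code of the init-time request.
 */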
static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
			     unsigned int size, u32 *sram_buff_ofs)
{
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	unsigned int larval_seq_len = 0;
	int rc;

	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
			 larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		return rc;

	*sram_buff_ofs += size;
	return 0;
}

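/*
 * Load the constants the HW needs into SRAM: the digest-length values
 * first (plus the 128-bit lengths on HW >= 712), then the larval digests
 * in a fixed order - MD5, SHA-1, SHA-224, SHA-256, then SM3 on HW >= 713
 * and SHA-384/SHA-512 on HW >= 712. This ordering must match the offset
 * arithmetic in cc_larval_digest_addr().
 */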
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
	u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
	int rc = 0;

	/* Copy-to-SRAM digest-len */
	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
			       sizeof(cc_digest_len_init), &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	if (large_sha_supported) {
		/* Copy-to-SRAM digest-len for sha384/512 */
		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
				       sizeof(cc_digest_len_sha512_init),
				       &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-SRAM initial SHA* digests */
	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	if (sm3_supported) {
		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
				       sizeof(cc_sm3_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

	if (large_sha_supported) {
		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
				       sizeof(cc_sha384_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;

		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
				       sizeof(cc_sha512_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

init_digest_const_err:
	return rc;
}

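/**
 * cc_hash_alloc() - Set up the driver hash handle, load the SRAM constants
 * and register every hash/MAC algorithm the HW revision supports.
 *
 * @drvdata: Associated device driver context
 *
 * Return: 0 on success, or a negative error code.
 */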
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle;
	u32 sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(cc_digest_len_init) +
			     sizeof(cc_md5_init) +
			     sizeof(cc_sha1_init) +
			     sizeof(cc_sha224_init) +
			     sizeof(cc_sha256_init);

	if (drvdata->hw_rev >= CC_HW_REV_713)
		sram_size_to_alloc += sizeof(cc_sm3_init);

	if (drvdata->hw_rev >= CC_HW_REV_712)
		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);

	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/* Must be set before the alg registration, as it is used there */
	rc = cc_init_hash_sram(drvdata);
	if (rc) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct cc_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* Check that the HW revision and variants are suitable */
		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & driver_hash[alg].std_body))
			continue;

		if (driver_hash[alg].is_mac) {
			/* register hmac version */
			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				dev_err(dev, "%s alg allocation failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}
			t_alg->drvdata = drvdata;

			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (rc) {
				dev_err(dev, "%s alg registration failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}

			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
		    hw_mode == DRV_CIPHER_CMAC)
			continue;

		/* register hash version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}

		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
	}

	return 0;

fail:
	cc_hash_free(drvdata);
	return rc;
}

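/**
 * cc_hash_free() - Unregister all hash/MAC algorithms registered by
 * cc_hash_alloc().
 *
 * @drvdata: Associated device driver context
 *
 * Return: 0 (never fails).
 */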
int cc_hash_free(struct cc_drvdata *drvdata)
{
	struct cc_hash_alg *t_hash_alg, *hash_n;
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;

	list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
				 entry) {
		crypto_unregister_ahash(&t_hash_alg->ahash_alg);
		list_del(&t_hash_alg->entry);
	}

	return 0;
}

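/*
 * Build the setup descriptors for AES-XCBC-MAC: load the three derived
 * keys K1/K2/K3 from the opad buffer, then the current MAC state from the
 * request context.
 */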
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

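/*
 * Build the setup descriptors for AES-CMAC: load the user key, then the
 * current MAC state from the request context.
 */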
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}

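/*
 * Append the data descriptors for this request: a single DLLI descriptor
 * when the data is contiguous, or a BYPASS copy of the MLLI table into
 * SRAM followed by an MLLI processing descriptor when it is scattered.
 * A NULL data buffer emits nothing.
 */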
static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
			struct cc_hash_ctx *ctx, unsigned int flow_mode,
			struct cc_hw_desc desc[], bool is_not_last_data,
			unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			dev_dbg(dev, " NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;
		/* process */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}

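/*
 * Return the larval (initial) digest constant for the given hash mode.
 * An invalid mode falls back to the MD5 constant rather than returning
 * NULL.
 */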
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return cc_md5_init;
	case DRV_HASH_SHA1:
		return cc_sha1_init;
	case DRV_HASH_SHA224:
		return cc_sha224_init;
	case DRV_HASH_SHA256:
		return cc_sha256_init;
	case DRV_HASH_SHA384:
		return cc_sha384_init;
	case DRV_HASH_SHA512:
		return cc_sha512_init;
	case DRV_HASH_SM3:
		return cc_sm3_init;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
		return cc_md5_init;
	}
}

/**
 * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * @drvdata: Associated device driver context
 * @mode: The hash mode. Supported modes:
 *        MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
 *
 * Return:
 * The address of the initial digest in SRAM
 */
u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);
	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
	u32 addr;

	switch (mode) {
	case DRV_HASH_NULL:
		break; /* Ignore */
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init));
	case DRV_HASH_SM3:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
	case DRV_HASH_SHA384:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	case DRV_HASH_SHA512:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init) +
			sizeof(cc_sha384_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* Fallback: a valid (if wrong) address, to avoid a kernel crash */
	return hash_handle->larval_digest_sram_addr;
}

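/**
 * cc_digest_len_addr() - Get the SRAM address of the initial digest-length
 * value for the given hash mode.
 *
 * @drvdata: Associated device driver context
 * @mode: The hash mode
 *
 * Return: The address of the digest-length constant in SRAM. SHA-384 and
 * SHA-512 use the 128-bit length value stored right after the default one.
 */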
u32 cc_digest_len_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	u32 digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(cc_digest_len_init);
	default:
		return digest_len_addr; /* Fallback, to avoid a kernel crash */
	}
}
