// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>

#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the
 * hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa2-eth driver, but it
 *       would pose a problem for userspace application processing, which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;
struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if the XTS key length is neither 2 x 128 nor
 *	2 x 256 bits, in which case the software fallback is used
 * @fallback: XTS software fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

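/*
 * Translate an I/O virtual address handed back by the DPSECI object into
 * a kernel virtual address, going through the IOMMU domain when one is
 * attached to the device.
 */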
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}

/*
 * qi_cache_zalloc - Allocate buffers from the CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}

/*
 * qi_cache_free - Free buffers allocated from the CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is done; the call is passed straight through to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}

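/*
 * Retrieve the driver-private caam_request embedded in the crypto API
 * request context, based on the type of the transform.
 */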
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx(container_of(areq, struct aead_request,
						     base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}

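/*
 * Release the DMA mappings taken for a request: the source/destination
 * scatterlists, the IV buffer and the hardware S/G table, as applicable.
 */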
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

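/*
 * (Re)generate the ENCRYPT and DECRYPT shared descriptors of an
 * authenc-style AEAD session and sync them to the device. Called whenever
 * the key or the authentication tag size changes.
 */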
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

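/*
 * Split the authenc key blob into the authentication and encryption keys,
 * copy them into the session context and rebuild the shared descriptors.
 */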
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

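/*
 * Allocate an extended descriptor for an AEAD request: count and DMA-map
 * the source/destination scatterlists, the IV and the assoclen, then build
 * the hardware S/G table referenced by the frame list entries.
 */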
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}

static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}

static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}

static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;

	return gcm_set_sh_desc(aead);
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4106_set_sh_desc(aead);
}

static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
				   ctx->dir);

	return rfc4543_set_sh_desc(aead);
}

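/*
 * Generate the ENCRYPT and DECRYPT shared descriptors of a symmetric
 * cipher session. Cipher keys are short, so the key is always inlined in
 * the descriptor here.
 */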
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{
	if (keylen != CHACHA_KEY_SIZE)
		return -EINVAL;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

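/*
 * XTS setkey: besides programming the shared descriptors, also install the
 * key on the software fallback tfm when it might be needed, i.e. for key
 * lengths other than 2 x 128 / 2 x 256 bits or on hardware up to Era 8.
 */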
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}

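/*
 * Allocate an extended descriptor for a symmetric cipher request: count
 * and DMA-map the source/destination scatterlists and the IV, then build
 * the input [IV, src] and output [dst, IV] hardware S/G tables.
 */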
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}

static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

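/*
 * Completion callbacks, run when the accelerator hands back a response
 * frame: decode the SEC status word, release the DMA mappings and complete
 * the crypto API request.
 */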
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
}

static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}

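/*
 * Return true if the second half of the XTS IV is non-zero; such requests
 * are redirected to the software fallback on hardware up to Era 8 (see
 * skcipher_encrypt() / skcipher_decrypt() below).
 */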
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}

static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

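/*
 * Common transform init: the flow contexts and the key buffer are covered
 * by a single DMA mapping, from which flc_dma[] and key_dma are derived as
 * offsets.
 */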
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}

static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			dev_err(ctx->dev, "Failed to allocate %s fallback: %ld\n",
				tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
					    crypto_skcipher_reqsize(fallback));
	} else {
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
	}

	ret = caam_cra_init(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}

static int caam_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);

	crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
	return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
			     !caam_alg->caam.nodkp);
}

static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}

1665 static struct caam_skcipher_alg driver_algs[] = {
1666 {
1667 .skcipher = {
1668 .base = {
1669 .cra_name = "cbc(aes)",
1670 .cra_driver_name = "cbc-aes-caam-qi2",
1671 .cra_blocksize = AES_BLOCK_SIZE,
1672 },
1673 .setkey = aes_skcipher_setkey,
1674 .encrypt = skcipher_encrypt,
1675 .decrypt = skcipher_decrypt,
1676 .min_keysize = AES_MIN_KEY_SIZE,
1677 .max_keysize = AES_MAX_KEY_SIZE,
1678 .ivsize = AES_BLOCK_SIZE,
1679 },
1680 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1681 },
1682 {
1683 .skcipher = {
1684 .base = {
1685 .cra_name = "cbc(des3_ede)",
1686 .cra_driver_name = "cbc-3des-caam-qi2",
1687 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1688 },
1689 .setkey = des3_skcipher_setkey,
1690 .encrypt = skcipher_encrypt,
1691 .decrypt = skcipher_decrypt,
1692 .min_keysize = DES3_EDE_KEY_SIZE,
1693 .max_keysize = DES3_EDE_KEY_SIZE,
1694 .ivsize = DES3_EDE_BLOCK_SIZE,
1695 },
1696 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1697 },
1698 {
1699 .skcipher = {
1700 .base = {
1701 .cra_name = "cbc(des)",
1702 .cra_driver_name = "cbc-des-caam-qi2",
1703 .cra_blocksize = DES_BLOCK_SIZE,
1704 },
1705 .setkey = des_skcipher_setkey,
1706 .encrypt = skcipher_encrypt,
1707 .decrypt = skcipher_decrypt,
1708 .min_keysize = DES_KEY_SIZE,
1709 .max_keysize = DES_KEY_SIZE,
1710 .ivsize = DES_BLOCK_SIZE,
1711 },
1712 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1713 },
1714 {
1715 .skcipher = {
1716 .base = {
1717 .cra_name = "ctr(aes)",
1718 .cra_driver_name = "ctr-aes-caam-qi2",
1719 .cra_blocksize = 1,
1720 },
1721 .setkey = ctr_skcipher_setkey,
1722 .encrypt = skcipher_encrypt,
1723 .decrypt = skcipher_decrypt,
1724 .min_keysize = AES_MIN_KEY_SIZE,
1725 .max_keysize = AES_MAX_KEY_SIZE,
1726 .ivsize = AES_BLOCK_SIZE,
1727 .chunksize = AES_BLOCK_SIZE,
1728 },
1729 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1730 OP_ALG_AAI_CTR_MOD128,
1731 },
1732 {
1733 .skcipher = {
1734 .base = {
1735 .cra_name = "rfc3686(ctr(aes))",
1736 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1737 .cra_blocksize = 1,
1738 },
1739 .setkey = rfc3686_skcipher_setkey,
1740 .encrypt = skcipher_encrypt,
1741 .decrypt = skcipher_decrypt,
1742 .min_keysize = AES_MIN_KEY_SIZE +
1743 CTR_RFC3686_NONCE_SIZE,
1744 .max_keysize = AES_MAX_KEY_SIZE +
1745 CTR_RFC3686_NONCE_SIZE,
1746 .ivsize = CTR_RFC3686_IV_SIZE,
1747 .chunksize = AES_BLOCK_SIZE,
1748 },
1749 .caam = {
1750 .class1_alg_type = OP_ALG_ALGSEL_AES |
1751 OP_ALG_AAI_CTR_MOD128,
1752 .rfc3686 = true,
1753 },
1754 },
1755 {
1756 .skcipher = {
1757 .base = {
1758 .cra_name = "xts(aes)",
1759 .cra_driver_name = "xts-aes-caam-qi2",
1760 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1761 .cra_blocksize = AES_BLOCK_SIZE,
1762 },
1763 .setkey = xts_skcipher_setkey,
1764 .encrypt = skcipher_encrypt,
1765 .decrypt = skcipher_decrypt,
1766 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1767 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1768 .ivsize = AES_BLOCK_SIZE,
1769 },
1770 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1771 },
1772 {
1773 .skcipher = {
1774 .base = {
1775 .cra_name = "chacha20",
1776 .cra_driver_name = "chacha20-caam-qi2",
1777 .cra_blocksize = 1,
1778 },
1779 .setkey = chacha20_skcipher_setkey,
1780 .encrypt = skcipher_encrypt,
1781 .decrypt = skcipher_decrypt,
1782 .min_keysize = CHACHA_KEY_SIZE,
1783 .max_keysize = CHACHA_KEY_SIZE,
1784 .ivsize = CHACHA_IV_SIZE,
1785 },
1786 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1787 },
1788 };
1789
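/*
 * AEAD algorithms: AES-GCM and its RFC4106/RFC4543 IPsec variants,
 * ChaCha20-Poly1305, and authenc() combinations of an HMAC with a CBC or
 * RFC3686 CTR cipher, each in plain, echainiv and/or seqiv flavors.
 */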
1790 static struct caam_aead_alg driver_aeads[] = {
1791 {
1792 .aead = {
1793 .base = {
1794 .cra_name = "rfc4106(gcm(aes))",
1795 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1796 .cra_blocksize = 1,
1797 },
1798 .setkey = rfc4106_setkey,
1799 .setauthsize = rfc4106_setauthsize,
1800 .encrypt = ipsec_gcm_encrypt,
1801 .decrypt = ipsec_gcm_decrypt,
1802 .ivsize = 8,
1803 .maxauthsize = AES_BLOCK_SIZE,
1804 },
1805 .caam = {
1806 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1807 .nodkp = true,
1808 },
1809 },
1810 {
1811 .aead = {
1812 .base = {
1813 .cra_name = "rfc4543(gcm(aes))",
1814 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1815 .cra_blocksize = 1,
1816 },
1817 .setkey = rfc4543_setkey,
1818 .setauthsize = rfc4543_setauthsize,
1819 .encrypt = ipsec_gcm_encrypt,
1820 .decrypt = ipsec_gcm_decrypt,
1821 .ivsize = 8,
1822 .maxauthsize = AES_BLOCK_SIZE,
1823 },
1824 .caam = {
1825 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1826 .nodkp = true,
1827 },
1828 },
1829 /* Galois Counter Mode */
1830 {
1831 .aead = {
1832 .base = {
1833 .cra_name = "gcm(aes)",
1834 .cra_driver_name = "gcm-aes-caam-qi2",
1835 .cra_blocksize = 1,
1836 },
1837 .setkey = gcm_setkey,
1838 .setauthsize = gcm_setauthsize,
1839 .encrypt = aead_encrypt,
1840 .decrypt = aead_decrypt,
1841 .ivsize = 12,
1842 .maxauthsize = AES_BLOCK_SIZE,
1843 },
1844 .caam = {
1845 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1846 .nodkp = true,
1847 }
1848 },
1849 /* single-pass ipsec_esp descriptor */
1850 {
1851 .aead = {
1852 .base = {
1853 .cra_name = "authenc(hmac(md5),cbc(aes))",
1854 .cra_driver_name = "authenc-hmac-md5-"
1855 "cbc-aes-caam-qi2",
1856 .cra_blocksize = AES_BLOCK_SIZE,
1857 },
1858 .setkey = aead_setkey,
1859 .setauthsize = aead_setauthsize,
1860 .encrypt = aead_encrypt,
1861 .decrypt = aead_decrypt,
1862 .ivsize = AES_BLOCK_SIZE,
1863 .maxauthsize = MD5_DIGEST_SIZE,
1864 },
1865 .caam = {
1866 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1867 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1868 OP_ALG_AAI_HMAC_PRECOMP,
1869 }
1870 },
1871 {
1872 .aead = {
1873 .base = {
1874 .cra_name = "echainiv(authenc(hmac(md5),"
1875 "cbc(aes)))",
1876 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1877 "cbc-aes-caam-qi2",
1878 .cra_blocksize = AES_BLOCK_SIZE,
1879 },
1880 .setkey = aead_setkey,
1881 .setauthsize = aead_setauthsize,
1882 .encrypt = aead_encrypt,
1883 .decrypt = aead_decrypt,
1884 .ivsize = AES_BLOCK_SIZE,
1885 .maxauthsize = MD5_DIGEST_SIZE,
1886 },
1887 .caam = {
1888 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1889 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1890 OP_ALG_AAI_HMAC_PRECOMP,
1891 .geniv = true,
1892 }
1893 },
1894 {
1895 .aead = {
1896 .base = {
1897 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1898 .cra_driver_name = "authenc-hmac-sha1-"
1899 "cbc-aes-caam-qi2",
1900 .cra_blocksize = AES_BLOCK_SIZE,
1901 },
1902 .setkey = aead_setkey,
1903 .setauthsize = aead_setauthsize,
1904 .encrypt = aead_encrypt,
1905 .decrypt = aead_decrypt,
1906 .ivsize = AES_BLOCK_SIZE,
1907 .maxauthsize = SHA1_DIGEST_SIZE,
1908 },
1909 .caam = {
1910 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1911 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1912 OP_ALG_AAI_HMAC_PRECOMP,
1913 }
1914 },
1915 {
1916 .aead = {
1917 .base = {
1918 .cra_name = "echainiv(authenc(hmac(sha1),"
1919 "cbc(aes)))",
1920 .cra_driver_name = "echainiv-authenc-"
1921 "hmac-sha1-cbc-aes-caam-qi2",
1922 .cra_blocksize = AES_BLOCK_SIZE,
1923 },
1924 .setkey = aead_setkey,
1925 .setauthsize = aead_setauthsize,
1926 .encrypt = aead_encrypt,
1927 .decrypt = aead_decrypt,
1928 .ivsize = AES_BLOCK_SIZE,
1929 .maxauthsize = SHA1_DIGEST_SIZE,
1930 },
1931 .caam = {
1932 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1933 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1934 OP_ALG_AAI_HMAC_PRECOMP,
1935 .geniv = true,
1936 },
1937 },
1938 {
1939 .aead = {
1940 .base = {
1941 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1942 .cra_driver_name = "authenc-hmac-sha224-"
1943 "cbc-aes-caam-qi2",
1944 .cra_blocksize = AES_BLOCK_SIZE,
1945 },
1946 .setkey = aead_setkey,
1947 .setauthsize = aead_setauthsize,
1948 .encrypt = aead_encrypt,
1949 .decrypt = aead_decrypt,
1950 .ivsize = AES_BLOCK_SIZE,
1951 .maxauthsize = SHA224_DIGEST_SIZE,
1952 },
1953 .caam = {
1954 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1955 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1956 OP_ALG_AAI_HMAC_PRECOMP,
1957 }
1958 },
1959 {
1960 .aead = {
1961 .base = {
1962 .cra_name = "echainiv(authenc(hmac(sha224),"
1963 "cbc(aes)))",
1964 .cra_driver_name = "echainiv-authenc-"
1965 "hmac-sha224-cbc-aes-caam-qi2",
1966 .cra_blocksize = AES_BLOCK_SIZE,
1967 },
1968 .setkey = aead_setkey,
1969 .setauthsize = aead_setauthsize,
1970 .encrypt = aead_encrypt,
1971 .decrypt = aead_decrypt,
1972 .ivsize = AES_BLOCK_SIZE,
1973 .maxauthsize = SHA224_DIGEST_SIZE,
1974 },
1975 .caam = {
1976 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1977 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1978 OP_ALG_AAI_HMAC_PRECOMP,
1979 .geniv = true,
1980 }
1981 },
1982 {
1983 .aead = {
1984 .base = {
1985 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1986 .cra_driver_name = "authenc-hmac-sha256-"
1987 "cbc-aes-caam-qi2",
1988 .cra_blocksize = AES_BLOCK_SIZE,
1989 },
1990 .setkey = aead_setkey,
1991 .setauthsize = aead_setauthsize,
1992 .encrypt = aead_encrypt,
1993 .decrypt = aead_decrypt,
1994 .ivsize = AES_BLOCK_SIZE,
1995 .maxauthsize = SHA256_DIGEST_SIZE,
1996 },
1997 .caam = {
1998 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1999 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2000 OP_ALG_AAI_HMAC_PRECOMP,
2001 }
2002 },
2003 {
2004 .aead = {
2005 .base = {
2006 .cra_name = "echainiv(authenc(hmac(sha256),"
2007 "cbc(aes)))",
2008 .cra_driver_name = "echainiv-authenc-"
2009 "hmac-sha256-cbc-aes-"
2010 "caam-qi2",
2011 .cra_blocksize = AES_BLOCK_SIZE,
2012 },
2013 .setkey = aead_setkey,
2014 .setauthsize = aead_setauthsize,
2015 .encrypt = aead_encrypt,
2016 .decrypt = aead_decrypt,
2017 .ivsize = AES_BLOCK_SIZE,
2018 .maxauthsize = SHA256_DIGEST_SIZE,
2019 },
2020 .caam = {
2021 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2022 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2023 OP_ALG_AAI_HMAC_PRECOMP,
2024 .geniv = true,
2025 }
2026 },
2027 {
2028 .aead = {
2029 .base = {
2030 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2031 .cra_driver_name = "authenc-hmac-sha384-"
2032 "cbc-aes-caam-qi2",
2033 .cra_blocksize = AES_BLOCK_SIZE,
2034 },
2035 .setkey = aead_setkey,
2036 .setauthsize = aead_setauthsize,
2037 .encrypt = aead_encrypt,
2038 .decrypt = aead_decrypt,
2039 .ivsize = AES_BLOCK_SIZE,
2040 .maxauthsize = SHA384_DIGEST_SIZE,
2041 },
2042 .caam = {
2043 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2044 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2045 OP_ALG_AAI_HMAC_PRECOMP,
2046 }
2047 },
2048 {
2049 .aead = {
2050 .base = {
2051 .cra_name = "echainiv(authenc(hmac(sha384),"
2052 "cbc(aes)))",
2053 .cra_driver_name = "echainiv-authenc-"
2054 "hmac-sha384-cbc-aes-"
2055 "caam-qi2",
2056 .cra_blocksize = AES_BLOCK_SIZE,
2057 },
2058 .setkey = aead_setkey,
2059 .setauthsize = aead_setauthsize,
2060 .encrypt = aead_encrypt,
2061 .decrypt = aead_decrypt,
2062 .ivsize = AES_BLOCK_SIZE,
2063 .maxauthsize = SHA384_DIGEST_SIZE,
2064 },
2065 .caam = {
2066 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2067 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2068 OP_ALG_AAI_HMAC_PRECOMP,
2069 .geniv = true,
2070 }
2071 },
2072 {
2073 .aead = {
2074 .base = {
2075 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2076 .cra_driver_name = "authenc-hmac-sha512-"
2077 "cbc-aes-caam-qi2",
2078 .cra_blocksize = AES_BLOCK_SIZE,
2079 },
2080 .setkey = aead_setkey,
2081 .setauthsize = aead_setauthsize,
2082 .encrypt = aead_encrypt,
2083 .decrypt = aead_decrypt,
2084 .ivsize = AES_BLOCK_SIZE,
2085 .maxauthsize = SHA512_DIGEST_SIZE,
2086 },
2087 .caam = {
2088 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2089 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2090 OP_ALG_AAI_HMAC_PRECOMP,
2091 }
2092 },
2093 {
2094 .aead = {
2095 .base = {
2096 .cra_name = "echainiv(authenc(hmac(sha512),"
2097 "cbc(aes)))",
2098 .cra_driver_name = "echainiv-authenc-"
2099 "hmac-sha512-cbc-aes-"
2100 "caam-qi2",
2101 .cra_blocksize = AES_BLOCK_SIZE,
2102 },
2103 .setkey = aead_setkey,
2104 .setauthsize = aead_setauthsize,
2105 .encrypt = aead_encrypt,
2106 .decrypt = aead_decrypt,
2107 .ivsize = AES_BLOCK_SIZE,
2108 .maxauthsize = SHA512_DIGEST_SIZE,
2109 },
2110 .caam = {
2111 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2112 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2113 OP_ALG_AAI_HMAC_PRECOMP,
2114 .geniv = true,
2115 }
2116 },
2117 {
2118 .aead = {
2119 .base = {
2120 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2121 .cra_driver_name = "authenc-hmac-md5-"
2122 "cbc-des3_ede-caam-qi2",
2123 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2124 },
2125 .setkey = des3_aead_setkey,
2126 .setauthsize = aead_setauthsize,
2127 .encrypt = aead_encrypt,
2128 .decrypt = aead_decrypt,
2129 .ivsize = DES3_EDE_BLOCK_SIZE,
2130 .maxauthsize = MD5_DIGEST_SIZE,
2131 },
2132 .caam = {
2133 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2134 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2135 OP_ALG_AAI_HMAC_PRECOMP,
2136 }
2137 },
2138 {
2139 .aead = {
2140 .base = {
2141 .cra_name = "echainiv(authenc(hmac(md5),"
2142 "cbc(des3_ede)))",
2143 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2144 "cbc-des3_ede-caam-qi2",
2145 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2146 },
2147 .setkey = des3_aead_setkey,
2148 .setauthsize = aead_setauthsize,
2149 .encrypt = aead_encrypt,
2150 .decrypt = aead_decrypt,
2151 .ivsize = DES3_EDE_BLOCK_SIZE,
2152 .maxauthsize = MD5_DIGEST_SIZE,
2153 },
2154 .caam = {
2155 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2156 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2157 OP_ALG_AAI_HMAC_PRECOMP,
2158 .geniv = true,
2159 }
2160 },
2161 {
2162 .aead = {
2163 .base = {
2164 .cra_name = "authenc(hmac(sha1),"
2165 "cbc(des3_ede))",
2166 .cra_driver_name = "authenc-hmac-sha1-"
2167 "cbc-des3_ede-caam-qi2",
2168 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2169 },
2170 .setkey = des3_aead_setkey,
2171 .setauthsize = aead_setauthsize,
2172 .encrypt = aead_encrypt,
2173 .decrypt = aead_decrypt,
2174 .ivsize = DES3_EDE_BLOCK_SIZE,
2175 .maxauthsize = SHA1_DIGEST_SIZE,
2176 },
2177 .caam = {
2178 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2179 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2180 OP_ALG_AAI_HMAC_PRECOMP,
2181 },
2182 },
2183 {
2184 .aead = {
2185 .base = {
2186 .cra_name = "echainiv(authenc(hmac(sha1),"
2187 "cbc(des3_ede)))",
2188 .cra_driver_name = "echainiv-authenc-"
2189 "hmac-sha1-"
2190 "cbc-des3_ede-caam-qi2",
2191 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2192 },
2193 .setkey = des3_aead_setkey,
2194 .setauthsize = aead_setauthsize,
2195 .encrypt = aead_encrypt,
2196 .decrypt = aead_decrypt,
2197 .ivsize = DES3_EDE_BLOCK_SIZE,
2198 .maxauthsize = SHA1_DIGEST_SIZE,
2199 },
2200 .caam = {
2201 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2202 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2203 OP_ALG_AAI_HMAC_PRECOMP,
2204 .geniv = true,
2205 }
2206 },
2207 {
2208 .aead = {
2209 .base = {
2210 .cra_name = "authenc(hmac(sha224),"
2211 "cbc(des3_ede))",
2212 .cra_driver_name = "authenc-hmac-sha224-"
2213 "cbc-des3_ede-caam-qi2",
2214 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2215 },
2216 .setkey = des3_aead_setkey,
2217 .setauthsize = aead_setauthsize,
2218 .encrypt = aead_encrypt,
2219 .decrypt = aead_decrypt,
2220 .ivsize = DES3_EDE_BLOCK_SIZE,
2221 .maxauthsize = SHA224_DIGEST_SIZE,
2222 },
2223 .caam = {
2224 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2225 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2226 OP_ALG_AAI_HMAC_PRECOMP,
2227 },
2228 },
2229 {
2230 .aead = {
2231 .base = {
2232 .cra_name = "echainiv(authenc(hmac(sha224),"
2233 "cbc(des3_ede)))",
2234 .cra_driver_name = "echainiv-authenc-"
2235 "hmac-sha224-"
2236 "cbc-des3_ede-caam-qi2",
2237 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2238 },
2239 .setkey = des3_aead_setkey,
2240 .setauthsize = aead_setauthsize,
2241 .encrypt = aead_encrypt,
2242 .decrypt = aead_decrypt,
2243 .ivsize = DES3_EDE_BLOCK_SIZE,
2244 .maxauthsize = SHA224_DIGEST_SIZE,
2245 },
2246 .caam = {
2247 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2248 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2249 OP_ALG_AAI_HMAC_PRECOMP,
2250 .geniv = true,
2251 }
2252 },
2253 {
2254 .aead = {
2255 .base = {
2256 .cra_name = "authenc(hmac(sha256),"
2257 "cbc(des3_ede))",
2258 .cra_driver_name = "authenc-hmac-sha256-"
2259 "cbc-des3_ede-caam-qi2",
2260 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2261 },
2262 .setkey = des3_aead_setkey,
2263 .setauthsize = aead_setauthsize,
2264 .encrypt = aead_encrypt,
2265 .decrypt = aead_decrypt,
2266 .ivsize = DES3_EDE_BLOCK_SIZE,
2267 .maxauthsize = SHA256_DIGEST_SIZE,
2268 },
2269 .caam = {
2270 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2271 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2272 OP_ALG_AAI_HMAC_PRECOMP,
2273 },
2274 },
2275 {
2276 .aead = {
2277 .base = {
2278 .cra_name = "echainiv(authenc(hmac(sha256),"
2279 "cbc(des3_ede)))",
2280 .cra_driver_name = "echainiv-authenc-"
2281 "hmac-sha256-"
2282 "cbc-des3_ede-caam-qi2",
2283 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2284 },
2285 .setkey = des3_aead_setkey,
2286 .setauthsize = aead_setauthsize,
2287 .encrypt = aead_encrypt,
2288 .decrypt = aead_decrypt,
2289 .ivsize = DES3_EDE_BLOCK_SIZE,
2290 .maxauthsize = SHA256_DIGEST_SIZE,
2291 },
2292 .caam = {
2293 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2294 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2295 OP_ALG_AAI_HMAC_PRECOMP,
2296 .geniv = true,
2297 }
2298 },
2299 {
2300 .aead = {
2301 .base = {
2302 .cra_name = "authenc(hmac(sha384),"
2303 "cbc(des3_ede))",
2304 .cra_driver_name = "authenc-hmac-sha384-"
2305 "cbc-des3_ede-caam-qi2",
2306 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2307 },
2308 .setkey = des3_aead_setkey,
2309 .setauthsize = aead_setauthsize,
2310 .encrypt = aead_encrypt,
2311 .decrypt = aead_decrypt,
2312 .ivsize = DES3_EDE_BLOCK_SIZE,
2313 .maxauthsize = SHA384_DIGEST_SIZE,
2314 },
2315 .caam = {
2316 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2317 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2318 OP_ALG_AAI_HMAC_PRECOMP,
2319 },
2320 },
2321 {
2322 .aead = {
2323 .base = {
2324 .cra_name = "echainiv(authenc(hmac(sha384),"
2325 "cbc(des3_ede)))",
2326 .cra_driver_name = "echainiv-authenc-"
2327 "hmac-sha384-"
2328 "cbc-des3_ede-caam-qi2",
2329 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2330 },
2331 .setkey = des3_aead_setkey,
2332 .setauthsize = aead_setauthsize,
2333 .encrypt = aead_encrypt,
2334 .decrypt = aead_decrypt,
2335 .ivsize = DES3_EDE_BLOCK_SIZE,
2336 .maxauthsize = SHA384_DIGEST_SIZE,
2337 },
2338 .caam = {
2339 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2340 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2341 OP_ALG_AAI_HMAC_PRECOMP,
2342 .geniv = true,
2343 }
2344 },
2345 {
2346 .aead = {
2347 .base = {
2348 .cra_name = "authenc(hmac(sha512),"
2349 "cbc(des3_ede))",
2350 .cra_driver_name = "authenc-hmac-sha512-"
2351 "cbc-des3_ede-caam-qi2",
2352 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2353 },
2354 .setkey = des3_aead_setkey,
2355 .setauthsize = aead_setauthsize,
2356 .encrypt = aead_encrypt,
2357 .decrypt = aead_decrypt,
2358 .ivsize = DES3_EDE_BLOCK_SIZE,
2359 .maxauthsize = SHA512_DIGEST_SIZE,
2360 },
2361 .caam = {
2362 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2363 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2364 OP_ALG_AAI_HMAC_PRECOMP,
2365 },
2366 },
2367 {
2368 .aead = {
2369 .base = {
2370 .cra_name = "echainiv(authenc(hmac(sha512),"
2371 "cbc(des3_ede)))",
2372 .cra_driver_name = "echainiv-authenc-"
2373 "hmac-sha512-"
2374 "cbc-des3_ede-caam-qi2",
2375 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2376 },
2377 .setkey = des3_aead_setkey,
2378 .setauthsize = aead_setauthsize,
2379 .encrypt = aead_encrypt,
2380 .decrypt = aead_decrypt,
2381 .ivsize = DES3_EDE_BLOCK_SIZE,
2382 .maxauthsize = SHA512_DIGEST_SIZE,
2383 },
2384 .caam = {
2385 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2386 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2387 OP_ALG_AAI_HMAC_PRECOMP,
2388 .geniv = true,
2389 }
2390 },
2391 {
2392 .aead = {
2393 .base = {
2394 .cra_name = "authenc(hmac(md5),cbc(des))",
2395 .cra_driver_name = "authenc-hmac-md5-"
2396 "cbc-des-caam-qi2",
2397 .cra_blocksize = DES_BLOCK_SIZE,
2398 },
2399 .setkey = aead_setkey,
2400 .setauthsize = aead_setauthsize,
2401 .encrypt = aead_encrypt,
2402 .decrypt = aead_decrypt,
2403 .ivsize = DES_BLOCK_SIZE,
2404 .maxauthsize = MD5_DIGEST_SIZE,
2405 },
2406 .caam = {
2407 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2408 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2409 OP_ALG_AAI_HMAC_PRECOMP,
2410 },
2411 },
2412 {
2413 .aead = {
2414 .base = {
2415 .cra_name = "echainiv(authenc(hmac(md5),"
2416 "cbc(des)))",
2417 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2418 "cbc-des-caam-qi2",
2419 .cra_blocksize = DES_BLOCK_SIZE,
2420 },
2421 .setkey = aead_setkey,
2422 .setauthsize = aead_setauthsize,
2423 .encrypt = aead_encrypt,
2424 .decrypt = aead_decrypt,
2425 .ivsize = DES_BLOCK_SIZE,
2426 .maxauthsize = MD5_DIGEST_SIZE,
2427 },
2428 .caam = {
2429 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2430 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2431 OP_ALG_AAI_HMAC_PRECOMP,
2432 .geniv = true,
2433 }
2434 },
2435 {
2436 .aead = {
2437 .base = {
2438 .cra_name = "authenc(hmac(sha1),cbc(des))",
2439 .cra_driver_name = "authenc-hmac-sha1-"
2440 "cbc-des-caam-qi2",
2441 .cra_blocksize = DES_BLOCK_SIZE,
2442 },
2443 .setkey = aead_setkey,
2444 .setauthsize = aead_setauthsize,
2445 .encrypt = aead_encrypt,
2446 .decrypt = aead_decrypt,
2447 .ivsize = DES_BLOCK_SIZE,
2448 .maxauthsize = SHA1_DIGEST_SIZE,
2449 },
2450 .caam = {
2451 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2452 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2453 OP_ALG_AAI_HMAC_PRECOMP,
2454 },
2455 },
2456 {
2457 .aead = {
2458 .base = {
2459 .cra_name = "echainiv(authenc(hmac(sha1),"
2460 "cbc(des)))",
2461 .cra_driver_name = "echainiv-authenc-"
2462 "hmac-sha1-cbc-des-caam-qi2",
2463 .cra_blocksize = DES_BLOCK_SIZE,
2464 },
2465 .setkey = aead_setkey,
2466 .setauthsize = aead_setauthsize,
2467 .encrypt = aead_encrypt,
2468 .decrypt = aead_decrypt,
2469 .ivsize = DES_BLOCK_SIZE,
2470 .maxauthsize = SHA1_DIGEST_SIZE,
2471 },
2472 .caam = {
2473 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2474 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2475 OP_ALG_AAI_HMAC_PRECOMP,
2476 .geniv = true,
2477 }
2478 },
2479 {
2480 .aead = {
2481 .base = {
2482 .cra_name = "authenc(hmac(sha224),cbc(des))",
2483 .cra_driver_name = "authenc-hmac-sha224-"
2484 "cbc-des-caam-qi2",
2485 .cra_blocksize = DES_BLOCK_SIZE,
2486 },
2487 .setkey = aead_setkey,
2488 .setauthsize = aead_setauthsize,
2489 .encrypt = aead_encrypt,
2490 .decrypt = aead_decrypt,
2491 .ivsize = DES_BLOCK_SIZE,
2492 .maxauthsize = SHA224_DIGEST_SIZE,
2493 },
2494 .caam = {
2495 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2496 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2497 OP_ALG_AAI_HMAC_PRECOMP,
2498 },
2499 },
2500 {
2501 .aead = {
2502 .base = {
2503 .cra_name = "echainiv(authenc(hmac(sha224),"
2504 "cbc(des)))",
2505 .cra_driver_name = "echainiv-authenc-"
2506 "hmac-sha224-cbc-des-"
2507 "caam-qi2",
2508 .cra_blocksize = DES_BLOCK_SIZE,
2509 },
2510 .setkey = aead_setkey,
2511 .setauthsize = aead_setauthsize,
2512 .encrypt = aead_encrypt,
2513 .decrypt = aead_decrypt,
2514 .ivsize = DES_BLOCK_SIZE,
2515 .maxauthsize = SHA224_DIGEST_SIZE,
2516 },
2517 .caam = {
2518 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2519 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2520 OP_ALG_AAI_HMAC_PRECOMP,
2521 .geniv = true,
2522 }
2523 },
2524 {
2525 .aead = {
2526 .base = {
2527 .cra_name = "authenc(hmac(sha256),cbc(des))",
2528 .cra_driver_name = "authenc-hmac-sha256-"
2529 "cbc-des-caam-qi2",
2530 .cra_blocksize = DES_BLOCK_SIZE,
2531 },
2532 .setkey = aead_setkey,
2533 .setauthsize = aead_setauthsize,
2534 .encrypt = aead_encrypt,
2535 .decrypt = aead_decrypt,
2536 .ivsize = DES_BLOCK_SIZE,
2537 .maxauthsize = SHA256_DIGEST_SIZE,
2538 },
2539 .caam = {
2540 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2541 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2542 OP_ALG_AAI_HMAC_PRECOMP,
2543 },
2544 },
2545 {
2546 .aead = {
2547 .base = {
2548 .cra_name = "echainiv(authenc(hmac(sha256),"
2549 "cbc(des)))",
2550 .cra_driver_name = "echainiv-authenc-"
2551 "hmac-sha256-cbc-des-"
2552 "caam-qi2",
2553 .cra_blocksize = DES_BLOCK_SIZE,
2554 },
2555 .setkey = aead_setkey,
2556 .setauthsize = aead_setauthsize,
2557 .encrypt = aead_encrypt,
2558 .decrypt = aead_decrypt,
2559 .ivsize = DES_BLOCK_SIZE,
2560 .maxauthsize = SHA256_DIGEST_SIZE,
2561 },
2562 .caam = {
2563 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2564 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2565 OP_ALG_AAI_HMAC_PRECOMP,
2566 .geniv = true,
2567 },
2568 },
2569 {
2570 .aead = {
2571 .base = {
2572 .cra_name = "authenc(hmac(sha384),cbc(des))",
2573 .cra_driver_name = "authenc-hmac-sha384-"
2574 "cbc-des-caam-qi2",
2575 .cra_blocksize = DES_BLOCK_SIZE,
2576 },
2577 .setkey = aead_setkey,
2578 .setauthsize = aead_setauthsize,
2579 .encrypt = aead_encrypt,
2580 .decrypt = aead_decrypt,
2581 .ivsize = DES_BLOCK_SIZE,
2582 .maxauthsize = SHA384_DIGEST_SIZE,
2583 },
2584 .caam = {
2585 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2586 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2587 OP_ALG_AAI_HMAC_PRECOMP,
2588 },
2589 },
2590 {
2591 .aead = {
2592 .base = {
2593 .cra_name = "echainiv(authenc(hmac(sha384),"
2594 "cbc(des)))",
2595 .cra_driver_name = "echainiv-authenc-"
2596 "hmac-sha384-cbc-des-"
2597 "caam-qi2",
2598 .cra_blocksize = DES_BLOCK_SIZE,
2599 },
2600 .setkey = aead_setkey,
2601 .setauthsize = aead_setauthsize,
2602 .encrypt = aead_encrypt,
2603 .decrypt = aead_decrypt,
2604 .ivsize = DES_BLOCK_SIZE,
2605 .maxauthsize = SHA384_DIGEST_SIZE,
2606 },
2607 .caam = {
2608 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2609 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2610 OP_ALG_AAI_HMAC_PRECOMP,
2611 .geniv = true,
2612 }
2613 },
2614 {
2615 .aead = {
2616 .base = {
2617 .cra_name = "authenc(hmac(sha512),cbc(des))",
2618 .cra_driver_name = "authenc-hmac-sha512-"
2619 "cbc-des-caam-qi2",
2620 .cra_blocksize = DES_BLOCK_SIZE,
2621 },
2622 .setkey = aead_setkey,
2623 .setauthsize = aead_setauthsize,
2624 .encrypt = aead_encrypt,
2625 .decrypt = aead_decrypt,
2626 .ivsize = DES_BLOCK_SIZE,
2627 .maxauthsize = SHA512_DIGEST_SIZE,
2628 },
2629 .caam = {
2630 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2631 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2632 OP_ALG_AAI_HMAC_PRECOMP,
2633 }
2634 },
2635 {
2636 .aead = {
2637 .base = {
2638 .cra_name = "echainiv(authenc(hmac(sha512),"
2639 "cbc(des)))",
2640 .cra_driver_name = "echainiv-authenc-"
2641 "hmac-sha512-cbc-des-"
2642 "caam-qi2",
2643 .cra_blocksize = DES_BLOCK_SIZE,
2644 },
2645 .setkey = aead_setkey,
2646 .setauthsize = aead_setauthsize,
2647 .encrypt = aead_encrypt,
2648 .decrypt = aead_decrypt,
2649 .ivsize = DES_BLOCK_SIZE,
2650 .maxauthsize = SHA512_DIGEST_SIZE,
2651 },
2652 .caam = {
2653 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2654 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2655 OP_ALG_AAI_HMAC_PRECOMP,
2656 .geniv = true,
2657 }
2658 },
2659 {
2660 .aead = {
2661 .base = {
2662 .cra_name = "authenc(hmac(md5),"
2663 "rfc3686(ctr(aes)))",
2664 .cra_driver_name = "authenc-hmac-md5-"
2665 "rfc3686-ctr-aes-caam-qi2",
2666 .cra_blocksize = 1,
2667 },
2668 .setkey = aead_setkey,
2669 .setauthsize = aead_setauthsize,
2670 .encrypt = aead_encrypt,
2671 .decrypt = aead_decrypt,
2672 .ivsize = CTR_RFC3686_IV_SIZE,
2673 .maxauthsize = MD5_DIGEST_SIZE,
2674 },
2675 .caam = {
2676 .class1_alg_type = OP_ALG_ALGSEL_AES |
2677 OP_ALG_AAI_CTR_MOD128,
2678 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2679 OP_ALG_AAI_HMAC_PRECOMP,
2680 .rfc3686 = true,
2681 },
2682 },
2683 {
2684 .aead = {
2685 .base = {
2686 .cra_name = "seqiv(authenc("
2687 "hmac(md5),rfc3686(ctr(aes))))",
2688 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2689 "rfc3686-ctr-aes-caam-qi2",
2690 .cra_blocksize = 1,
2691 },
2692 .setkey = aead_setkey,
2693 .setauthsize = aead_setauthsize,
2694 .encrypt = aead_encrypt,
2695 .decrypt = aead_decrypt,
2696 .ivsize = CTR_RFC3686_IV_SIZE,
2697 .maxauthsize = MD5_DIGEST_SIZE,
2698 },
2699 .caam = {
2700 .class1_alg_type = OP_ALG_ALGSEL_AES |
2701 OP_ALG_AAI_CTR_MOD128,
2702 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2703 OP_ALG_AAI_HMAC_PRECOMP,
2704 .rfc3686 = true,
2705 .geniv = true,
2706 },
2707 },
2708 {
2709 .aead = {
2710 .base = {
2711 .cra_name = "authenc(hmac(sha1),"
2712 "rfc3686(ctr(aes)))",
2713 .cra_driver_name = "authenc-hmac-sha1-"
2714 "rfc3686-ctr-aes-caam-qi2",
2715 .cra_blocksize = 1,
2716 },
2717 .setkey = aead_setkey,
2718 .setauthsize = aead_setauthsize,
2719 .encrypt = aead_encrypt,
2720 .decrypt = aead_decrypt,
2721 .ivsize = CTR_RFC3686_IV_SIZE,
2722 .maxauthsize = SHA1_DIGEST_SIZE,
2723 },
2724 .caam = {
2725 .class1_alg_type = OP_ALG_ALGSEL_AES |
2726 OP_ALG_AAI_CTR_MOD128,
2727 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2728 OP_ALG_AAI_HMAC_PRECOMP,
2729 .rfc3686 = true,
2730 },
2731 },
2732 {
2733 .aead = {
2734 .base = {
2735 .cra_name = "seqiv(authenc("
2736 "hmac(sha1),rfc3686(ctr(aes))))",
2737 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2738 "rfc3686-ctr-aes-caam-qi2",
2739 .cra_blocksize = 1,
2740 },
2741 .setkey = aead_setkey,
2742 .setauthsize = aead_setauthsize,
2743 .encrypt = aead_encrypt,
2744 .decrypt = aead_decrypt,
2745 .ivsize = CTR_RFC3686_IV_SIZE,
2746 .maxauthsize = SHA1_DIGEST_SIZE,
2747 },
2748 .caam = {
2749 .class1_alg_type = OP_ALG_ALGSEL_AES |
2750 OP_ALG_AAI_CTR_MOD128,
2751 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2752 OP_ALG_AAI_HMAC_PRECOMP,
2753 .rfc3686 = true,
2754 .geniv = true,
2755 },
2756 },
2757 {
2758 .aead = {
2759 .base = {
2760 .cra_name = "authenc(hmac(sha224),"
2761 "rfc3686(ctr(aes)))",
2762 .cra_driver_name = "authenc-hmac-sha224-"
2763 "rfc3686-ctr-aes-caam-qi2",
2764 .cra_blocksize = 1,
2765 },
2766 .setkey = aead_setkey,
2767 .setauthsize = aead_setauthsize,
2768 .encrypt = aead_encrypt,
2769 .decrypt = aead_decrypt,
2770 .ivsize = CTR_RFC3686_IV_SIZE,
2771 .maxauthsize = SHA224_DIGEST_SIZE,
2772 },
2773 .caam = {
2774 .class1_alg_type = OP_ALG_ALGSEL_AES |
2775 OP_ALG_AAI_CTR_MOD128,
2776 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2777 OP_ALG_AAI_HMAC_PRECOMP,
2778 .rfc3686 = true,
2779 },
2780 },
2781 {
2782 .aead = {
2783 .base = {
2784 .cra_name = "seqiv(authenc("
2785 "hmac(sha224),rfc3686(ctr(aes))))",
2786 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2787 "rfc3686-ctr-aes-caam-qi2",
2788 .cra_blocksize = 1,
2789 },
2790 .setkey = aead_setkey,
2791 .setauthsize = aead_setauthsize,
2792 .encrypt = aead_encrypt,
2793 .decrypt = aead_decrypt,
2794 .ivsize = CTR_RFC3686_IV_SIZE,
2795 .maxauthsize = SHA224_DIGEST_SIZE,
2796 },
2797 .caam = {
2798 .class1_alg_type = OP_ALG_ALGSEL_AES |
2799 OP_ALG_AAI_CTR_MOD128,
2800 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2801 OP_ALG_AAI_HMAC_PRECOMP,
2802 .rfc3686 = true,
2803 .geniv = true,
2804 },
2805 },
2806 {
2807 .aead = {
2808 .base = {
2809 .cra_name = "authenc(hmac(sha256),"
2810 "rfc3686(ctr(aes)))",
2811 .cra_driver_name = "authenc-hmac-sha256-"
2812 "rfc3686-ctr-aes-caam-qi2",
2813 .cra_blocksize = 1,
2814 },
2815 .setkey = aead_setkey,
2816 .setauthsize = aead_setauthsize,
2817 .encrypt = aead_encrypt,
2818 .decrypt = aead_decrypt,
2819 .ivsize = CTR_RFC3686_IV_SIZE,
2820 .maxauthsize = SHA256_DIGEST_SIZE,
2821 },
2822 .caam = {
2823 .class1_alg_type = OP_ALG_ALGSEL_AES |
2824 OP_ALG_AAI_CTR_MOD128,
2825 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2826 OP_ALG_AAI_HMAC_PRECOMP,
2827 .rfc3686 = true,
2828 },
2829 },
2830 {
2831 .aead = {
2832 .base = {
2833 .cra_name = "seqiv(authenc(hmac(sha256),"
2834 "rfc3686(ctr(aes))))",
2835 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2836 "rfc3686-ctr-aes-caam-qi2",
2837 .cra_blocksize = 1,
2838 },
2839 .setkey = aead_setkey,
2840 .setauthsize = aead_setauthsize,
2841 .encrypt = aead_encrypt,
2842 .decrypt = aead_decrypt,
2843 .ivsize = CTR_RFC3686_IV_SIZE,
2844 .maxauthsize = SHA256_DIGEST_SIZE,
2845 },
2846 .caam = {
2847 .class1_alg_type = OP_ALG_ALGSEL_AES |
2848 OP_ALG_AAI_CTR_MOD128,
2849 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2850 OP_ALG_AAI_HMAC_PRECOMP,
2851 .rfc3686 = true,
2852 .geniv = true,
2853 },
2854 },
2855 {
2856 .aead = {
2857 .base = {
2858 .cra_name = "authenc(hmac(sha384),"
2859 "rfc3686(ctr(aes)))",
2860 .cra_driver_name = "authenc-hmac-sha384-"
2861 "rfc3686-ctr-aes-caam-qi2",
2862 .cra_blocksize = 1,
2863 },
2864 .setkey = aead_setkey,
2865 .setauthsize = aead_setauthsize,
2866 .encrypt = aead_encrypt,
2867 .decrypt = aead_decrypt,
2868 .ivsize = CTR_RFC3686_IV_SIZE,
2869 .maxauthsize = SHA384_DIGEST_SIZE,
2870 },
2871 .caam = {
2872 .class1_alg_type = OP_ALG_ALGSEL_AES |
2873 OP_ALG_AAI_CTR_MOD128,
2874 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2875 OP_ALG_AAI_HMAC_PRECOMP,
2876 .rfc3686 = true,
2877 },
2878 },
2879 {
2880 .aead = {
2881 .base = {
2882 .cra_name = "seqiv(authenc(hmac(sha384),"
2883 "rfc3686(ctr(aes))))",
2884 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2885 "rfc3686-ctr-aes-caam-qi2",
2886 .cra_blocksize = 1,
2887 },
2888 .setkey = aead_setkey,
2889 .setauthsize = aead_setauthsize,
2890 .encrypt = aead_encrypt,
2891 .decrypt = aead_decrypt,
2892 .ivsize = CTR_RFC3686_IV_SIZE,
2893 .maxauthsize = SHA384_DIGEST_SIZE,
2894 },
2895 .caam = {
2896 .class1_alg_type = OP_ALG_ALGSEL_AES |
2897 OP_ALG_AAI_CTR_MOD128,
2898 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2899 OP_ALG_AAI_HMAC_PRECOMP,
2900 .rfc3686 = true,
2901 .geniv = true,
2902 },
2903 },
2904 {
2905 .aead = {
2906 .base = {
2907 .cra_name = "rfc7539(chacha20,poly1305)",
2908 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2909 "caam-qi2",
2910 .cra_blocksize = 1,
2911 },
2912 .setkey = chachapoly_setkey,
2913 .setauthsize = chachapoly_setauthsize,
2914 .encrypt = aead_encrypt,
2915 .decrypt = aead_decrypt,
2916 .ivsize = CHACHAPOLY_IV_SIZE,
2917 .maxauthsize = POLY1305_DIGEST_SIZE,
2918 },
2919 .caam = {
2920 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2921 OP_ALG_AAI_AEAD,
2922 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2923 OP_ALG_AAI_AEAD,
2924 .nodkp = true,
2925 },
2926 },
2927 {
2928 .aead = {
2929 .base = {
2930 .cra_name = "rfc7539esp(chacha20,poly1305)",
2931 .cra_driver_name = "rfc7539esp-chacha20-"
2932 "poly1305-caam-qi2",
2933 .cra_blocksize = 1,
2934 },
2935 .setkey = chachapoly_setkey,
2936 .setauthsize = chachapoly_setauthsize,
2937 .encrypt = aead_encrypt,
2938 .decrypt = aead_decrypt,
2939 .ivsize = 8,
2940 .maxauthsize = POLY1305_DIGEST_SIZE,
2941 },
2942 .caam = {
2943 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2944 OP_ALG_AAI_AEAD,
2945 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2946 OP_ALG_AAI_AEAD,
2947 .nodkp = true,
2948 },
2949 },
2950 {
2951 .aead = {
2952 .base = {
2953 .cra_name = "authenc(hmac(sha512),"
2954 "rfc3686(ctr(aes)))",
2955 .cra_driver_name = "authenc-hmac-sha512-"
2956 "rfc3686-ctr-aes-caam-qi2",
2957 .cra_blocksize = 1,
2958 },
2959 .setkey = aead_setkey,
2960 .setauthsize = aead_setauthsize,
2961 .encrypt = aead_encrypt,
2962 .decrypt = aead_decrypt,
2963 .ivsize = CTR_RFC3686_IV_SIZE,
2964 .maxauthsize = SHA512_DIGEST_SIZE,
2965 },
2966 .caam = {
2967 .class1_alg_type = OP_ALG_ALGSEL_AES |
2968 OP_ALG_AAI_CTR_MOD128,
2969 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2970 OP_ALG_AAI_HMAC_PRECOMP,
2971 .rfc3686 = true,
2972 },
2973 },
2974 {
2975 .aead = {
2976 .base = {
2977 .cra_name = "seqiv(authenc(hmac(sha512),"
2978 "rfc3686(ctr(aes))))",
2979 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2980 "rfc3686-ctr-aes-caam-qi2",
2981 .cra_blocksize = 1,
2982 },
2983 .setkey = aead_setkey,
2984 .setauthsize = aead_setauthsize,
2985 .encrypt = aead_encrypt,
2986 .decrypt = aead_decrypt,
2987 .ivsize = CTR_RFC3686_IV_SIZE,
2988 .maxauthsize = SHA512_DIGEST_SIZE,
2989 },
2990 .caam = {
2991 .class1_alg_type = OP_ALG_ALGSEL_AES |
2992 OP_ALG_AAI_CTR_MOD128,
2993 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2994 OP_ALG_AAI_HMAC_PRECOMP,
2995 .rfc3686 = true,
2996 .geniv = true,
2997 },
2998 },
2999 };
3000
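/*
 * Fill in the skcipher fields common to all entries in driver_algs[]:
 * module, priority, context size, the mandatory flags and the init/exit
 * hooks. Once registered, kernel users reach these transforms through the
 * generic crypto API; a minimal sketch (standard API calls, nothing from
 * this file):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	// the crypto core picks the highest-priority implementation, so
 *	// "cbc-aes-caam-qi2" can be selected via its cra_priority
 */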
3001 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3002 {
3003 struct skcipher_alg *alg = &t_alg->skcipher;
3004
3005 alg->base.cra_module = THIS_MODULE;
3006 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3007 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3008 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3009 CRYPTO_ALG_KERN_DRIVER_ONLY);
3010
3011 alg->init = caam_cra_init_skcipher;
3012 alg->exit = caam_cra_exit;
3013 }
3014
3015 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3016 {
3017 struct aead_alg *alg = &t_alg->aead;
3018
3019 alg->base.cra_module = THIS_MODULE;
3020 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3021 alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3022 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3023 CRYPTO_ALG_KERN_DRIVER_ONLY;
3024
3025 alg->init = caam_cra_init_aead;
3026 alg->exit = caam_cra_exit_aead;
3027 }
3028
3029 /* max hash key is max split key size */
3030 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3031
3032 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3033
3034 /* caam context sizes for hashes: running digest + 8 */
3035 #define HASH_MSG_LEN 8
3036 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3037
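/* One shared descriptor (Flow Context) is kept per hash operation type */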
3038 enum hash_optype {
3039 UPDATE = 0,
3040 UPDATE_FIRST,
3041 FINALIZE,
3042 DIGEST,
3043 HASH_NUM_OP
3044 };
3045
3046 /**
3047 * struct caam_hash_ctx - ahash per-session context
3048 * @flc: Flow Contexts array
3049 * @key: authentication key
3050 * @flc_dma: I/O virtual addresses of the Flow Contexts
3051 * @dev: dpseci device
3052 * @ctx_len: size of Context Register
3053 * @adata: hashing algorithm details
3054 */
3055 struct caam_hash_ctx {
3056 struct caam_flc flc[HASH_NUM_OP];
3057 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3058 dma_addr_t flc_dma[HASH_NUM_OP];
3059 struct device *dev;
3060 int ctx_len;
3061 struct alginfo adata;
3062 };
3063
3064 /* ahash state */
3065 struct caam_hash_state {
3066 struct caam_request caam_req;
3067 dma_addr_t buf_dma;
3068 dma_addr_t ctx_dma;
3069 int ctx_dma_len;
3070 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3071 int buflen;
3072 int next_buflen;
3073 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3074 int (*update)(struct ahash_request *req);
3075 int (*final)(struct ahash_request *req);
3076 int (*finup)(struct ahash_request *req);
3077 };
3078
3079 struct caam_export_state {
3080 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3081 u8 caam_ctx[MAX_CTX_LEN];
3082 int buflen;
3083 int (*update)(struct ahash_request *req);
3084 int (*final)(struct ahash_request *req);
3085 int (*finup)(struct ahash_request *req);
3086 };
3087
3088 /* Map current buffer in state (if length > 0) and put it in link table */
3089 static inline int buf_map_to_qm_sg(struct device *dev,
3090 struct dpaa2_sg_entry *qm_sg,
3091 struct caam_hash_state *state)
3092 {
3093 int buflen = state->buflen;
3094
3095 if (!buflen)
3096 return 0;
3097
3098 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3099 DMA_TO_DEVICE);
3100 if (dma_mapping_error(dev, state->buf_dma)) {
3101 dev_err(dev, "unable to map buf\n");
3102 state->buf_dma = 0;
3103 return -ENOMEM;
3104 }
3105
3106 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3107
3108 return 0;
3109 }
3110
3111 /* Map state->caam_ctx, and add it to link table */
3112 static inline int ctx_map_to_qm_sg(struct device *dev,
3113 struct caam_hash_state *state, int ctx_len,
3114 struct dpaa2_sg_entry *qm_sg, u32 flag)
3115 {
3116 state->ctx_dma_len = ctx_len;
3117 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3118 if (dma_mapping_error(dev, state->ctx_dma)) {
3119 dev_err(dev, "unable to map ctx\n");
3120 state->ctx_dma = 0;
3121 return -ENOMEM;
3122 }
3123
3124 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3125
3126 return 0;
3127 }
3128
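/*
 * (Re)generate the four ahash shared descriptors - update, update_first,
 * finalize and digest - and sync each Flow Context to the device; called
 * e.g. from ahash_setkey() once ctx->adata is set up.
 */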
3129 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3130 {
3131 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3132 int digestsize = crypto_ahash_digestsize(ahash);
3133 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3134 struct caam_flc *flc;
3135 u32 *desc;
3136
3137 /* ahash_update shared descriptor */
3138 flc = &ctx->flc[UPDATE];
3139 desc = flc->sh_desc;
3140 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3141 ctx->ctx_len, true, priv->sec_attr.era);
3142 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3143 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3144 desc_bytes(desc), DMA_BIDIRECTIONAL);
3145 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3146 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3147 1);
3148
3149 /* ahash_update_first shared descriptor */
3150 flc = &ctx->flc[UPDATE_FIRST];
3151 desc = flc->sh_desc;
3152 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3153 ctx->ctx_len, false, priv->sec_attr.era);
3154 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3155 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3156 desc_bytes(desc), DMA_BIDIRECTIONAL);
3157 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3158 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3159 1);
3160
3161 /* ahash_final shared descriptor */
3162 flc = &ctx->flc[FINALIZE];
3163 desc = flc->sh_desc;
3164 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3165 ctx->ctx_len, true, priv->sec_attr.era);
3166 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3167 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3168 desc_bytes(desc), DMA_BIDIRECTIONAL);
3169 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3170 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3171 1);
3172
3173 /* ahash_digest shared descriptor */
3174 flc = &ctx->flc[DIGEST];
3175 desc = flc->sh_desc;
3176 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3177 ctx->ctx_len, false, priv->sec_attr.era);
3178 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3179 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3180 desc_bytes(desc), DMA_BIDIRECTIONAL);
3181 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3182 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3183 1);
3184
3185 return 0;
3186 }
3187
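/*
 * Completion context for the synchronous key-digest operation below:
 * split_key_sh_done() records the decoded status and wakes the waiter
 * in hash_digest_key().
 */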
3188 struct split_key_sh_result {
3189 struct completion completion;
3190 int err;
3191 struct device *dev;
3192 };
3193
3194 static void split_key_sh_done(void *cbk_ctx, u32 err)
3195 {
3196 struct split_key_sh_result *res = cbk_ctx;
3197
3198 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3199
3200 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3201 complete(&res->completion);
3202 }
3203
3204 /* Digest the key in place when it is longer than the algorithm's block size */
3205 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3206 u32 digestsize)
3207 {
3208 struct caam_request *req_ctx;
3209 u32 *desc;
3210 struct split_key_sh_result result;
3211 dma_addr_t key_dma;
3212 struct caam_flc *flc;
3213 dma_addr_t flc_dma;
3214 int ret = -ENOMEM;
3215 struct dpaa2_fl_entry *in_fle, *out_fle;
3216
3217 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
3218 if (!req_ctx)
3219 return -ENOMEM;
3220
3221 in_fle = &req_ctx->fd_flt[1];
3222 out_fle = &req_ctx->fd_flt[0];
3223
3224 flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
3225 if (!flc)
3226 goto err_flc;
3227
3228 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3229 if (dma_mapping_error(ctx->dev, key_dma)) {
3230 dev_err(ctx->dev, "unable to map key memory\n");
3231 goto err_key_dma;
3232 }
3233
3234 desc = flc->sh_desc;
3235
3236 init_sh_desc(desc, 0);
3237
3238 /* descriptor to perform unkeyed hash on key_in */
3239 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3240 OP_ALG_AS_INITFINAL);
3241 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3242 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3243 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3244 LDST_SRCDST_BYTE_CONTEXT);
3245
3246 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3247 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3248 desc_bytes(desc), DMA_TO_DEVICE);
3249 if (dma_mapping_error(ctx->dev, flc_dma)) {
3250 dev_err(ctx->dev, "unable to map shared descriptor\n");
3251 goto err_flc_dma;
3252 }
3253
3254 dpaa2_fl_set_final(in_fle, true);
3255 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3256 dpaa2_fl_set_addr(in_fle, key_dma);
3257 dpaa2_fl_set_len(in_fle, *keylen);
3258 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3259 dpaa2_fl_set_addr(out_fle, key_dma);
3260 dpaa2_fl_set_len(out_fle, digestsize);
3261
3262 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3263 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3264 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3265 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3266 1);
3267
3268 result.err = 0;
3269 init_completion(&result.completion);
3270 result.dev = ctx->dev;
3271
3272 req_ctx->flc = flc;
3273 req_ctx->flc_dma = flc_dma;
3274 req_ctx->cbk = split_key_sh_done;
3275 req_ctx->ctx = &result;
3276
3277 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3278 if (ret == -EINPROGRESS) {
3279 /* in progress */
3280 wait_for_completion(&result.completion);
3281 ret = result.err;
3282 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3283 DUMP_PREFIX_ADDRESS, 16, 4, key,
3284 digestsize, 1);
3285 }
3286
3287 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3288 DMA_TO_DEVICE);
3289 err_flc_dma:
3290 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3291 err_key_dma:
3292 kfree(flc);
3293 err_flc:
3294 kfree(req_ctx);
3295
3296 *keylen = digestsize;
3297
3298 return ret;
3299 }
3300
3301 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3302 unsigned int keylen)
3303 {
3304 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3305 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3306 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3307 int ret;
3308 u8 *hashed_key = NULL;
3309
3310 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3311
3312 if (keylen > blocksize) {
3313 hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
3314 if (!hashed_key)
3315 return -ENOMEM;
3316 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3317 if (ret)
3318 goto bad_free_key;
3319 key = hashed_key;
3320 }
3321
3322 ctx->adata.keylen = keylen;
3323 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3324 OP_ALG_ALGSEL_MASK);
3325 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3326 goto bad_free_key;
3327
3328 ctx->adata.key_virt = key;
3329 ctx->adata.key_inline = true;
3330
3331 /*
3332 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3333 * in invalid opcodes (last bytes of user key) in the resulting
3334 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3335 * addresses are needed.
3336 */
3337 if (keylen > ctx->adata.keylen_pad) {
3338 memcpy(ctx->key, key, keylen);
3339 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3340 ctx->adata.keylen_pad,
3341 DMA_TO_DEVICE);
3342 }
3343
3344 ret = ahash_set_sh_desc(ahash);
3345 kfree(hashed_key);
3346 return ret;
3347 bad_free_key:
3348 kfree(hashed_key);
3349 return -EINVAL;
3350 }
3351
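/*
 * Undo the DMA mappings set up when an ahash extended descriptor was built:
 * the source scatterlist, the S/G table and, if present, the buffered data.
 */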
3352 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3353 struct ahash_request *req)
3354 {
3355 struct caam_hash_state *state = ahash_request_ctx(req);
3356
3357 if (edesc->src_nents)
3358 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3359
3360 if (edesc->qm_sg_bytes)
3361 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3362 DMA_TO_DEVICE);
3363
3364 if (state->buf_dma) {
3365 dma_unmap_single(dev, state->buf_dma, state->buflen,
3366 DMA_TO_DEVICE);
3367 state->buf_dma = 0;
3368 }
3369 }
3370
3371 static inline void ahash_unmap_ctx(struct device *dev,
3372 struct ahash_edesc *edesc,
3373 struct ahash_request *req, u32 flag)
3374 {
3375 struct caam_hash_state *state = ahash_request_ctx(req);
3376
3377 if (state->ctx_dma) {
3378 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3379 state->ctx_dma = 0;
3380 }
3381 ahash_unmap(dev, edesc, req);
3382 }
3383
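/*
 * Completion callbacks. ahash_done() and ahash_done_ctx_src() copy the
 * final digest out of state->caam_ctx; ahash_done_bi() and
 * ahash_done_ctx_dst() instead keep the running context and buffer the
 * trailing partial block for the next update.
 */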
3384 static void ahash_done(void *cbk_ctx, u32 status)
3385 {
3386 struct crypto_async_request *areq = cbk_ctx;
3387 struct ahash_request *req = ahash_request_cast(areq);
3388 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3389 struct caam_hash_state *state = ahash_request_ctx(req);
3390 struct ahash_edesc *edesc = state->caam_req.edesc;
3391 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3392 int digestsize = crypto_ahash_digestsize(ahash);
3393 int ecode = 0;
3394
3395 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3396
3397 if (unlikely(status))
3398 ecode = caam_qi2_strstatus(ctx->dev, status);
3399
3400 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3401 memcpy(req->result, state->caam_ctx, digestsize);
3402 qi_cache_free(edesc);
3403
3404 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3405 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3406 ctx->ctx_len, 1);
3407
3408 req->base.complete(&req->base, ecode);
3409 }
3410
3411 static void ahash_done_bi(void *cbk_ctx, u32 status)
3412 {
3413 struct crypto_async_request *areq = cbk_ctx;
3414 struct ahash_request *req = ahash_request_cast(areq);
3415 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3416 struct caam_hash_state *state = ahash_request_ctx(req);
3417 struct ahash_edesc *edesc = state->caam_req.edesc;
3418 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3419 int ecode = 0;
3420
3421 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3422
3423 if (unlikely(status))
3424 ecode = caam_qi2_strstatus(ctx->dev, status);
3425
3426 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3427 qi_cache_free(edesc);
3428
3429 scatterwalk_map_and_copy(state->buf, req->src,
3430 req->nbytes - state->next_buflen,
3431 state->next_buflen, 0);
3432 state->buflen = state->next_buflen;
3433
3434 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3435 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3436 state->buflen, 1);
3437
3438 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3439 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3440 ctx->ctx_len, 1);
3441 if (req->result)
3442 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3443 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3444 crypto_ahash_digestsize(ahash), 1);
3445
3446 req->base.complete(&req->base, ecode);
3447 }
3448
3449 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3450 {
3451 struct crypto_async_request *areq = cbk_ctx;
3452 struct ahash_request *req = ahash_request_cast(areq);
3453 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3454 struct caam_hash_state *state = ahash_request_ctx(req);
3455 struct ahash_edesc *edesc = state->caam_req.edesc;
3456 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3457 int digestsize = crypto_ahash_digestsize(ahash);
3458 int ecode = 0;
3459
3460 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3461
3462 if (unlikely(status))
3463 ecode = caam_qi2_strstatus(ctx->dev, status);
3464
3465 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3466 memcpy(req->result, state->caam_ctx, digestsize);
3467 qi_cache_free(edesc);
3468
3469 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3470 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3471 ctx->ctx_len, 1);
3472
3473 req->base.complete(&req->base, ecode);
3474 }
3475
3476 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3477 {
3478 struct crypto_async_request *areq = cbk_ctx;
3479 struct ahash_request *req = ahash_request_cast(areq);
3480 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3481 struct caam_hash_state *state = ahash_request_ctx(req);
3482 struct ahash_edesc *edesc = state->caam_req.edesc;
3483 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3484 int ecode = 0;
3485
3486 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3487
3488 if (unlikely(status))
3489 ecode = caam_qi2_strstatus(ctx->dev, status);
3490
3491 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3492 qi_cache_free(edesc);
3493
3494 scatterwalk_map_and_copy(state->buf, req->src,
3495 req->nbytes - state->next_buflen,
3496 state->next_buflen, 0);
3497 state->buflen = state->next_buflen;
3498
3499 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3500 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3501 state->buflen, 1);
3502
3503 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3504 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3505 ctx->ctx_len, 1);
3506 if (req->result)
3507 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3508 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3509 crypto_ahash_digestsize(ahash), 1);
3510
3511 req->base.complete(&req->base, ecode);
3512 }
3513
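/*
 * Hash only whole blocks and stash the remainder in state->buf for the next
 * call: the input frame list entry chains the running context, previously
 * buffered bytes and new source data through one S/G table, while the
 * context register is updated in place (bidirectional mapping).
 */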
3514 static int ahash_update_ctx(struct ahash_request *req)
3515 {
3516 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3517 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3518 struct caam_hash_state *state = ahash_request_ctx(req);
3519 struct caam_request *req_ctx = &state->caam_req;
3520 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3521 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3522 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3523 GFP_KERNEL : GFP_ATOMIC;
3524 u8 *buf = state->buf;
3525 int *buflen = &state->buflen;
3526 int *next_buflen = &state->next_buflen;
3527 int in_len = *buflen + req->nbytes, to_hash;
3528 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3529 struct ahash_edesc *edesc;
3530 int ret = 0;
3531
3532 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3533 to_hash = in_len - *next_buflen;
3534
3535 if (to_hash) {
3536 struct dpaa2_sg_entry *sg_table;
3537 int src_len = req->nbytes - *next_buflen;
3538
3539 src_nents = sg_nents_for_len(req->src, src_len);
3540 if (src_nents < 0) {
3541 dev_err(ctx->dev, "Invalid number of src SG.\n");
3542 return src_nents;
3543 }
3544
3545 if (src_nents) {
3546 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3547 DMA_TO_DEVICE);
3548 if (!mapped_nents) {
3549 dev_err(ctx->dev, "unable to DMA map source\n");
3550 return -ENOMEM;
3551 }
3552 } else {
3553 mapped_nents = 0;
3554 }
3555
3556 /* allocate space for base edesc and link tables */
3557 edesc = qi_cache_zalloc(GFP_DMA | flags);
3558 if (!edesc) {
3559 dma_unmap_sg(ctx->dev, req->src, src_nents,
3560 DMA_TO_DEVICE);
3561 return -ENOMEM;
3562 }
3563
3564 edesc->src_nents = src_nents;
3565 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3566 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3567 sizeof(*sg_table);
3568 sg_table = &edesc->sgt[0];
3569
3570 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3571 DMA_BIDIRECTIONAL);
3572 if (ret)
3573 goto unmap_ctx;
3574
3575 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3576 if (ret)
3577 goto unmap_ctx;
3578
3579 if (mapped_nents) {
3580 sg_to_qm_sg_last(req->src, src_len,
3581 sg_table + qm_sg_src_index, 0);
3582 } else {
3583 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3584 true);
3585 }
3586
3587 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3588 qm_sg_bytes, DMA_TO_DEVICE);
3589 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3590 dev_err(ctx->dev, "unable to map S/G table\n");
3591 ret = -ENOMEM;
3592 goto unmap_ctx;
3593 }
3594 edesc->qm_sg_bytes = qm_sg_bytes;
3595
3596 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3597 dpaa2_fl_set_final(in_fle, true);
3598 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3599 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3600 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3601 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3602 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3603 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3604
3605 req_ctx->flc = &ctx->flc[UPDATE];
3606 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3607 req_ctx->cbk = ahash_done_bi;
3608 req_ctx->ctx = &req->base;
3609 req_ctx->edesc = edesc;
3610
3611 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3612 if (ret != -EINPROGRESS &&
3613 !(ret == -EBUSY &&
3614 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3615 goto unmap_ctx;
3616 } else if (*next_buflen) {
3617 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3618 req->nbytes, 0);
3619 *buflen = *next_buflen;
3620
3621 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3622 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3623 *buflen, 1);
3624 }
3625
3626 return ret;
3627 unmap_ctx:
3628 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3629 qi_cache_free(edesc);
3630 return ret;
3631 }
3632
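/*
 * Finalize: feed the running context plus any buffered bytes through the
 * FINALIZE descriptor; the completion callback copies the digest to
 * req->result.
 */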
3633 static int ahash_final_ctx(struct ahash_request *req)
3634 {
3635 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3636 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3637 struct caam_hash_state *state = ahash_request_ctx(req);
3638 struct caam_request *req_ctx = &state->caam_req;
3639 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3640 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3641 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3642 GFP_KERNEL : GFP_ATOMIC;
3643 int buflen = state->buflen;
3644 int qm_sg_bytes;
3645 int digestsize = crypto_ahash_digestsize(ahash);
3646 struct ahash_edesc *edesc;
3647 struct dpaa2_sg_entry *sg_table;
3648 int ret;
3649
3650 /* allocate space for base edesc and link tables */
3651 edesc = qi_cache_zalloc(GFP_DMA | flags);
3652 if (!edesc)
3653 return -ENOMEM;
3654
3655 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3656 sg_table = &edesc->sgt[0];
3657
3658 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3659 DMA_BIDIRECTIONAL);
3660 if (ret)
3661 goto unmap_ctx;
3662
3663 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3664 if (ret)
3665 goto unmap_ctx;
3666
3667 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3668
3669 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3670 DMA_TO_DEVICE);
3671 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3672 dev_err(ctx->dev, "unable to map S/G table\n");
3673 ret = -ENOMEM;
3674 goto unmap_ctx;
3675 }
3676 edesc->qm_sg_bytes = qm_sg_bytes;
3677
3678 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3679 dpaa2_fl_set_final(in_fle, true);
3680 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3681 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3682 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3683 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3684 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3685 dpaa2_fl_set_len(out_fle, digestsize);
3686
3687 req_ctx->flc = &ctx->flc[FINALIZE];
3688 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3689 req_ctx->cbk = ahash_done_ctx_src;
3690 req_ctx->ctx = &req->base;
3691 req_ctx->edesc = edesc;
3692
3693 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3694 if (ret == -EINPROGRESS ||
3695 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3696 return ret;
3697
3698 unmap_ctx:
3699 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3700 qi_cache_free(edesc);
3701 return ret;
3702 }
3703
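/*
 * ahash_finup_ctx - finalize with trailing data, running context present
 *
 * Same as ahash_final_ctx(), except the S/G table additionally chains the
 * request's source scatterlist, so context + buffer + req->src are hashed
 * in a single FINALIZE operation.
 */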
3704 static int ahash_finup_ctx(struct ahash_request *req)
3705 {
3706 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3707 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3708 struct caam_hash_state *state = ahash_request_ctx(req);
3709 struct caam_request *req_ctx = &state->caam_req;
3710 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3711 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3712 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3713 GFP_KERNEL : GFP_ATOMIC;
3714 int buflen = state->buflen;
3715 int qm_sg_bytes, qm_sg_src_index;
3716 int src_nents, mapped_nents;
3717 int digestsize = crypto_ahash_digestsize(ahash);
3718 struct ahash_edesc *edesc;
3719 struct dpaa2_sg_entry *sg_table;
3720 int ret;
3721
3722 src_nents = sg_nents_for_len(req->src, req->nbytes);
3723 if (src_nents < 0) {
3724 dev_err(ctx->dev, "Invalid number of src SG.\n");
3725 return src_nents;
3726 }
3727
3728 if (src_nents) {
3729 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3730 DMA_TO_DEVICE);
3731 if (!mapped_nents) {
3732 dev_err(ctx->dev, "unable to DMA map source\n");
3733 return -ENOMEM;
3734 }
3735 } else {
3736 mapped_nents = 0;
3737 }
3738
3739 /* allocate space for base edesc and link tables */
3740 edesc = qi_cache_zalloc(GFP_DMA | flags);
3741 if (!edesc) {
3742 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3743 return -ENOMEM;
3744 }
3745
3746 edesc->src_nents = src_nents;
3747 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3748 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3749 sizeof(*sg_table);
3750 sg_table = &edesc->sgt[0];
3751
3752 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3753 DMA_BIDIRECTIONAL);
3754 if (ret)
3755 goto unmap_ctx;
3756
3757 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3758 if (ret)
3759 goto unmap_ctx;
3760
3761 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3762
3763 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3764 DMA_TO_DEVICE);
3765 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3766 dev_err(ctx->dev, "unable to map S/G table\n");
3767 ret = -ENOMEM;
3768 goto unmap_ctx;
3769 }
3770 edesc->qm_sg_bytes = qm_sg_bytes;
3771
3772 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3773 dpaa2_fl_set_final(in_fle, true);
3774 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3775 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3776 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3777 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3778 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3779 dpaa2_fl_set_len(out_fle, digestsize);
3780
3781 req_ctx->flc = &ctx->flc[FINALIZE];
3782 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3783 req_ctx->cbk = ahash_done_ctx_src;
3784 req_ctx->ctx = &req->base;
3785 req_ctx->edesc = edesc;
3786
3787 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3788 if (ret == -EINPROGRESS ||
3789 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3790 return ret;
3791
3792 unmap_ctx:
3793 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3794 qi_cache_free(edesc);
3795 return ret;
3796 }
3797
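/*
 * ahash_digest - one-shot hash of req->src, no accumulated state
 *
 * When the source maps to a single DMA segment it is passed as a "single"
 * frame list entry; otherwise a QM S/G table is built. The DIGEST flow
 * context writes the digest directly into state->caam_ctx.
 */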
3798 static int ahash_digest(struct ahash_request *req)
3799 {
3800 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3801 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3802 struct caam_hash_state *state = ahash_request_ctx(req);
3803 struct caam_request *req_ctx = &state->caam_req;
3804 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3805 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3806 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3807 GFP_KERNEL : GFP_ATOMIC;
3808 int digestsize = crypto_ahash_digestsize(ahash);
3809 int src_nents, mapped_nents;
3810 struct ahash_edesc *edesc;
3811 int ret = -ENOMEM;
3812
3813 state->buf_dma = 0;
3814
3815 src_nents = sg_nents_for_len(req->src, req->nbytes);
3816 if (src_nents < 0) {
3817 dev_err(ctx->dev, "Invalid number of src SG.\n");
3818 return src_nents;
3819 }
3820
3821 if (src_nents) {
3822 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3823 DMA_TO_DEVICE);
3824 if (!mapped_nents) {
3825 dev_err(ctx->dev, "unable to map source for DMA\n");
3826 return ret;
3827 }
3828 } else {
3829 mapped_nents = 0;
3830 }
3831
3832 /* allocate space for base edesc and link tables */
3833 edesc = qi_cache_zalloc(GFP_DMA | flags);
3834 if (!edesc) {
3835 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3836 return ret;
3837 }
3838
3839 edesc->src_nents = src_nents;
3840 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3841
3842 if (mapped_nents > 1) {
3843 int qm_sg_bytes;
3844 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3845
3846 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3847 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3848 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3849 qm_sg_bytes, DMA_TO_DEVICE);
3850 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3851 dev_err(ctx->dev, "unable to map S/G table\n");
3852 goto unmap;
3853 }
3854 edesc->qm_sg_bytes = qm_sg_bytes;
3855 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3856 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3857 } else {
3858 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3859 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3860 }
3861
3862 state->ctx_dma_len = digestsize;
3863 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3864 DMA_FROM_DEVICE);
3865 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3866 dev_err(ctx->dev, "unable to map ctx\n");
3867 state->ctx_dma = 0;
3868 goto unmap;
3869 }
3870
3871 dpaa2_fl_set_final(in_fle, true);
3872 dpaa2_fl_set_len(in_fle, req->nbytes);
3873 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3874 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3875 dpaa2_fl_set_len(out_fle, digestsize);
3876
3877 req_ctx->flc = &ctx->flc[DIGEST];
3878 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3879 req_ctx->cbk = ahash_done;
3880 req_ctx->ctx = &req->base;
3881 req_ctx->edesc = edesc;
3882 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3883 if (ret == -EINPROGRESS ||
3884 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3885 return ret;
3886
3887 unmap:
3888 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3889 qi_cache_free(edesc);
3890 return ret;
3891 }
3892
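/*
 * ahash_final_no_ctx - finalize when nothing has been sent to CAAM yet
 *
 * Only the buffered bytes (possibly zero) need hashing, so the DIGEST
 * flow context is reused instead of FINALIZE.
 */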
3893 static int ahash_final_no_ctx(struct ahash_request *req)
3894 {
3895 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3896 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3897 struct caam_hash_state *state = ahash_request_ctx(req);
3898 struct caam_request *req_ctx = &state->caam_req;
3899 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3900 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3901 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3902 GFP_KERNEL : GFP_ATOMIC;
3903 u8 *buf = state->buf;
3904 int buflen = state->buflen;
3905 int digestsize = crypto_ahash_digestsize(ahash);
3906 struct ahash_edesc *edesc;
3907 int ret = -ENOMEM;
3908
3909 /* allocate space for base edesc and link tables */
3910 edesc = qi_cache_zalloc(GFP_DMA | flags);
3911 if (!edesc)
3912 return ret;
3913
3914 if (buflen) {
3915 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3916 DMA_TO_DEVICE);
3917 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3918 dev_err(ctx->dev, "unable to map src\n");
3919 goto unmap;
3920 }
3921 }
3922
3923 state->ctx_dma_len = digestsize;
3924 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3925 DMA_FROM_DEVICE);
3926 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3927 dev_err(ctx->dev, "unable to map ctx\n");
3928 state->ctx_dma = 0;
3929 goto unmap;
3930 }
3931
3932 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3933 dpaa2_fl_set_final(in_fle, true);
3934 /*
3935 * The crypto engine requires the input entry to be present when the
3936 * "frame list" FD format is used. Since the engine does not support
3937 * FMT=2'b11 (unused entry type), leaving in_fle zeroized (except for
3938 * the "Final" flag) is the best option.
3939 */
3940 if (buflen) {
3941 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3942 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3943 dpaa2_fl_set_len(in_fle, buflen);
3944 }
3945 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3946 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3947 dpaa2_fl_set_len(out_fle, digestsize);
3948
3949 req_ctx->flc = &ctx->flc[DIGEST];
3950 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3951 req_ctx->cbk = ahash_done;
3952 req_ctx->ctx = &req->base;
3953 req_ctx->edesc = edesc;
3954
3955 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3956 if (ret == -EINPROGRESS ||
3957 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3958 return ret;
3959
3960 unmap:
3961 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3962 qi_cache_free(edesc);
3963 return ret;
3964 }
3965
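/*
 * ahash_update_no_ctx - first update that accumulates at least one block
 *
 * Hashes the buffered bytes plus the new data through the UPDATE_FIRST
 * flow context, which also produces a running context. On success the
 * state handlers are switched over to the *_ctx variants.
 */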
3966 static int ahash_update_no_ctx(struct ahash_request *req)
3967 {
3968 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3969 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3970 struct caam_hash_state *state = ahash_request_ctx(req);
3971 struct caam_request *req_ctx = &state->caam_req;
3972 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3973 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3974 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3975 GFP_KERNEL : GFP_ATOMIC;
3976 u8 *buf = state->buf;
3977 int *buflen = &state->buflen;
3978 int *next_buflen = &state->next_buflen;
3979 int in_len = *buflen + req->nbytes, to_hash;
3980 int qm_sg_bytes, src_nents, mapped_nents;
3981 struct ahash_edesc *edesc;
3982 int ret = 0;
3983
3984 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3985 to_hash = in_len - *next_buflen;
3986
3987 if (to_hash) {
3988 struct dpaa2_sg_entry *sg_table;
3989 int src_len = req->nbytes - *next_buflen;
3990
3991 src_nents = sg_nents_for_len(req->src, src_len);
3992 if (src_nents < 0) {
3993 dev_err(ctx->dev, "Invalid number of src SG.\n");
3994 return src_nents;
3995 }
3996
3997 if (src_nents) {
3998 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3999 DMA_TO_DEVICE);
4000 if (!mapped_nents) {
4001 dev_err(ctx->dev, "unable to DMA map source\n");
4002 return -ENOMEM;
4003 }
4004 } else {
4005 mapped_nents = 0;
4006 }
4007
4008 /* allocate space for base edesc and link tables */
4009 edesc = qi_cache_zalloc(GFP_DMA | flags);
4010 if (!edesc) {
4011 dma_unmap_sg(ctx->dev, req->src, src_nents,
4012 DMA_TO_DEVICE);
4013 return -ENOMEM;
4014 }
4015
4016 edesc->src_nents = src_nents;
4017 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4018 sizeof(*sg_table);
4019 sg_table = &edesc->sgt[0];
4020
4021 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4022 if (ret)
4023 goto unmap_ctx;
4024
4025 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4026
4027 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4028 qm_sg_bytes, DMA_TO_DEVICE);
4029 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4030 dev_err(ctx->dev, "unable to map S/G table\n");
4031 ret = -ENOMEM;
4032 goto unmap_ctx;
4033 }
4034 edesc->qm_sg_bytes = qm_sg_bytes;
4035
4036 state->ctx_dma_len = ctx->ctx_len;
4037 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4038 ctx->ctx_len, DMA_FROM_DEVICE);
4039 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4040 dev_err(ctx->dev, "unable to map ctx\n");
4041 state->ctx_dma = 0;
4042 ret = -ENOMEM;
4043 goto unmap_ctx;
4044 }
4045
4046 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4047 dpaa2_fl_set_final(in_fle, true);
4048 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4049 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4050 dpaa2_fl_set_len(in_fle, to_hash);
4051 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4052 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4053 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4054
4055 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4056 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4057 req_ctx->cbk = ahash_done_ctx_dst;
4058 req_ctx->ctx = &req->base;
4059 req_ctx->edesc = edesc;
4060
4061 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4062 if (ret != -EINPROGRESS &&
4063 !(ret == -EBUSY &&
4064 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4065 goto unmap_ctx;
4066
4067 state->update = ahash_update_ctx;
4068 state->finup = ahash_finup_ctx;
4069 state->final = ahash_final_ctx;
4070 } else if (*next_buflen) {
4071 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4072 req->nbytes, 0);
4073 *buflen = *next_buflen;
4074
4075 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4076 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4077 *buflen, 1);
4078 }
4079
4080 return ret;
4081 unmap_ctx:
4082 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4083 qi_cache_free(edesc);
4084 return ret;
4085 }
4086
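/*
 * ahash_finup_no_ctx - finalize with trailing data, no running context
 *
 * The buffer and req->src are chained in a QM S/G table and hashed in a
 * single DIGEST operation.
 */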
4087 static int ahash_finup_no_ctx(struct ahash_request *req)
4088 {
4089 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4090 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4091 struct caam_hash_state *state = ahash_request_ctx(req);
4092 struct caam_request *req_ctx = &state->caam_req;
4093 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4094 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4095 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4096 GFP_KERNEL : GFP_ATOMIC;
4097 int buflen = state->buflen;
4098 int qm_sg_bytes, src_nents, mapped_nents;
4099 int digestsize = crypto_ahash_digestsize(ahash);
4100 struct ahash_edesc *edesc;
4101 struct dpaa2_sg_entry *sg_table;
4102 int ret = -ENOMEM;
4103
4104 src_nents = sg_nents_for_len(req->src, req->nbytes);
4105 if (src_nents < 0) {
4106 dev_err(ctx->dev, "Invalid number of src SG.\n");
4107 return src_nents;
4108 }
4109
4110 if (src_nents) {
4111 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4112 DMA_TO_DEVICE);
4113 if (!mapped_nents) {
4114 dev_err(ctx->dev, "unable to DMA map source\n");
4115 return ret;
4116 }
4117 } else {
4118 mapped_nents = 0;
4119 }
4120
4121 /* allocate space for base edesc and link tables */
4122 edesc = qi_cache_zalloc(GFP_DMA | flags);
4123 if (!edesc) {
4124 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4125 return ret;
4126 }
4127
4128 edesc->src_nents = src_nents;
4129 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4130 sg_table = &edesc->sgt[0];
4131
4132 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4133 if (ret)
4134 goto unmap;
4135
4136 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4137
4138 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4139 DMA_TO_DEVICE);
4140 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4141 dev_err(ctx->dev, "unable to map S/G table\n");
4142 ret = -ENOMEM;
4143 goto unmap;
4144 }
4145 edesc->qm_sg_bytes = qm_sg_bytes;
4146
4147 state->ctx_dma_len = digestsize;
4148 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4149 DMA_FROM_DEVICE);
4150 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4151 dev_err(ctx->dev, "unable to map ctx\n");
4152 state->ctx_dma = 0;
4153 ret = -ENOMEM;
4154 goto unmap;
4155 }
4156
4157 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4158 dpaa2_fl_set_final(in_fle, true);
4159 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4160 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4161 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4162 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4163 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4164 dpaa2_fl_set_len(out_fle, digestsize);
4165
4166 req_ctx->flc = &ctx->flc[DIGEST];
4167 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4168 req_ctx->cbk = ahash_done;
4169 req_ctx->ctx = &req->base;
4170 req_ctx->edesc = edesc;
4171 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4172 if (ret != -EINPROGRESS &&
4173 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4174 goto unmap;
4175
4176 return ret;
4177 unmap:
4178 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4179 qi_cache_free(edesc);
4180 return ret;
4181 }
4182
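/*
 * ahash_update_first - very first update on a request
 *
 * There is no buffered data yet, so full blocks from req->src go straight
 * to the UPDATE_FIRST flow context; sub-block leftovers are copied into
 * state->buf. Subsequent operations use the *_ctx or *_no_ctx handlers
 * selected below.
 */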
4183 static int ahash_update_first(struct ahash_request *req)
4184 {
4185 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4186 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
4187 struct caam_hash_state *state = ahash_request_ctx(req);
4188 struct caam_request *req_ctx = &state->caam_req;
4189 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4190 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4191 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4192 GFP_KERNEL : GFP_ATOMIC;
4193 u8 *buf = state->buf;
4194 int *buflen = &state->buflen;
4195 int *next_buflen = &state->next_buflen;
4196 int to_hash;
4197 int src_nents, mapped_nents;
4198 struct ahash_edesc *edesc;
4199 int ret = 0;
4200
4201 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4202 1);
4203 to_hash = req->nbytes - *next_buflen;
4204
4205 if (to_hash) {
4206 struct dpaa2_sg_entry *sg_table;
4207 int src_len = req->nbytes - *next_buflen;
4208
4209 src_nents = sg_nents_for_len(req->src, src_len);
4210 if (src_nents < 0) {
4211 dev_err(ctx->dev, "Invalid number of src SG.\n");
4212 return src_nents;
4213 }
4214
4215 if (src_nents) {
4216 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4217 DMA_TO_DEVICE);
4218 if (!mapped_nents) {
4219 dev_err(ctx->dev, "unable to map source for DMA\n");
4220 return -ENOMEM;
4221 }
4222 } else {
4223 mapped_nents = 0;
4224 }
4225
4226 /* allocate space for base edesc and link tables */
4227 edesc = qi_cache_zalloc(GFP_DMA | flags);
4228 if (!edesc) {
4229 dma_unmap_sg(ctx->dev, req->src, src_nents,
4230 DMA_TO_DEVICE);
4231 return -ENOMEM;
4232 }
4233
4234 edesc->src_nents = src_nents;
4235 sg_table = &edesc->sgt[0];
4236
4237 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4238 dpaa2_fl_set_final(in_fle, true);
4239 dpaa2_fl_set_len(in_fle, to_hash);
4240
4241 if (mapped_nents > 1) {
4242 int qm_sg_bytes;
4243
4244 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4245 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4246 sizeof(*sg_table);
4247 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4248 qm_sg_bytes,
4249 DMA_TO_DEVICE);
4250 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4251 dev_err(ctx->dev, "unable to map S/G table\n");
4252 ret = -ENOMEM;
4253 goto unmap_ctx;
4254 }
4255 edesc->qm_sg_bytes = qm_sg_bytes;
4256 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4257 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4258 } else {
4259 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4260 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4261 }
4262
4263 state->ctx_dma_len = ctx->ctx_len;
4264 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4265 ctx->ctx_len, DMA_FROM_DEVICE);
4266 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4267 dev_err(ctx->dev, "unable to map ctx\n");
4268 state->ctx_dma = 0;
4269 ret = -ENOMEM;
4270 goto unmap_ctx;
4271 }
4272
4273 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4274 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4275 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4276
4277 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4278 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4279 req_ctx->cbk = ahash_done_ctx_dst;
4280 req_ctx->ctx = &req->base;
4281 req_ctx->edesc = edesc;
4282
4283 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4284 if (ret != -EINPROGRESS &&
4285 !(ret == -EBUSY && req->base.flags &
4286 CRYPTO_TFM_REQ_MAY_BACKLOG))
4287 goto unmap_ctx;
4288
4289 state->update = ahash_update_ctx;
4290 state->finup = ahash_finup_ctx;
4291 state->final = ahash_final_ctx;
4292 } else if (*next_buflen) {
4293 state->update = ahash_update_no_ctx;
4294 state->finup = ahash_finup_no_ctx;
4295 state->final = ahash_final_no_ctx;
4296 scatterwalk_map_and_copy(buf, req->src, 0,
4297 req->nbytes, 0);
4298 *buflen = *next_buflen;
4299
4300 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4301 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4302 *buflen, 1);
4303 }
4304
4305 return ret;
4306 unmap_ctx:
4307 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4308 qi_cache_free(edesc);
4309 return ret;
4310 }
4311
4312 static int ahash_finup_first(struct ahash_request *req)
4313 {
4314 return ahash_digest(req);
4315 }
4316
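/*
 * The exported ahash entry points below dispatch through per-request
 * function pointers, so the handler set tracks how much state has been
 * accumulated so far (none -> *_first, buffer only -> *_no_ctx, running
 * context -> *_ctx).
 */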
4317 static int ahash_init(struct ahash_request *req)
4318 {
4319 struct caam_hash_state *state = ahash_request_ctx(req);
4320
4321 state->update = ahash_update_first;
4322 state->finup = ahash_finup_first;
4323 state->final = ahash_final_no_ctx;
4324
4325 state->ctx_dma = 0;
4326 state->ctx_dma_len = 0;
4327 state->buf_dma = 0;
4328 state->buflen = 0;
4329 state->next_buflen = 0;
4330
4331 return 0;
4332 }
4333
4334 static int ahash_update(struct ahash_request *req)
4335 {
4336 struct caam_hash_state *state = ahash_request_ctx(req);
4337
4338 return state->update(req);
4339 }
4340
4341 static int ahash_finup(struct ahash_request *req)
4342 {
4343 struct caam_hash_state *state = ahash_request_ctx(req);
4344
4345 return state->finup(req);
4346 }
4347
4348 static int ahash_final(struct ahash_request *req)
4349 {
4350 struct caam_hash_state *state = ahash_request_ctx(req);
4351
4352 return state->final(req);
4353 }
4354
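/*
 * Export/import serialize the software-visible part of the hash state:
 * the partial block buffer, the CAAM running context and the current
 * handler set. DMA addresses are intentionally not exported; import
 * starts from a clean state.
 */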
4355 static int ahash_export(struct ahash_request *req, void *out)
4356 {
4357 struct caam_hash_state *state = ahash_request_ctx(req);
4358 struct caam_export_state *export = out;
4359 u8 *buf = state->buf;
4360 int len = state->buflen;
4361
4362 memcpy(export->buf, buf, len);
4363 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4364 export->buflen = len;
4365 export->update = state->update;
4366 export->final = state->final;
4367 export->finup = state->finup;
4368
4369 return 0;
4370 }
4371
4372 static int ahash_import(struct ahash_request *req, const void *in)
4373 {
4374 struct caam_hash_state *state = ahash_request_ctx(req);
4375 const struct caam_export_state *export = in;
4376
4377 memset(state, 0, sizeof(*state));
4378 memcpy(state->buf, export->buf, export->buflen);
4379 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4380 state->buflen = export->buflen;
4381 state->update = export->update;
4382 state->final = export->final;
4383 state->finup = export->finup;
4384
4385 return 0;
4386 }
4387
4388 struct caam_hash_template {
4389 char name[CRYPTO_MAX_ALG_NAME];
4390 char driver_name[CRYPTO_MAX_ALG_NAME];
4391 char hmac_name[CRYPTO_MAX_ALG_NAME];
4392 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4393 unsigned int blocksize;
4394 struct ahash_alg template_ahash;
4395 u32 alg_type;
4396 };
4397
4398 /* ahash descriptors */
4399 static struct caam_hash_template driver_hash[] = {
4400 {
4401 .name = "sha1",
4402 .driver_name = "sha1-caam-qi2",
4403 .hmac_name = "hmac(sha1)",
4404 .hmac_driver_name = "hmac-sha1-caam-qi2",
4405 .blocksize = SHA1_BLOCK_SIZE,
4406 .template_ahash = {
4407 .init = ahash_init,
4408 .update = ahash_update,
4409 .final = ahash_final,
4410 .finup = ahash_finup,
4411 .digest = ahash_digest,
4412 .export = ahash_export,
4413 .import = ahash_import,
4414 .setkey = ahash_setkey,
4415 .halg = {
4416 .digestsize = SHA1_DIGEST_SIZE,
4417 .statesize = sizeof(struct caam_export_state),
4418 },
4419 },
4420 .alg_type = OP_ALG_ALGSEL_SHA1,
4421 }, {
4422 .name = "sha224",
4423 .driver_name = "sha224-caam-qi2",
4424 .hmac_name = "hmac(sha224)",
4425 .hmac_driver_name = "hmac-sha224-caam-qi2",
4426 .blocksize = SHA224_BLOCK_SIZE,
4427 .template_ahash = {
4428 .init = ahash_init,
4429 .update = ahash_update,
4430 .final = ahash_final,
4431 .finup = ahash_finup,
4432 .digest = ahash_digest,
4433 .export = ahash_export,
4434 .import = ahash_import,
4435 .setkey = ahash_setkey,
4436 .halg = {
4437 .digestsize = SHA224_DIGEST_SIZE,
4438 .statesize = sizeof(struct caam_export_state),
4439 },
4440 },
4441 .alg_type = OP_ALG_ALGSEL_SHA224,
4442 }, {
4443 .name = "sha256",
4444 .driver_name = "sha256-caam-qi2",
4445 .hmac_name = "hmac(sha256)",
4446 .hmac_driver_name = "hmac-sha256-caam-qi2",
4447 .blocksize = SHA256_BLOCK_SIZE,
4448 .template_ahash = {
4449 .init = ahash_init,
4450 .update = ahash_update,
4451 .final = ahash_final,
4452 .finup = ahash_finup,
4453 .digest = ahash_digest,
4454 .export = ahash_export,
4455 .import = ahash_import,
4456 .setkey = ahash_setkey,
4457 .halg = {
4458 .digestsize = SHA256_DIGEST_SIZE,
4459 .statesize = sizeof(struct caam_export_state),
4460 },
4461 },
4462 .alg_type = OP_ALG_ALGSEL_SHA256,
4463 }, {
4464 .name = "sha384",
4465 .driver_name = "sha384-caam-qi2",
4466 .hmac_name = "hmac(sha384)",
4467 .hmac_driver_name = "hmac-sha384-caam-qi2",
4468 .blocksize = SHA384_BLOCK_SIZE,
4469 .template_ahash = {
4470 .init = ahash_init,
4471 .update = ahash_update,
4472 .final = ahash_final,
4473 .finup = ahash_finup,
4474 .digest = ahash_digest,
4475 .export = ahash_export,
4476 .import = ahash_import,
4477 .setkey = ahash_setkey,
4478 .halg = {
4479 .digestsize = SHA384_DIGEST_SIZE,
4480 .statesize = sizeof(struct caam_export_state),
4481 },
4482 },
4483 .alg_type = OP_ALG_ALGSEL_SHA384,
4484 }, {
4485 .name = "sha512",
4486 .driver_name = "sha512-caam-qi2",
4487 .hmac_name = "hmac(sha512)",
4488 .hmac_driver_name = "hmac-sha512-caam-qi2",
4489 .blocksize = SHA512_BLOCK_SIZE,
4490 .template_ahash = {
4491 .init = ahash_init,
4492 .update = ahash_update,
4493 .final = ahash_final,
4494 .finup = ahash_finup,
4495 .digest = ahash_digest,
4496 .export = ahash_export,
4497 .import = ahash_import,
4498 .setkey = ahash_setkey,
4499 .halg = {
4500 .digestsize = SHA512_DIGEST_SIZE,
4501 .statesize = sizeof(struct caam_export_state),
4502 },
4503 },
4504 .alg_type = OP_ALG_ALGSEL_SHA512,
4505 }, {
4506 .name = "md5",
4507 .driver_name = "md5-caam-qi2",
4508 .hmac_name = "hmac(md5)",
4509 .hmac_driver_name = "hmac-md5-caam-qi2",
4510 .blocksize = MD5_BLOCK_WORDS * 4,
4511 .template_ahash = {
4512 .init = ahash_init,
4513 .update = ahash_update,
4514 .final = ahash_final,
4515 .finup = ahash_finup,
4516 .digest = ahash_digest,
4517 .export = ahash_export,
4518 .import = ahash_import,
4519 .setkey = ahash_setkey,
4520 .halg = {
4521 .digestsize = MD5_DIGEST_SIZE,
4522 .statesize = sizeof(struct caam_export_state),
4523 },
4524 },
4525 .alg_type = OP_ALG_ALGSEL_MD5,
4526 }
4527 };
4528
4529 struct caam_hash_alg {
4530 struct list_head entry;
4531 struct device *dev;
4532 int alg_type;
4533 struct ahash_alg ahash_alg;
4534 };
4535
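/*
 * caam_hash_cra_init - per-transform setup
 *
 * Maps the (optional) key and the flow context array for DMA, stores the
 * algorithm selector and derives ctx_len, i.e. the size of the MDHA
 * running digest (which includes HASH_MSG_LEN bytes of message length).
 * Note the 32 and 64 entries in runninglen[]: SHA-224 and SHA-384 carry
 * the full SHA-256 / SHA-512 internal state while hashing.
 */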
4536 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4537 {
4538 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4539 struct crypto_alg *base = tfm->__crt_alg;
4540 struct hash_alg_common *halg =
4541 container_of(base, struct hash_alg_common, base);
4542 struct ahash_alg *alg =
4543 container_of(halg, struct ahash_alg, halg);
4544 struct caam_hash_alg *caam_hash =
4545 container_of(alg, struct caam_hash_alg, ahash_alg);
4546 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4547 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4548 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4549 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4550 HASH_MSG_LEN + 32,
4551 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4552 HASH_MSG_LEN + 64,
4553 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4554 dma_addr_t dma_addr;
4555 int i;
4556
4557 ctx->dev = caam_hash->dev;
4558
4559 if (alg->setkey) {
4560 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4561 ARRAY_SIZE(ctx->key),
4562 DMA_TO_DEVICE,
4563 DMA_ATTR_SKIP_CPU_SYNC);
4564 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4565 dev_err(ctx->dev, "unable to map key\n");
4566 return -ENOMEM;
4567 }
4568 }
4569
4570 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4571 DMA_BIDIRECTIONAL,
4572 DMA_ATTR_SKIP_CPU_SYNC);
4573 if (dma_mapping_error(ctx->dev, dma_addr)) {
4574 dev_err(ctx->dev, "unable to map shared descriptors\n");
4575 if (ctx->adata.key_dma)
4576 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4577 ARRAY_SIZE(ctx->key),
4578 DMA_TO_DEVICE,
4579 DMA_ATTR_SKIP_CPU_SYNC);
4580 return -ENOMEM;
4581 }
4582
4583 for (i = 0; i < HASH_NUM_OP; i++)
4584 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4585
4586 /* copy descriptor header template value */
4587 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4588
4589 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4590 OP_ALG_ALGSEL_SUBMASK) >>
4591 OP_ALG_ALGSEL_SHIFT];
4592
4593 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4594 sizeof(struct caam_hash_state));
4595
4596 /*
4597 * For keyed hash algorithms shared descriptors
4598 * will be created later in setkey() callback
4599 */
4600 return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4601 }
4602
4603 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4604 {
4605 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
4606
4607 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4608 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4609 if (ctx->adata.key_dma)
4610 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4611 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4612 DMA_ATTR_SKIP_CPU_SYNC);
4613 }
4614
4615 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4616 struct caam_hash_template *template, bool keyed)
4617 {
4618 struct caam_hash_alg *t_alg;
4619 struct ahash_alg *halg;
4620 struct crypto_alg *alg;
4621
4622 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4623 if (!t_alg)
4624 return ERR_PTR(-ENOMEM);
4625
4626 t_alg->ahash_alg = template->template_ahash;
4627 halg = &t_alg->ahash_alg;
4628 alg = &halg->halg.base;
4629
4630 if (keyed) {
4631 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4632 template->hmac_name);
4633 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4634 template->hmac_driver_name);
4635 } else {
4636 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4637 template->name);
4638 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4639 template->driver_name);
4640 t_alg->ahash_alg.setkey = NULL;
4641 }
4642 alg->cra_module = THIS_MODULE;
4643 alg->cra_init = caam_hash_cra_init;
4644 alg->cra_exit = caam_hash_cra_exit;
4645 alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
4646 alg->cra_priority = CAAM_CRA_PRIORITY;
4647 alg->cra_blocksize = template->blocksize;
4648 alg->cra_alignmask = 0;
4649 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4650
4651 t_alg->alg_type = template->alg_type;
4652 t_alg->dev = dev;
4653
4654 return t_alg;
4655 }
4656
4657 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4658 {
4659 struct dpaa2_caam_priv_per_cpu *ppriv;
4660
4661 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4662 napi_schedule_irqoff(&ppriv->napi);
4663 }
4664
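/*
 * dpaa2_dpseci_dpio_setup - affine DPIO services to (at most num_pairs)
 * online cores: register an FQDAN notification callback and create a
 * dequeue store per core. Fails with -EPROBE_DEFER when no affine DPIO
 * exists yet, so probing can be retried once DPIO devices show up.
 */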
4665 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4666 {
4667 struct device *dev = priv->dev;
4668 struct dpaa2_io_notification_ctx *nctx;
4669 struct dpaa2_caam_priv_per_cpu *ppriv;
4670 int err, i = 0, cpu;
4671
4672 for_each_online_cpu(cpu) {
4673 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4674 ppriv->priv = priv;
4675 nctx = &ppriv->nctx;
4676 nctx->is_cdan = 0;
4677 nctx->id = ppriv->rsp_fqid;
4678 nctx->desired_cpu = cpu;
4679 nctx->cb = dpaa2_caam_fqdan_cb;
4680
4681 /* Register notification callbacks */
4682 ppriv->dpio = dpaa2_io_service_select(cpu);
4683 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4684 if (unlikely(err)) {
4685 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4686 nctx->cb = NULL;
4687 /*
4688 * If no affine DPIO for this core, there's probably
4689 * none available for next cores either. Signal we want
4690 * to retry later, in case the DPIO devices weren't
4691 * probed yet.
4692 */
4693 err = -EPROBE_DEFER;
4694 goto err;
4695 }
4696
4697 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4698 dev);
4699 if (unlikely(!ppriv->store)) {
4700 dev_err(dev, "dpaa2_io_store_create() failed\n");
4701 err = -ENOMEM;
4702 goto err;
4703 }
4704
4705 if (++i == priv->num_pairs)
4706 break;
4707 }
4708
4709 return 0;
4710
4711 err:
4712 for_each_online_cpu(cpu) {
4713 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4714 if (!ppriv->nctx.cb)
4715 break;
4716 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4717 }
4718
4719 for_each_online_cpu(cpu) {
4720 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4721 if (!ppriv->store)
4722 break;
4723 dpaa2_io_store_destroy(ppriv->store);
4724 }
4725
4726 return err;
4727 }
4728
4729 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4730 {
4731 struct dpaa2_caam_priv_per_cpu *ppriv;
4732 int i = 0, cpu;
4733
4734 for_each_online_cpu(cpu) {
4735 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4736 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4737 priv->dev);
4738 dpaa2_io_store_destroy(ppriv->store);
4739
4740 if (++i == priv->num_pairs)
4741 return;
4742 }
4743 }
4744
4745 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4746 {
4747 struct dpseci_rx_queue_cfg rx_queue_cfg;
4748 struct device *dev = priv->dev;
4749 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4750 struct dpaa2_caam_priv_per_cpu *ppriv;
4751 int err = 0, i = 0, cpu;
4752
4753 /* Configure Rx queues */
4754 for_each_online_cpu(cpu) {
4755 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4756
4757 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4758 DPSECI_QUEUE_OPT_USER_CTX;
4759 rx_queue_cfg.order_preservation_en = 0;
4760 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4761 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4762 /*
4763 * Rx priority (WQ) doesn't really matter, since we use
4764 * pull mode, i.e. volatile dequeues from specific FQs
4765 */
4766 rx_queue_cfg.dest_cfg.priority = 0;
4767 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4768
4769 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4770 &rx_queue_cfg);
4771 if (err) {
4772 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4773 err);
4774 return err;
4775 }
4776
4777 if (++i == priv->num_pairs)
4778 break;
4779 }
4780
4781 return err;
4782 }
4783
4784 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4785 {
4786 struct device *dev = priv->dev;
4787
4788 if (!priv->cscn_mem)
4789 return;
4790
4791 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4792 kfree(priv->cscn_mem);
4793 }
4794
4795 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4796 {
4797 struct device *dev = priv->dev;
4798 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4799 int err;
4800
4801 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4802 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4803 if (err)
4804 dev_err(dev, "dpseci_reset() failed\n");
4805 }
4806
4807 dpaa2_dpseci_congestion_free(priv);
4808 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4809 }
4810
4811 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4812 const struct dpaa2_fd *fd)
4813 {
4814 struct caam_request *req;
4815 u32 fd_err;
4816
4817 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4818 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4819 return;
4820 }
4821
4822 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4823 if (unlikely(fd_err))
4824 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4825
4826 /*
4827 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4828 * in FD[ERR] or FD[FRC].
4829 */
4830 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4831 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4832 DMA_BIDIRECTIONAL);
4833 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4834 }
4835
4836 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4837 {
4838 int err;
4839
4840 /* Retry while portal is busy */
4841 do {
4842 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4843 ppriv->store);
4844 } while (err == -EBUSY);
4845
4846 if (unlikely(err))
4847 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
4848
4849 return err;
4850 }
4851
4852 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4853 {
4854 struct dpaa2_dq *dq;
4855 int cleaned = 0, is_last;
4856
4857 do {
4858 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4859 if (unlikely(!dq)) {
4860 if (unlikely(!is_last)) {
4861 dev_dbg(ppriv->priv->dev,
4862 "FQ %d returned no valid frames\n",
4863 ppriv->rsp_fqid);
4864 /*
4865 * MUST retry until we get some sort of
4866 * valid response token (be it "empty dequeue"
4867 * or a valid frame).
4868 */
4869 continue;
4870 }
4871 break;
4872 }
4873
4874 /* Process FD */
4875 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4876 cleaned++;
4877 } while (!is_last);
4878
4879 return cleaned;
4880 }
4881
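/*
 * dpaa2_dpseci_poll - NAPI poll callback
 *
 * Each volatile dequeue pulls at most DPAA2_CAAM_STORE_SIZE frames, so
 * the loop stops once another full pull could exceed the budget. The
 * notification is rearmed only when fewer than budget frames were
 * consumed.
 */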
4882 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4883 {
4884 struct dpaa2_caam_priv_per_cpu *ppriv;
4885 struct dpaa2_caam_priv *priv;
4886 int err, cleaned = 0, store_cleaned;
4887
4888 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4889 priv = ppriv->priv;
4890
4891 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4892 return 0;
4893
4894 do {
4895 store_cleaned = dpaa2_caam_store_consume(ppriv);
4896 cleaned += store_cleaned;
4897
4898 if (store_cleaned == 0 ||
4899 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4900 break;
4901
4902 /* Try to dequeue some more */
4903 err = dpaa2_caam_pull_fq(ppriv);
4904 if (unlikely(err))
4905 break;
4906 } while (1);
4907
4908 if (cleaned < budget) {
4909 napi_complete_done(napi, cleaned);
4910 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4911 if (unlikely(err))
4912 dev_err(priv->dev, "Notification rearm failed: %d\n",
4913 err);
4914 }
4915
4916 return cleaned;
4917 }
4918
4919 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4920 u16 token)
4921 {
4922 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4923 struct device *dev = priv->dev;
4924 int err;
4925
4926 /*
4927 * Congestion group feature supported starting with DPSECI API v5.1
4928 * and only when object has been created with this capability.
4929 */
4930 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4931 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4932 return 0;
4933
4934 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
4935 GFP_KERNEL | GFP_DMA);
4936 if (!priv->cscn_mem)
4937 return -ENOMEM;
4938
4939 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
4940 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
4941 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4942 if (dma_mapping_error(dev, priv->cscn_dma)) {
4943 dev_err(dev, "Error mapping CSCN memory area\n");
4944 err = -ENOMEM;
4945 goto err_dma_map;
4946 }
4947
4948 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4949 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4950 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4951 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4952 cong_notif_cfg.message_iova = priv->cscn_dma;
4953 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4954 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4955 DPSECI_CGN_MODE_COHERENT_WRITE;
4956
4957 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4958 &cong_notif_cfg);
4959 if (err) {
4960 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4961 goto err_set_cong;
4962 }
4963
4964 return 0;
4965
4966 err_set_cong:
4967 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4968 err_dma_map:
4969 kfree(priv->cscn_mem);
4970
4971 return err;
4972 }
4973
4974 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4975 {
4976 struct device *dev = &ls_dev->dev;
4977 struct dpaa2_caam_priv *priv;
4978 struct dpaa2_caam_priv_per_cpu *ppriv;
4979 int err, cpu;
4980 u8 i;
4981
4982 priv = dev_get_drvdata(dev);
4983
4984 priv->dev = dev;
4985 priv->dpsec_id = ls_dev->obj_desc.id;
4986
4987 /* Get a handle for the DPSECI this interface is associated with */
4988 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
4989 if (err) {
4990 dev_err(dev, "dpseci_open() failed: %d\n", err);
4991 goto err_open;
4992 }
4993
4994 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
4995 &priv->minor_ver);
4996 if (err) {
4997 dev_err(dev, "dpseci_get_api_version() failed\n");
4998 goto err_get_vers;
4999 }
5000
5001 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5002
5003 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5004 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5005 if (err) {
5006 dev_err(dev, "dpseci_reset() failed\n");
5007 goto err_get_vers;
5008 }
5009 }
5010
5011 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5012 &priv->dpseci_attr);
5013 if (err) {
5014 dev_err(dev, "dpseci_get_attributes() failed\n");
5015 goto err_get_vers;
5016 }
5017
5018 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5019 &priv->sec_attr);
5020 if (err) {
5021 dev_err(dev, "dpseci_get_sec_attr() failed\n");
5022 goto err_get_vers;
5023 }
5024
5025 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5026 if (err) {
5027 dev_err(dev, "setup_congestion() failed\n");
5028 goto err_get_vers;
5029 }
5030
5031 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5032 priv->dpseci_attr.num_tx_queues);
5033 if (priv->num_pairs > num_online_cpus()) {
5034 dev_warn(dev, "%d queues won't be used\n",
5035 priv->num_pairs - num_online_cpus());
5036 priv->num_pairs = num_online_cpus();
5037 }
5038
5039 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5040 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5041 &priv->rx_queue_attr[i]);
5042 if (err) {
5043 dev_err(dev, "dpseci_get_rx_queue() failed\n");
5044 goto err_get_rx_queue;
5045 }
5046 }
5047
5048 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5049 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5050 &priv->tx_queue_attr[i]);
5051 if (err) {
5052 dev_err(dev, "dpseci_get_tx_queue() failed\n");
5053 goto err_get_rx_queue;
5054 }
5055 }
5056
5057 i = 0;
5058 for_each_online_cpu(cpu) {
5059 u8 j;
5060
5061 j = i % priv->num_pairs;
5062
5063 ppriv = per_cpu_ptr(priv->ppriv, cpu);
5064 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5065
5066 /*
5067 * Allow all cores to enqueue, while only some of them
5068 * will take part in dequeuing.
5069 */
5070 if (++i > priv->num_pairs)
5071 continue;
5072
5073 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5074 ppriv->prio = j;
5075
5076 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5077 priv->rx_queue_attr[j].fqid,
5078 priv->tx_queue_attr[j].fqid);
5079
5080 ppriv->net_dev.dev = *dev;
5081 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5082 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
5083 DPAA2_CAAM_NAPI_WEIGHT);
5084 }
5085
5086 return 0;
5087
5088 err_get_rx_queue:
5089 dpaa2_dpseci_congestion_free(priv);
5090 err_get_vers:
5091 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5092 err_open:
5093 return err;
5094 }
5095
5096 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5097 {
5098 struct device *dev = priv->dev;
5099 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5100 struct dpaa2_caam_priv_per_cpu *ppriv;
5101 int i;
5102
5103 for (i = 0; i < priv->num_pairs; i++) {
5104 ppriv = per_cpu_ptr(priv->ppriv, i);
5105 napi_enable(&ppriv->napi);
5106 }
5107
5108 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5109 }
5110
5111 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5112 {
5113 struct device *dev = priv->dev;
5114 struct dpaa2_caam_priv_per_cpu *ppriv;
5115 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5116 int i, err = 0, enabled;
5117
5118 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5119 if (err) {
5120 dev_err(dev, "dpseci_disable() failed\n");
5121 return err;
5122 }
5123
5124 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5125 if (err) {
5126 dev_err(dev, "dpseci_is_enabled() failed\n");
5127 return err;
5128 }
5129
5130 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5131
5132 for (i = 0; i < priv->num_pairs; i++) {
5133 ppriv = per_cpu_ptr(priv->ppriv, i);
5134 napi_disable(&ppriv->napi);
5135 netif_napi_del(&ppriv->napi);
5136 }
5137
5138 return 0;
5139 }
5140
5141 static struct list_head hash_list;
5142
5143 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5144 {
5145 struct device *dev;
5146 struct dpaa2_caam_priv *priv;
5147 int i, err = 0;
5148 bool registered = false;
5149
5150 /*
5151 * There is no way to get CAAM endianness - there is no direct register
5152 * space access and MC f/w does not provide this attribute.
5153 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5154 * property.
5155 */
5156 caam_little_end = true;
5157
5158 caam_imx = false;
5159
5160 dev = &dpseci_dev->dev;
5161
5162 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5163 if (!priv)
5164 return -ENOMEM;
5165
5166 dev_set_drvdata(dev, priv);
5167
5168 priv->domain = iommu_get_domain_for_dev(dev);
5169
5170 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5171 0, SLAB_CACHE_DMA, NULL);
5172 if (!qi_cache) {
5173 dev_err(dev, "Can't allocate SEC cache\n");
5174 return -ENOMEM;
5175 }
5176
5177 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5178 if (err) {
5179 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5180 goto err_dma_mask;
5181 }
5182
5183 /* Obtain a MC portal */
5184 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5185 if (err) {
5186 if (err == -ENXIO)
5187 err = -EPROBE_DEFER;
5188 else
5189 dev_err(dev, "MC portal allocation failed\n");
5190
5191 goto err_dma_mask;
5192 }
5193
5194 priv->ppriv = alloc_percpu(*priv->ppriv);
5195 if (!priv->ppriv) {
5196 dev_err(dev, "alloc_percpu() failed\n");
5197 err = -ENOMEM;
5198 goto err_alloc_ppriv;
5199 }
5200
5201 /* DPSECI initialization */
5202 err = dpaa2_dpseci_setup(dpseci_dev);
5203 if (err) {
5204 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5205 goto err_dpseci_setup;
5206 }
5207
5208 /* DPIO */
5209 err = dpaa2_dpseci_dpio_setup(priv);
5210 if (err) {
5211 dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5212 goto err_dpio_setup;
5213 }
5214
5215 /* DPSECI binding to DPIO */
5216 err = dpaa2_dpseci_bind(priv);
5217 if (err) {
5218 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5219 goto err_bind;
5220 }
5221
5222 /* DPSECI enable */
5223 err = dpaa2_dpseci_enable(priv);
5224 if (err) {
5225 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5226 goto err_bind;
5227 }
5228
5229 dpaa2_dpseci_debugfs_init(priv);
5230
5231 /* register crypto algorithms the device supports */
5232 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5233 struct caam_skcipher_alg *t_alg = driver_algs + i;
5234 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5235
5236 /* Skip DES algorithms if not supported by device */
5237 if (!priv->sec_attr.des_acc_num &&
5238 (alg_sel == OP_ALG_ALGSEL_3DES ||
5239 alg_sel == OP_ALG_ALGSEL_DES))
5240 continue;
5241
5242 /* Skip AES algorithms if not supported by device */
5243 if (!priv->sec_attr.aes_acc_num &&
5244 alg_sel == OP_ALG_ALGSEL_AES)
5245 continue;
5246
5247 /* Skip CHACHA20 algorithms if not supported by device */
5248 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5249 !priv->sec_attr.ccha_acc_num)
5250 continue;
5251
5252 t_alg->caam.dev = dev;
5253 caam_skcipher_alg_init(t_alg);
5254
5255 err = crypto_register_skcipher(&t_alg->skcipher);
5256 if (err) {
5257 dev_warn(dev, "%s alg registration failed: %d\n",
5258 t_alg->skcipher.base.cra_driver_name, err);
5259 continue;
5260 }
5261
5262 t_alg->registered = true;
5263 registered = true;
5264 }
5265
5266 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5267 struct caam_aead_alg *t_alg = driver_aeads + i;
5268 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5269 OP_ALG_ALGSEL_MASK;
5270 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5271 OP_ALG_ALGSEL_MASK;
5272
5273 /* Skip DES algorithms if not supported by device */
5274 if (!priv->sec_attr.des_acc_num &&
5275 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5276 c1_alg_sel == OP_ALG_ALGSEL_DES))
5277 continue;
5278
5279 /* Skip AES algorithms if not supported by device */
5280 if (!priv->sec_attr.aes_acc_num &&
5281 c1_alg_sel == OP_ALG_ALGSEL_AES)
5282 continue;
5283
5284 /* Skip CHACHA20 algorithms if not supported by device */
5285 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5286 !priv->sec_attr.ccha_acc_num)
5287 continue;
5288
5289 /* Skip POLY1305 algorithms if not supported by device */
5290 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5291 !priv->sec_attr.ptha_acc_num)
5292 continue;
5293
5294 /*
5295 * Skip algorithms requiring message digests
5296 * if MD not supported by device.
5297 */
5298 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5299 !priv->sec_attr.md_acc_num)
5300 continue;
5301
5302 t_alg->caam.dev = dev;
5303 caam_aead_alg_init(t_alg);
5304
5305 err = crypto_register_aead(&t_alg->aead);
5306 if (err) {
5307 dev_warn(dev, "%s alg registration failed: %d\n",
5308 t_alg->aead.base.cra_driver_name, err);
5309 continue;
5310 }
5311
5312 t_alg->registered = true;
5313 registered = true;
5314 }
5315 if (registered)
5316 dev_info(dev, "algorithms registered in /proc/crypto\n");
5317
5318 /* register hash algorithms the device supports */
5319 INIT_LIST_HEAD(&hash_list);
5320
5321 /*
5322 * Skip registration of any hashing algorithms if MD block
5323 * is not present.
5324 */
5325 if (!priv->sec_attr.md_acc_num)
5326 return 0;
5327
5328 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5329 struct caam_hash_alg *t_alg;
5330 struct caam_hash_template *alg = driver_hash + i;
5331
5332 /* register hmac version */
5333 t_alg = caam_hash_alloc(dev, alg, true);
5334 if (IS_ERR(t_alg)) {
5335 err = PTR_ERR(t_alg);
5336 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5337 alg->hmac_driver_name, err);
5338 continue;
5339 }
5340
5341 err = crypto_register_ahash(&t_alg->ahash_alg);
5342 if (err) {
5343 dev_warn(dev, "%s alg registration failed: %d\n",
5344 t_alg->ahash_alg.halg.base.cra_driver_name,
5345 err);
5346 kfree(t_alg);
5347 } else {
5348 list_add_tail(&t_alg->entry, &hash_list);
5349 }
5350
5351 /* register unkeyed version */
5352 t_alg = caam_hash_alloc(dev, alg, false);
5353 if (IS_ERR(t_alg)) {
5354 err = PTR_ERR(t_alg);
5355 dev_warn(dev, "%s alg allocation failed: %d\n",
5356 alg->driver_name, err);
5357 continue;
5358 }
5359
5360 err = crypto_register_ahash(&t_alg->ahash_alg);
5361 if (err) {
5362 dev_warn(dev, "%s alg registration failed: %d\n",
5363 t_alg->ahash_alg.halg.base.cra_driver_name,
5364 err);
5365 kfree(t_alg);
5366 } else {
5367 list_add_tail(&t_alg->entry, &hash_list);
5368 }
5369 }
5370 if (!list_empty(&hash_list))
5371 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5372
5373 return err;
5374
5375 err_bind:
5376 dpaa2_dpseci_dpio_free(priv);
5377 err_dpio_setup:
5378 dpaa2_dpseci_free(priv);
5379 err_dpseci_setup:
5380 free_percpu(priv->ppriv);
5381 err_alloc_ppriv:
5382 fsl_mc_portal_free(priv->mc_io);
5383 err_dma_mask:
5384 kmem_cache_destroy(qi_cache);
5385
5386 return err;
5387 }
5388
5389 static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5390 {
5391 struct device *dev;
5392 struct dpaa2_caam_priv *priv;
5393 int i;
5394
5395 dev = &ls_dev->dev;
5396 priv = dev_get_drvdata(dev);
5397
5398 dpaa2_dpseci_debugfs_exit(priv);
5399
5400 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5401 struct caam_aead_alg *t_alg = driver_aeads + i;
5402
5403 if (t_alg->registered)
5404 crypto_unregister_aead(&t_alg->aead);
5405 }
5406
5407 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5408 struct caam_skcipher_alg *t_alg = driver_algs + i;
5409
5410 if (t_alg->registered)
5411 crypto_unregister_skcipher(&t_alg->skcipher);
5412 }
5413
5414 if (hash_list.next) {
5415 struct caam_hash_alg *t_hash_alg, *p;
5416
5417 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5418 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5419 list_del(&t_hash_alg->entry);
5420 kfree(t_hash_alg);
5421 }
5422 }
5423
5424 dpaa2_dpseci_disable(priv);
5425 dpaa2_dpseci_dpio_free(priv);
5426 dpaa2_dpseci_free(priv);
5427 free_percpu(priv->ppriv);
5428 fsl_mc_portal_free(priv->mc_io);
5429 kmem_cache_destroy(qi_cache);
5430
5431 return 0;
5432 }
5433
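/*
 * dpaa2_caam_enqueue - submit a crypto request to the DPSECI Tx queue
 *
 * If a congestion group is configured, requests are rejected with -EBUSY
 * while the group is congested. The frame list is DMA-mapped, wrapped in
 * a frame descriptor and enqueued on this core's request FQ, retrying on
 * portal busy (upper bound: twice the number of Tx queues).
 */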
5434 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5435 {
5436 struct dpaa2_fd fd;
5437 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5438 struct dpaa2_caam_priv_per_cpu *ppriv;
5439 int err = 0, i;
5440
5441 if (IS_ERR(req))
5442 return PTR_ERR(req);
5443
5444 if (priv->cscn_mem) {
5445 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5446 DPAA2_CSCN_SIZE,
5447 DMA_FROM_DEVICE);
5448 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
5449 dev_dbg_ratelimited(dev, "Dropping request\n");
5450 return -EBUSY;
5451 }
5452 }
5453
5454 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5455
5456 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5457 DMA_BIDIRECTIONAL);
5458 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5459 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5460 goto err_out;
5461 }
5462
5463 memset(&fd, 0, sizeof(fd));
5464 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5465 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5466 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5467 dpaa2_fd_set_flc(&fd, req->flc_dma);
5468
5469 ppriv = this_cpu_ptr(priv->ppriv);
5470 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5471 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5472 &fd);
5473 if (err != -EBUSY)
5474 break;
5475
5476 cpu_relax();
5477 }
5478
5479 if (unlikely(err)) {
5480 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5481 goto err_out;
5482 }
5483
5484 return -EINPROGRESS;
5485
5486 err_out:
5487 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5488 DMA_BIDIRECTIONAL);
5489 return -EIO;
5490 }
5491 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5492
5493 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5494 {
5495 .vendor = FSL_MC_VENDOR_FREESCALE,
5496 .obj_type = "dpseci",
5497 },
5498 { .vendor = 0x0 }
5499 };
5500 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5501
5502 static struct fsl_mc_driver dpaa2_caam_driver = {
5503 .driver = {
5504 .name = KBUILD_MODNAME,
5505 .owner = THIS_MODULE,
5506 },
5507 .probe = dpaa2_caam_probe,
5508 .remove = dpaa2_caam_remove,
5509 .match_id_table = dpaa2_caam_match_id_table
5510 };
5511
5512 MODULE_LICENSE("Dual BSD/GPL");
5513 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5514 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5515
5516 module_fsl_mc_driver(dpaa2_caam_driver);
5517