1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTX CPT driver
3 *
4 * Copyright (C) 2019 Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11 #include <crypto/aes.h>
12 #include <crypto/authenc.h>
13 #include <crypto/cryptd.h>
14 #include <crypto/des.h>
15 #include <crypto/internal/aead.h>
16 #include <crypto/sha.h>
17 #include <crypto/xts.h>
18 #include <crypto/scatterwalk.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/sort.h>
21 #include <linux/module.h>
22 #include "otx_cptvf.h"
23 #include "otx_cptvf_algs.h"
24 #include "otx_cptvf_reqmgr.h"
25
26 #define CPT_MAX_VF_NUM 64
27 /* Size of salt in AES GCM mode */
28 #define AES_GCM_SALT_SIZE 4
29 /* Size of IV in AES GCM mode */
30 #define AES_GCM_IV_SIZE 8
31 /* Size of ICV (Integrity Check Value) in AES GCM mode */
32 #define AES_GCM_ICV_SIZE 16
33 /* Offset of IV in AES GCM mode */
34 #define AES_GCM_IV_OFFSET 8
35 #define CONTROL_WORD_LEN 8
36 #define KEY2_OFFSET 48
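/*
 * DMA_MODE_FLAG() ORs bit 7 into the request's major opcode when
 * OTX_CPT_DMA_GATHER_SCATTER is used, marking the request as carrying
 * gather/scatter component lists; see create_ctx_hdr() and
 * create_aead_ctx_hdr() below.
 */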
37 #define DMA_MODE_FLAG(dma_mode) \
38 (((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
39
40 /* Truncated SHA digest size */
41 #define SHA1_TRUNC_DIGEST_SIZE 12
42 #define SHA256_TRUNC_DIGEST_SIZE 16
43 #define SHA384_TRUNC_DIGEST_SIZE 24
44 #define SHA512_TRUNC_DIGEST_SIZE 32
45
46 static DEFINE_MUTEX(mutex);
47 static int is_crypto_registered;
48
49 struct cpt_device_desc {
50 enum otx_cptpf_type pf_type;
51 struct pci_dev *dev;
52 int num_queues;
53 };
54
55 struct cpt_device_table {
56 atomic_t count;
57 struct cpt_device_desc desc[CPT_MAX_VF_NUM];
58 };
59
60 static struct cpt_device_table se_devices = {
61 .count = ATOMIC_INIT(0)
62 };
63
64 static struct cpt_device_table ae_devices = {
65 .count = ATOMIC_INIT(0)
66 };
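/*
 * se_devices/ae_devices track the symmetric-engine and asymmetric-engine
 * VF devices added via otx_cpt_crypto_init(). Entries are kept sorted by
 * PCI devfn (see compare_func()/swap_func()) so that the CPU-to-queue
 * mapping done in get_se_device() stays deterministic.
 */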
67
68 static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
69 {
70 int count, ret = 0;
71
72 count = atomic_read(&se_devices.count);
73 if (count < 1)
74 return -ENODEV;
75
76 *cpu_num = get_cpu();
77
78 if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
79 /*
80 * On the OcteonTX platform there is one CPT instruction queue bound
81 * to each VF. We get maximum performance if one CPT queue
82 * is available for each cpu, otherwise CPT queues need to be
83 * shared between cpus.
84 */
85 if (*cpu_num >= count)
86 *cpu_num %= count;
87 *pdev = se_devices.desc[*cpu_num].dev;
88 } else {
89 pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
90 ret = -EINVAL;
91 }
92 put_cpu();
93
94 return ret;
95 }
96
97 static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
98 {
99 struct otx_cpt_req_ctx *rctx;
100 struct aead_request *req;
101 struct crypto_aead *tfm;
102
103 req = container_of(cpt_req->areq, struct aead_request, base);
104 tfm = crypto_aead_reqtfm(req);
105 rctx = aead_request_ctx(req);
106 if (memcmp(rctx->fctx.hmac.s.hmac_calc,
107 rctx->fctx.hmac.s.hmac_recv,
108 crypto_aead_authsize(tfm)) != 0)
109 return -EBADMSG;
110
111 return 0;
112 }
113
114 static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
115 {
116 struct otx_cpt_info_buffer *cpt_info = arg2;
117 struct crypto_async_request *areq = arg1;
118 struct otx_cpt_req_info *cpt_req;
119 struct pci_dev *pdev;
120
121 if (!cpt_info)
122 goto complete;
123
124 cpt_req = cpt_info->req;
125 if (!status) {
126 /*
127 * When the selected cipher is NULL we need to manually
128 * verify whether the calculated hmac value matches the
129 * received hmac value
130 */
131 if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
132 !cpt_req->is_enc)
133 status = validate_hmac_cipher_null(cpt_req);
134 }
135 pdev = cpt_info->pdev;
136 do_request_cleanup(pdev, cpt_info);
137
138 complete:
139 if (areq)
140 areq->complete(areq, status);
141 }
142
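/*
 * For CBC modes the crypto API expects req->iv to hold the output IV for a
 * chained request, i.e. the last ciphertext block. On encryption (and on
 * out-of-place decryption) that block can still be read from the
 * scatterlists; for in-place decryption it was saved to req_info->iv_out
 * by create_ctx_hdr() before the buffer was overwritten.
 */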
143 static void output_iv_copyback(struct crypto_async_request *areq)
144 {
145 struct otx_cpt_req_info *req_info;
146 struct skcipher_request *sreq;
147 struct crypto_skcipher *stfm;
148 struct otx_cpt_req_ctx *rctx;
149 struct otx_cpt_enc_ctx *ctx;
150 u32 start, ivsize;
151
152 sreq = container_of(areq, struct skcipher_request, base);
153 stfm = crypto_skcipher_reqtfm(sreq);
154 ctx = crypto_skcipher_ctx(stfm);
155 if (ctx->cipher_type == OTX_CPT_AES_CBC ||
156 ctx->cipher_type == OTX_CPT_DES3_CBC) {
157 rctx = skcipher_request_ctx(sreq);
158 req_info = &rctx->cpt_req;
159 ivsize = crypto_skcipher_ivsize(stfm);
160 start = sreq->cryptlen - ivsize;
161
162 if (req_info->is_enc) {
163 scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
164 ivsize, 0);
165 } else {
166 if (sreq->src != sreq->dst) {
167 scatterwalk_map_and_copy(sreq->iv, sreq->src,
168 start, ivsize, 0);
169 } else {
170 memcpy(sreq->iv, req_info->iv_out, ivsize);
171 kfree(req_info->iv_out);
172 }
173 }
174 }
175 }
176
177 static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
178 {
179 struct otx_cpt_info_buffer *cpt_info = arg2;
180 struct crypto_async_request *areq = arg1;
181 struct pci_dev *pdev;
182
183 if (areq) {
184 if (!status)
185 output_iv_copyback(areq);
186 if (cpt_info) {
187 pdev = cpt_info->pdev;
188 do_request_cleanup(pdev, cpt_info);
189 }
190 areq->complete(areq, status);
191 }
192 }
193
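/*
 * update_input_data()/update_output_data() walk a scatterlist and record
 * each segment's virtual address and length in the request's gather (in[])
 * or scatter (out[]) component array, bumping dlen/rlen and the component
 * count as they go.
 */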
194 static inline void update_input_data(struct otx_cpt_req_info *req_info,
195 struct scatterlist *inp_sg,
196 u32 nbytes, u32 *argcnt)
197 {
198 req_info->req.dlen += nbytes;
199
200 while (nbytes) {
201 u32 len = min(nbytes, inp_sg->length);
202 u8 *ptr = sg_virt(inp_sg);
203
204 req_info->in[*argcnt].vptr = (void *)ptr;
205 req_info->in[*argcnt].size = len;
206 nbytes -= len;
207 ++(*argcnt);
208 inp_sg = sg_next(inp_sg);
209 }
210 }
211
212 static inline void update_output_data(struct otx_cpt_req_info *req_info,
213 struct scatterlist *outp_sg,
214 u32 offset, u32 nbytes, u32 *argcnt)
215 {
216 req_info->rlen += nbytes;
217
218 while (nbytes) {
219 u32 len = min(nbytes, outp_sg->length - offset);
220 u8 *ptr = sg_virt(outp_sg);
221
222 req_info->out[*argcnt].vptr = (void *) (ptr + offset);
223 req_info->out[*argcnt].size = len;
224 nbytes -= len;
225 ++(*argcnt);
226 offset = 0;
227 outp_sg = sg_next(outp_sg);
228 }
229 }
230
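/*
 * Build the flexi-crypto (FC) context header for an skcipher request: the
 * 8-byte offset control word and the otx_cpt_fc_ctx (cipher type, key, IV)
 * become the first two gather entries. Minor opcode 2 selects encryption,
 * 3 selects decryption. For in-place CBC decryption the last ciphertext
 * block is saved in iv_out so it can be copied back as the output IV after
 * the hardware has overwritten the buffer.
 */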
231 static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
232 u32 *argcnt)
233 {
234 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
235 struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
236 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
237 struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
238 struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
239 struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
240 int ivsize = crypto_skcipher_ivsize(stfm);
241 u32 start = req->cryptlen - ivsize;
242 gfp_t flags;
243
244 flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
245 GFP_KERNEL : GFP_ATOMIC;
246 req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
247 req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
248
249 req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
250 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
251 if (enc) {
252 req_info->req.opcode.s.minor = 2;
253 } else {
254 req_info->req.opcode.s.minor = 3;
255 if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
256 ctx->cipher_type == OTX_CPT_DES3_CBC) &&
257 req->src == req->dst) {
258 req_info->iv_out = kmalloc(ivsize, flags);
259 if (!req_info->iv_out)
260 return -ENOMEM;
261
262 scatterwalk_map_and_copy(req_info->iv_out, req->src,
263 start, ivsize, 0);
264 }
265 }
266 /* Encryption data length */
267 req_info->req.param1 = req->cryptlen;
268 /* Authentication data length */
269 req_info->req.param2 = 0;
270
271 fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
272 fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
273 fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
274
275 if (ctx->cipher_type == OTX_CPT_AES_XTS)
276 memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
277 else
278 memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
279
280 memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
281
282 fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
283
284 /*
285 * Packet data information is passed in the offset control
286 * word, which occupies the first 8 bytes of the input data
287 */
288 req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
289 req_info->in[*argcnt].size = CONTROL_WORD_LEN;
290 req_info->req.dlen += CONTROL_WORD_LEN;
291 ++(*argcnt);
292
293 req_info->in[*argcnt].vptr = (u8 *)fctx;
294 req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
295 req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
296
297 ++(*argcnt);
298
299 return 0;
300 }
301
302 static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
303 u32 enc_iv_len)
304 {
305 struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
306 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
307 u32 argcnt = 0;
308 int ret;
309
310 ret = create_ctx_hdr(req, enc, &argcnt);
311 if (ret)
312 return ret;
313
314 update_input_data(req_info, req->src, req->cryptlen, &argcnt);
315 req_info->incnt = argcnt;
316
317 return 0;
318 }
319
320 static inline void create_output_list(struct skcipher_request *req,
321 u32 enc_iv_len)
322 {
323 struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
324 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
325 u32 argcnt = 0;
326
327 /*
328 * OUTPUT Buffer Processing
329 * AES encryption/decryption output is received in the
330 * following format:
331 *
332 * |--------- IV ---------|---- ENCRYPTED/DECRYPTED DATA ----|
333 * | 16 bytes (AES CBC)   |    request enc/dec data length   |
334 */
335 update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
336 req_info->outcnt = argcnt;
337 }
338
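/*
 * Common skcipher entry point: build the gather/scatter lists, pick a SE
 * device (and hence an instruction queue) based on the current CPU and
 * submit the request asynchronously; completion is reported through
 * otx_cpt_skcipher_callback().
 */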
339 static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
340 {
341 struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
342 struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
343 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
344 u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
345 struct pci_dev *pdev;
346 int status, cpu_num;
347
348 /* Validate that request doesn't exceed maximum CPT supported size */
349 if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
350 return -E2BIG;
351
352 /* Clear control words */
353 rctx->ctrl_word.flags = 0;
354 rctx->fctx.enc.enc_ctrl.flags = 0;
355
356 status = create_input_list(req, enc, enc_iv_len);
357 if (status)
358 return status;
359 create_output_list(req, enc_iv_len);
360
361 status = get_se_device(&pdev, &cpu_num);
362 if (status)
363 return status;
364
365 req_info->callback = (void *)otx_cpt_skcipher_callback;
366 req_info->areq = &req->base;
367 req_info->req_type = OTX_CPT_ENC_DEC_REQ;
368 req_info->is_enc = enc;
369 req_info->is_trunc_hmac = false;
370 req_info->ctrl.s.grp = 0;
371
372 /*
373 * We perform an asynchronous send and once
374 * the request is completed the driver will
375 * notify the caller through the registered callback function
376 */
377 status = otx_cpt_do_request(pdev, req_info, cpu_num);
378
379 return status;
380 }
381
382 static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
383 {
384 return cpt_enc_dec(req, true);
385 }
386
387 static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
388 {
389 return cpt_enc_dec(req, false);
390 }
391
392 static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
393 const u8 *key, u32 keylen)
394 {
395 struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
396 const u8 *key2 = key + (keylen / 2);
397 const u8 *key1 = key;
398 int ret;
399
400 ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
401 if (ret)
402 return ret;
403 ctx->key_len = keylen;
404 memcpy(ctx->enc_key, key1, keylen / 2);
405 memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
406 ctx->cipher_type = OTX_CPT_AES_XTS;
407 switch (ctx->key_len) {
408 case 2 * AES_KEYSIZE_128:
409 ctx->key_type = OTX_CPT_AES_128_BIT;
410 break;
411 case 2 * AES_KEYSIZE_256:
412 ctx->key_type = OTX_CPT_AES_256_BIT;
413 break;
414 default:
415 return -EINVAL;
416 }
417
418 return 0;
419 }
420
421 static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
422 u32 keylen, u8 cipher_type)
423 {
424 struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
425
426 if (keylen != DES3_EDE_KEY_SIZE)
427 return -EINVAL;
428
429 ctx->key_len = keylen;
430 ctx->cipher_type = cipher_type;
431
432 memcpy(ctx->enc_key, key, keylen);
433
434 return 0;
435 }
436
437 static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
438 u32 keylen, u8 cipher_type)
439 {
440 struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
441
442 switch (keylen) {
443 case AES_KEYSIZE_128:
444 ctx->key_type = OTX_CPT_AES_128_BIT;
445 break;
446 case AES_KEYSIZE_192:
447 ctx->key_type = OTX_CPT_AES_192_BIT;
448 break;
449 case AES_KEYSIZE_256:
450 ctx->key_type = OTX_CPT_AES_256_BIT;
451 break;
452 default:
453 return -EINVAL;
454 }
455 ctx->key_len = keylen;
456 ctx->cipher_type = cipher_type;
457
458 memcpy(ctx->enc_key, key, keylen);
459
460 return 0;
461 }
462
463 static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
464 const u8 *key, u32 keylen)
465 {
466 return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
467 }
468
469 static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
470 const u8 *key, u32 keylen)
471 {
472 return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
473 }
474
475 static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
476 const u8 *key, u32 keylen)
477 {
478 return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
479 }
480
481 static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
482 const u8 *key, u32 keylen)
483 {
484 return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
485 }
486
487 static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
488 const u8 *key, u32 keylen)
489 {
490 return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
491 }
492
493 static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
494 {
495 struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
496
497 memset(ctx, 0, sizeof(*ctx));
498 /*
499 * Additional memory for skcipher_request is
500 * allocated since the cryptd daemon uses
501 * this memory for request_ctx information
502 */
503 crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
504 sizeof(struct skcipher_request));
505
506 return 0;
507 }
508
509 static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
510 {
511 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
512
513 ctx->cipher_type = cipher_type;
514 ctx->mac_type = mac_type;
515
516 /*
517 * When the selected cipher is NULL we use the HMAC opcode instead of
518 * the FLEXICRYPTO opcode, therefore we don't need a HASH algorithm
519 * for calculating ipad and opad
520 */
521 if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
522 switch (ctx->mac_type) {
523 case OTX_CPT_SHA1:
524 ctx->hashalg = crypto_alloc_shash("sha1", 0,
525 CRYPTO_ALG_ASYNC);
526 if (IS_ERR(ctx->hashalg))
527 return PTR_ERR(ctx->hashalg);
528 break;
529
530 case OTX_CPT_SHA256:
531 ctx->hashalg = crypto_alloc_shash("sha256", 0,
532 CRYPTO_ALG_ASYNC);
533 if (IS_ERR(ctx->hashalg))
534 return PTR_ERR(ctx->hashalg);
535 break;
536
537 case OTX_CPT_SHA384:
538 ctx->hashalg = crypto_alloc_shash("sha384", 0,
539 CRYPTO_ALG_ASYNC);
540 if (IS_ERR(ctx->hashalg))
541 return PTR_ERR(ctx->hashalg);
542 break;
543
544 case OTX_CPT_SHA512:
545 ctx->hashalg = crypto_alloc_shash("sha512", 0,
546 CRYPTO_ALG_ASYNC);
547 if (IS_ERR(ctx->hashalg))
548 return PTR_ERR(ctx->hashalg);
549 break;
550 }
551 }
552
553 crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));
554
555 return 0;
556 }
557
558 static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
559 {
560 return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
561 }
562
563 static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
564 {
565 return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
566 }
567
568 static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
569 {
570 return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
571 }
572
573 static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
574 {
575 return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
576 }
577
578 static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
579 {
580 return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
581 }
582
583 static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
584 {
585 return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
586 }
587
588 static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
589 {
590 return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
591 }
592
593 static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
594 {
595 return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
596 }
597
598 static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
599 {
600 return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
601 }
602
603 static void otx_cpt_aead_exit(struct crypto_aead *tfm)
604 {
605 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
606
607 kfree(ctx->ipad);
608 kfree(ctx->opad);
609 if (ctx->hashalg)
610 crypto_free_shash(ctx->hashalg);
611 kfree(ctx->sdesc);
612 }
613
614 /*
615 * Validate the Integrity Check Value length (i.e. the authentication tag
616 * length) requested for this transform
617 */
618 static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
619 unsigned int authsize)
620 {
621 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
622
623 switch (ctx->mac_type) {
624 case OTX_CPT_SHA1:
625 if (authsize != SHA1_DIGEST_SIZE &&
626 authsize != SHA1_TRUNC_DIGEST_SIZE)
627 return -EINVAL;
628
629 if (authsize == SHA1_TRUNC_DIGEST_SIZE)
630 ctx->is_trunc_hmac = true;
631 break;
632
633 case OTX_CPT_SHA256:
634 if (authsize != SHA256_DIGEST_SIZE &&
635 authsize != SHA256_TRUNC_DIGEST_SIZE)
636 return -EINVAL;
637
638 if (authsize == SHA256_TRUNC_DIGEST_SIZE)
639 ctx->is_trunc_hmac = true;
640 break;
641
642 case OTX_CPT_SHA384:
643 if (authsize != SHA384_DIGEST_SIZE &&
644 authsize != SHA384_TRUNC_DIGEST_SIZE)
645 return -EINVAL;
646
647 if (authsize == SHA384_TRUNC_DIGEST_SIZE)
648 ctx->is_trunc_hmac = true;
649 break;
650
651 case OTX_CPT_SHA512:
652 if (authsize != SHA512_DIGEST_SIZE &&
653 authsize != SHA512_TRUNC_DIGEST_SIZE)
654 return -EINVAL;
655
656 if (authsize == SHA512_TRUNC_DIGEST_SIZE)
657 ctx->is_trunc_hmac = true;
658 break;
659
660 case OTX_CPT_MAC_NULL:
661 if (ctx->cipher_type == OTX_CPT_AES_GCM) {
662 if (authsize != AES_GCM_ICV_SIZE)
663 return -EINVAL;
664 } else
665 return -EINVAL;
666 break;
667
668 default:
669 return -EINVAL;
670 }
671
672 tfm->authsize = authsize;
673 return 0;
674 }
675
676 static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
677 {
678 struct otx_cpt_sdesc *sdesc;
679 int size;
680
681 size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
682 sdesc = kmalloc(size, GFP_KERNEL);
683 if (!sdesc)
684 return NULL;
685
686 sdesc->shash.tfm = alg;
687
688 return sdesc;
689 }
690
691 static inline void swap_data32(void *buf, u32 len)
692 {
693 cpu_to_be32_array(buf, buf, len / 4);
694 }
695
696 static inline void swap_data64(void *buf, u32 len)
697 {
698 __be64 *dst = buf;
699 u64 *src = buf;
700 int i = 0;
701
702 for (i = 0 ; i < len / 8; i++, src++, dst++)
703 *dst = cpu_to_be64p(src);
704 }
705
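/*
 * copy_pad() converts an exported shash state into the byte order the
 * engine consumes: the 32-bit (sha1/sha256) or 64-bit (sha384/sha512)
 * state words are swapped to big endian and copied out as the precomputed
 * ipad/opad partial digest.
 */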
706 static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
707 {
708 struct sha512_state *sha512;
709 struct sha256_state *sha256;
710 struct sha1_state *sha1;
711
712 switch (mac_type) {
713 case OTX_CPT_SHA1:
714 sha1 = (struct sha1_state *) in_pad;
715 swap_data32(sha1->state, SHA1_DIGEST_SIZE);
716 memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
717 break;
718
719 case OTX_CPT_SHA256:
720 sha256 = (struct sha256_state *) in_pad;
721 swap_data32(sha256->state, SHA256_DIGEST_SIZE);
722 memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
723 break;
724
725 case OTX_CPT_SHA384:
726 case OTX_CPT_SHA512:
727 sha512 = (struct sha512_state *) in_pad;
728 swap_data64(sha512->state, SHA512_DIGEST_SIZE);
729 memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
730 break;
731
732 default:
733 return -EINVAL;
734 }
735
736 return 0;
737 }
738
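/*
 * Precompute the HMAC inner and outer partial digests in software: the
 * authentication key (hashed first if longer than a block) is XORed with
 * the ipad/opad constants, one block is run through the shash, and the
 * exported state is converted via copy_pad(). The hardware can then resume
 * the HMAC computation from these partial digests.
 */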
739 static int aead_hmac_init(struct crypto_aead *cipher)
740 {
741 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
742 int state_size = crypto_shash_statesize(ctx->hashalg);
743 int ds = crypto_shash_digestsize(ctx->hashalg);
744 int bs = crypto_shash_blocksize(ctx->hashalg);
745 int authkeylen = ctx->auth_key_len;
746 u8 *ipad = NULL, *opad = NULL;
747 int ret = 0, icount = 0;
748
749 ctx->sdesc = alloc_sdesc(ctx->hashalg);
750 if (!ctx->sdesc)
751 return -ENOMEM;
752
753 ctx->ipad = kzalloc(bs, GFP_KERNEL);
754 if (!ctx->ipad) {
755 ret = -ENOMEM;
756 goto calc_fail;
757 }
758
759 ctx->opad = kzalloc(bs, GFP_KERNEL);
760 if (!ctx->opad) {
761 ret = -ENOMEM;
762 goto calc_fail;
763 }
764
765 ipad = kzalloc(state_size, GFP_KERNEL);
766 if (!ipad) {
767 ret = -ENOMEM;
768 goto calc_fail;
769 }
770
771 opad = kzalloc(state_size, GFP_KERNEL);
772 if (!opad) {
773 ret = -ENOMEM;
774 goto calc_fail;
775 }
776
777 if (authkeylen > bs) {
778 ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
779 authkeylen, ipad);
780 if (ret)
781 goto calc_fail;
782
783 authkeylen = ds;
784 } else {
785 memcpy(ipad, ctx->key, authkeylen);
786 }
787
788 memset(ipad + authkeylen, 0, bs - authkeylen);
789 memcpy(opad, ipad, bs);
790
791 for (icount = 0; icount < bs; icount++) {
792 ipad[icount] ^= 0x36;
793 opad[icount] ^= 0x5c;
794 }
795
796 /*
797 * Partial hashes calculated with the software
798 * algorithm are retrieved for IPAD & OPAD
799 */
800
801 /* IPAD Calculation */
802 crypto_shash_init(&ctx->sdesc->shash);
803 crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
804 crypto_shash_export(&ctx->sdesc->shash, ipad);
805 ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
806 if (ret)
807 goto calc_fail;
808
809 /* OPAD Calculation */
810 crypto_shash_init(&ctx->sdesc->shash);
811 crypto_shash_update(&ctx->sdesc->shash, opad, bs);
812 crypto_shash_export(&ctx->sdesc->shash, opad);
813 ret = copy_pad(ctx->mac_type, ctx->opad, opad);
814 if (ret)
815 goto calc_fail;
816
817 kfree(ipad);
818 kfree(opad);
819
820 return 0;
821
822 calc_fail:
823 kfree(ctx->ipad);
824 ctx->ipad = NULL;
825 kfree(ctx->opad);
826 ctx->opad = NULL;
827 kfree(ipad);
828 kfree(opad);
829 kfree(ctx->sdesc);
830 ctx->sdesc = NULL;
831
832 return ret;
833 }
834
835 static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
836 const unsigned char *key,
837 unsigned int keylen)
838 {
839 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
840 struct crypto_authenc_key_param *param;
841 int enckeylen = 0, authkeylen = 0;
842 struct rtattr *rta = (void *)key;
843 int status = -EINVAL;
844
845 if (!RTA_OK(rta, keylen))
846 goto badkey;
847
848 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
849 goto badkey;
850
851 if (RTA_PAYLOAD(rta) < sizeof(*param))
852 goto badkey;
853
854 param = RTA_DATA(rta);
855 enckeylen = be32_to_cpu(param->enckeylen);
856 key += RTA_ALIGN(rta->rta_len);
857 keylen -= RTA_ALIGN(rta->rta_len);
858 if (keylen < enckeylen)
859 goto badkey;
860
861 if (keylen > OTX_CPT_MAX_KEY_SIZE)
862 goto badkey;
863
864 authkeylen = keylen - enckeylen;
865 memcpy(ctx->key, key, keylen);
866
867 switch (enckeylen) {
868 case AES_KEYSIZE_128:
869 ctx->key_type = OTX_CPT_AES_128_BIT;
870 break;
871 case AES_KEYSIZE_192:
872 ctx->key_type = OTX_CPT_AES_192_BIT;
873 break;
874 case AES_KEYSIZE_256:
875 ctx->key_type = OTX_CPT_AES_256_BIT;
876 break;
877 default:
878 /* Invalid key length */
879 goto badkey;
880 }
881
882 ctx->enc_key_len = enckeylen;
883 ctx->auth_key_len = authkeylen;
884
885 status = aead_hmac_init(cipher);
886 if (status)
887 goto badkey;
888
889 return 0;
890 badkey:
891 return status;
892 }
893
894 static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
895 const unsigned char *key,
896 unsigned int keylen)
897 {
898 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
899 struct crypto_authenc_key_param *param;
900 struct rtattr *rta = (void *)key;
901 int enckeylen = 0;
902
903 if (!RTA_OK(rta, keylen))
904 goto badkey;
905
906 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
907 goto badkey;
908
909 if (RTA_PAYLOAD(rta) < sizeof(*param))
910 goto badkey;
911
912 param = RTA_DATA(rta);
913 enckeylen = be32_to_cpu(param->enckeylen);
914 key += RTA_ALIGN(rta->rta_len);
915 keylen -= RTA_ALIGN(rta->rta_len);
916 if (enckeylen != 0)
917 goto badkey;
918
919 if (keylen > OTX_CPT_MAX_KEY_SIZE)
920 goto badkey;
921
922 memcpy(ctx->key, key, keylen);
923 ctx->enc_key_len = enckeylen;
924 ctx->auth_key_len = keylen;
925 return 0;
926 badkey:
927 return -EINVAL;
928 }
929
930 static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
931 const unsigned char *key,
932 unsigned int keylen)
933 {
934 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
935
936 /*
937 * For AES GCM we expect to get an encryption key (16, 24 or 32 bytes)
938 * and a salt (4 bytes)
939 */
940 switch (keylen) {
941 case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
942 ctx->key_type = OTX_CPT_AES_128_BIT;
943 ctx->enc_key_len = AES_KEYSIZE_128;
944 break;
945 case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
946 ctx->key_type = OTX_CPT_AES_192_BIT;
947 ctx->enc_key_len = AES_KEYSIZE_192;
948 break;
949 case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
950 ctx->key_type = OTX_CPT_AES_256_BIT;
951 ctx->enc_key_len = AES_KEYSIZE_256;
952 break;
953 default:
954 /* Invalid key and salt length */
955 return -EINVAL;
956 }
957
958 /* Store encryption key and salt */
959 memcpy(ctx->key, key, keylen);
960
961 return 0;
962 }
963
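/*
 * Build the FC context for an AEAD request. For AES CBC + HMAC the
 * encryption key, the per-request IV and the precomputed ipad/opad digests
 * are copied into the context (SHA384 shares the SHA512 state size, hence
 * the ds override). For RFC4106 GCM the key and the 4-byte salt go into
 * the context while the 8-byte per-request IV is taken from the input data
 * (OTX_CPT_FROM_DPTR) at the recorded iv_offset.
 */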
964 static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
965 u32 *argcnt)
966 {
967 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
968 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
969 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
970 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
971 struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
972 int mac_len = crypto_aead_authsize(tfm);
973 int ds;
974
975 rctx->ctrl_word.e.enc_data_offset = req->assoclen;
976
977 switch (ctx->cipher_type) {
978 case OTX_CPT_AES_CBC:
979 fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
980 /* Copy encryption key to context */
981 memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
982 ctx->enc_key_len);
983 /* Copy IV to context */
984 memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
985
986 ds = crypto_shash_digestsize(ctx->hashalg);
987 if (ctx->mac_type == OTX_CPT_SHA384)
988 ds = SHA512_DIGEST_SIZE;
989 if (ctx->ipad)
990 memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
991 if (ctx->opad)
992 memcpy(fctx->hmac.e.opad, ctx->opad, ds);
993 break;
994
995 case OTX_CPT_AES_GCM:
996 fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
997 /* Copy encryption key to context */
998 memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
999 /* Copy salt to context */
1000 memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
1001 AES_GCM_SALT_SIZE);
1002
1003 rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
1004 break;
1005
1006 default:
1007 /* Unknown cipher type */
1008 return -EINVAL;
1009 }
1010 rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);
1011
1012 req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
1013 req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
1014 req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
1015 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
1016 if (enc) {
1017 req_info->req.opcode.s.minor = 2;
1018 req_info->req.param1 = req->cryptlen;
1019 req_info->req.param2 = req->cryptlen + req->assoclen;
1020 } else {
1021 req_info->req.opcode.s.minor = 3;
1022 req_info->req.param1 = req->cryptlen - mac_len;
1023 req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
1024 }
1025
1026 fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
1027 fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
1028 fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
1029 fctx->enc.enc_ctrl.e.mac_len = mac_len;
1030 fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
1031
1032 /*
1033 * Packet data information is passed in the offset control
1034 * word, which occupies the first 8 bytes of the input data
1035 */
1036 req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
1037 req_info->in[*argcnt].size = CONTROL_WORD_LEN;
1038 req_info->req.dlen += CONTROL_WORD_LEN;
1039 ++(*argcnt);
1040
1041 req_info->in[*argcnt].vptr = (u8 *)fctx;
1042 req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
1043 req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
1044 ++(*argcnt);
1045
1046 return 0;
1047 }
1048
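/*
 * For the NULL-cipher authenc algorithms the request is issued with the
 * HMAC major opcode instead of FC: param1 carries the authentication key
 * length, param2 carries the mac type in its upper byte, and the key
 * itself is passed as the first gather entry, padded to a multiple of
 * 8 bytes.
 */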
1049 static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
1050 u32 enc)
1051 {
1052 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
1053 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1054 struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
1055 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1056
1057 req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
1058 req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
1059 req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
1060 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
1061 req_info->is_trunc_hmac = ctx->is_trunc_hmac;
1062
1063 req_info->req.opcode.s.minor = 0;
1064 req_info->req.param1 = ctx->auth_key_len;
1065 req_info->req.param2 = ctx->mac_type << 8;
1066
1067 /* Add authentication key */
1068 req_info->in[*argcnt].vptr = ctx->key;
1069 req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
1070 req_info->req.dlen += round_up(ctx->auth_key_len, 8);
1071 ++(*argcnt);
1072
1073 return 0;
1074 }
1075
1076 static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
1077 {
1078 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
1079 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1080 u32 inputlen = req->cryptlen + req->assoclen;
1081 u32 status, argcnt = 0;
1082
1083 status = create_aead_ctx_hdr(req, enc, &argcnt);
1084 if (status)
1085 return status;
1086 update_input_data(req_info, req->src, inputlen, &argcnt);
1087 req_info->incnt = argcnt;
1088
1089 return 0;
1090 }
1091
1092 static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
1093 u32 mac_len)
1094 {
1095 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
1096 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1097 u32 argcnt = 0, outputlen = 0;
1098
1099 if (enc)
1100 outputlen = req->cryptlen + req->assoclen + mac_len;
1101 else
1102 outputlen = req->cryptlen + req->assoclen - mac_len;
1103
1104 update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
1105 req_info->outcnt = argcnt;
1106
1107 return 0;
1108 }
1109
1110 static inline u32 create_aead_null_input_list(struct aead_request *req,
1111 u32 enc, u32 mac_len)
1112 {
1113 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
1114 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1115 u32 inputlen, argcnt = 0;
1116
1117 if (enc)
1118 inputlen = req->cryptlen + req->assoclen;
1119 else
1120 inputlen = req->cryptlen + req->assoclen - mac_len;
1121
1122 create_hmac_ctx_hdr(req, &argcnt, enc);
1123 update_input_data(req_info, req->src, inputlen, &argcnt);
1124 req_info->incnt = argcnt;
1125
1126 return 0;
1127 }
1128
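/*
 * Output handling for the NULL-cipher case. If the request is not in
 * place, the payload is first copied from source to destination through a
 * temporary buffer. On encryption the scatterlist position just past the
 * payload is located and the computed hmac is directed there; on
 * decryption the received hmac is copied aside and the engine writes the
 * calculated hmac, which otx_cpt_aead_callback() later compares via
 * validate_hmac_cipher_null().
 */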
1129 static inline u32 create_aead_null_output_list(struct aead_request *req,
1130 u32 enc, u32 mac_len)
1131 {
1132 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
1133 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1134 struct scatterlist *dst;
1135 u8 *ptr = NULL;
1136 int argcnt = 0, status, offset;
1137 u32 inputlen;
1138
1139 if (enc)
1140 inputlen = req->cryptlen + req->assoclen;
1141 else
1142 inputlen = req->cryptlen + req->assoclen - mac_len;
1143
1144 /*
1145 * If source and destination are different
1146 * then copy payload to destination
1147 */
1148 if (req->src != req->dst) {
1149
1150 ptr = kmalloc(inputlen, (req_info->areq->flags &
1151 CRYPTO_TFM_REQ_MAY_SLEEP) ?
1152 GFP_KERNEL : GFP_ATOMIC);
1153 if (!ptr) {
1154 status = -ENOMEM;
1155 goto error;
1156 }
1157
1158 status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
1159 inputlen);
1160 if (status != inputlen) {
1161 status = -EINVAL;
1162 goto error_free;
1163 }
1164 status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
1165 inputlen);
1166 if (status != inputlen) {
1167 status = -EINVAL;
1168 goto error_free;
1169 }
1170 kfree(ptr);
1171 }
1172
1173 if (enc) {
1174 /*
1175 * In an encryption scenario hmac needs
1176 * to be appended after payload
1177 */
1178 dst = req->dst;
1179 offset = inputlen;
1180 while (offset >= dst->length) {
1181 offset -= dst->length;
1182 dst = sg_next(dst);
1183 if (!dst) {
1184 status = -ENOENT;
1185 goto error;
1186 }
1187 }
1188
1189 update_output_data(req_info, dst, offset, mac_len, &argcnt);
1190 } else {
1191 /*
1192 * In a decryption scenario the hmac calculated for the received
1193 * payload needs to be compared with the received hmac
1194 */
1195 status = sg_copy_buffer(req->src, sg_nents(req->src),
1196 rctx->fctx.hmac.s.hmac_recv, mac_len,
1197 inputlen, true);
1198 if (status != mac_len) {
1199 status = -EINVAL;
1200 goto error;
1201 }
1202
1203 req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
1204 req_info->out[argcnt].size = mac_len;
1205 argcnt++;
1206 }
1207
1208 req_info->outcnt = argcnt;
1209 return 0;
1210
1211 error_free:
1212 kfree(ptr);
1213 error:
1214 return status;
1215 }
1216
1217 static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
1218 {
1219 struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
1220 struct otx_cpt_req_info *req_info = &rctx->cpt_req;
1221 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1222 struct pci_dev *pdev;
1223 u32 status, cpu_num;
1224
1225 /* Clear control words */
1226 rctx->ctrl_word.flags = 0;
1227 rctx->fctx.enc.enc_ctrl.flags = 0;
1228
1229 req_info->callback = otx_cpt_aead_callback;
1230 req_info->areq = &req->base;
1231 req_info->req_type = reg_type;
1232 req_info->is_enc = enc;
1233 req_info->is_trunc_hmac = false;
1234
1235 switch (reg_type) {
1236 case OTX_CPT_AEAD_ENC_DEC_REQ:
1237 status = create_aead_input_list(req, enc);
1238 if (status)
1239 return status;
1240 status = create_aead_output_list(req, enc,
1241 crypto_aead_authsize(tfm));
1242 if (status)
1243 return status;
1244 break;
1245
1246 case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
1247 status = create_aead_null_input_list(req, enc,
1248 crypto_aead_authsize(tfm));
1249 if (status)
1250 return status;
1251 status = create_aead_null_output_list(req, enc,
1252 crypto_aead_authsize(tfm));
1253 if (status)
1254 return status;
1255 break;
1256
1257 default:
1258 return -EINVAL;
1259 }
1260
1261 /* Validate that request doesn't exceed maximum CPT supported size */
1262 if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
1263 req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
1264 return -E2BIG;
1265
1266 status = get_se_device(&pdev, &cpu_num);
1267 if (status)
1268 return status;
1269
1270 req_info->ctrl.s.grp = 0;
1271
1272 status = otx_cpt_do_request(pdev, req_info, cpu_num);
1273 /*
1274 * We perform an asynchronous send and once
1275 * the request is completed the driver will
1276 * notify the caller through the registered callback function
1277 */
1278 return status;
1279 }
1280
1281 static int otx_cpt_aead_encrypt(struct aead_request *req)
1282 {
1283 return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
1284 }
1285
1286 static int otx_cpt_aead_decrypt(struct aead_request *req)
1287 {
1288 return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
1289 }
1290
1291 static int otx_cpt_aead_null_encrypt(struct aead_request *req)
1292 {
1293 return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
1294 }
1295
1296 static int otx_cpt_aead_null_decrypt(struct aead_request *req)
1297 {
1298 return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
1299 }
1300
1301 static struct skcipher_alg otx_cpt_skciphers[] = { {
1302 .base.cra_name = "xts(aes)",
1303 .base.cra_driver_name = "cpt_xts_aes",
1304 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1305 .base.cra_blocksize = AES_BLOCK_SIZE,
1306 .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1307 .base.cra_alignmask = 7,
1308 .base.cra_priority = 4001,
1309 .base.cra_module = THIS_MODULE,
1310
1311 .init = otx_cpt_enc_dec_init,
1312 .ivsize = AES_BLOCK_SIZE,
1313 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1314 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1315 .setkey = otx_cpt_skcipher_xts_setkey,
1316 .encrypt = otx_cpt_skcipher_encrypt,
1317 .decrypt = otx_cpt_skcipher_decrypt,
1318 }, {
1319 .base.cra_name = "cbc(aes)",
1320 .base.cra_driver_name = "cpt_cbc_aes",
1321 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1322 .base.cra_blocksize = AES_BLOCK_SIZE,
1323 .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1324 .base.cra_alignmask = 7,
1325 .base.cra_priority = 4001,
1326 .base.cra_module = THIS_MODULE,
1327
1328 .init = otx_cpt_enc_dec_init,
1329 .ivsize = AES_BLOCK_SIZE,
1330 .min_keysize = AES_MIN_KEY_SIZE,
1331 .max_keysize = AES_MAX_KEY_SIZE,
1332 .setkey = otx_cpt_skcipher_cbc_aes_setkey,
1333 .encrypt = otx_cpt_skcipher_encrypt,
1334 .decrypt = otx_cpt_skcipher_decrypt,
1335 }, {
1336 .base.cra_name = "ecb(aes)",
1337 .base.cra_driver_name = "cpt_ecb_aes",
1338 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1339 .base.cra_blocksize = AES_BLOCK_SIZE,
1340 .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1341 .base.cra_alignmask = 7,
1342 .base.cra_priority = 4001,
1343 .base.cra_module = THIS_MODULE,
1344
1345 .init = otx_cpt_enc_dec_init,
1346 .ivsize = 0,
1347 .min_keysize = AES_MIN_KEY_SIZE,
1348 .max_keysize = AES_MAX_KEY_SIZE,
1349 .setkey = otx_cpt_skcipher_ecb_aes_setkey,
1350 .encrypt = otx_cpt_skcipher_encrypt,
1351 .decrypt = otx_cpt_skcipher_decrypt,
1352 }, {
1353 .base.cra_name = "cfb(aes)",
1354 .base.cra_driver_name = "cpt_cfb_aes",
1355 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1356 .base.cra_blocksize = AES_BLOCK_SIZE,
1357 .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
1358 .base.cra_alignmask = 7,
1359 .base.cra_priority = 4001,
1360 .base.cra_module = THIS_MODULE,
1361
1362 .init = otx_cpt_enc_dec_init,
1363 .ivsize = AES_BLOCK_SIZE,
1364 .min_keysize = AES_MIN_KEY_SIZE,
1365 .max_keysize = AES_MAX_KEY_SIZE,
1366 .setkey = otx_cpt_skcipher_cfb_aes_setkey,
1367 .encrypt = otx_cpt_skcipher_encrypt,
1368 .decrypt = otx_cpt_skcipher_decrypt,
1369 }, {
1370 .base.cra_name = "cbc(des3_ede)",
1371 .base.cra_driver_name = "cpt_cbc_des3_ede",
1372 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1373 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1374 .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
1375 .base.cra_alignmask = 7,
1376 .base.cra_priority = 4001,
1377 .base.cra_module = THIS_MODULE,
1378
1379 .init = otx_cpt_enc_dec_init,
1380 .min_keysize = DES3_EDE_KEY_SIZE,
1381 .max_keysize = DES3_EDE_KEY_SIZE,
1382 .ivsize = DES_BLOCK_SIZE,
1383 .setkey = otx_cpt_skcipher_cbc_des3_setkey,
1384 .encrypt = otx_cpt_skcipher_encrypt,
1385 .decrypt = otx_cpt_skcipher_decrypt,
1386 }, {
1387 .base.cra_name = "ecb(des3_ede)",
1388 .base.cra_driver_name = "cpt_ecb_des3_ede",
1389 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1390 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1391 .base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
1392 .base.cra_alignmask = 7,
1393 .base.cra_priority = 4001,
1394 .base.cra_module = THIS_MODULE,
1395
1396 .init = otx_cpt_enc_dec_init,
1397 .min_keysize = DES3_EDE_KEY_SIZE,
1398 .max_keysize = DES3_EDE_KEY_SIZE,
1399 .ivsize = 0,
1400 .setkey = otx_cpt_skcipher_ecb_des3_setkey,
1401 .encrypt = otx_cpt_skcipher_encrypt,
1402 .decrypt = otx_cpt_skcipher_decrypt,
1403 } };
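/*
 * Illustrative usage sketch (not part of this driver): once registered,
 * these algorithms are reached through the generic kernel crypto API and,
 * with cra_priority 4001, are preferred over the software implementations,
 * e.g. (key below is a hypothetical caller-provided buffer):
 *
 *   struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *   if (!IS_ERR(tfm))
 *           crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *   // ...build an skcipher_request, then call crypto_skcipher_encrypt()...
 */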
1404
1405 static struct aead_alg otx_cpt_aeads[] = { {
1406 .base = {
1407 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1408 .cra_driver_name = "cpt_hmac_sha1_cbc_aes",
1409 .cra_blocksize = AES_BLOCK_SIZE,
1410 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1411 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1412 .cra_priority = 4001,
1413 .cra_alignmask = 0,
1414 .cra_module = THIS_MODULE,
1415 },
1416 .init = otx_cpt_aead_cbc_aes_sha1_init,
1417 .exit = otx_cpt_aead_exit,
1418 .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1419 .setauthsize = otx_cpt_aead_set_authsize,
1420 .encrypt = otx_cpt_aead_encrypt,
1421 .decrypt = otx_cpt_aead_decrypt,
1422 .ivsize = AES_BLOCK_SIZE,
1423 .maxauthsize = SHA1_DIGEST_SIZE,
1424 }, {
1425 .base = {
1426 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1427 .cra_driver_name = "cpt_hmac_sha256_cbc_aes",
1428 .cra_blocksize = AES_BLOCK_SIZE,
1429 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1430 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1431 .cra_priority = 4001,
1432 .cra_alignmask = 0,
1433 .cra_module = THIS_MODULE,
1434 },
1435 .init = otx_cpt_aead_cbc_aes_sha256_init,
1436 .exit = otx_cpt_aead_exit,
1437 .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1438 .setauthsize = otx_cpt_aead_set_authsize,
1439 .encrypt = otx_cpt_aead_encrypt,
1440 .decrypt = otx_cpt_aead_decrypt,
1441 .ivsize = AES_BLOCK_SIZE,
1442 .maxauthsize = SHA256_DIGEST_SIZE,
1443 }, {
1444 .base = {
1445 .cra_name = "authenc(hmac(sha384),cbc(aes))",
1446 .cra_driver_name = "cpt_hmac_sha384_cbc_aes",
1447 .cra_blocksize = AES_BLOCK_SIZE,
1448 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1449 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1450 .cra_priority = 4001,
1451 .cra_alignmask = 0,
1452 .cra_module = THIS_MODULE,
1453 },
1454 .init = otx_cpt_aead_cbc_aes_sha384_init,
1455 .exit = otx_cpt_aead_exit,
1456 .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1457 .setauthsize = otx_cpt_aead_set_authsize,
1458 .encrypt = otx_cpt_aead_encrypt,
1459 .decrypt = otx_cpt_aead_decrypt,
1460 .ivsize = AES_BLOCK_SIZE,
1461 .maxauthsize = SHA384_DIGEST_SIZE,
1462 }, {
1463 .base = {
1464 .cra_name = "authenc(hmac(sha512),cbc(aes))",
1465 .cra_driver_name = "cpt_hmac_sha512_cbc_aes",
1466 .cra_blocksize = AES_BLOCK_SIZE,
1467 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1468 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1469 .cra_priority = 4001,
1470 .cra_alignmask = 0,
1471 .cra_module = THIS_MODULE,
1472 },
1473 .init = otx_cpt_aead_cbc_aes_sha512_init,
1474 .exit = otx_cpt_aead_exit,
1475 .setkey = otx_cpt_aead_cbc_aes_sha_setkey,
1476 .setauthsize = otx_cpt_aead_set_authsize,
1477 .encrypt = otx_cpt_aead_encrypt,
1478 .decrypt = otx_cpt_aead_decrypt,
1479 .ivsize = AES_BLOCK_SIZE,
1480 .maxauthsize = SHA512_DIGEST_SIZE,
1481 }, {
1482 .base = {
1483 .cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
1484 .cra_driver_name = "cpt_hmac_sha1_ecb_null",
1485 .cra_blocksize = 1,
1486 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1487 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1488 .cra_priority = 4001,
1489 .cra_alignmask = 0,
1490 .cra_module = THIS_MODULE,
1491 },
1492 .init = otx_cpt_aead_ecb_null_sha1_init,
1493 .exit = otx_cpt_aead_exit,
1494 .setkey = otx_cpt_aead_ecb_null_sha_setkey,
1495 .setauthsize = otx_cpt_aead_set_authsize,
1496 .encrypt = otx_cpt_aead_null_encrypt,
1497 .decrypt = otx_cpt_aead_null_decrypt,
1498 .ivsize = 0,
1499 .maxauthsize = SHA1_DIGEST_SIZE,
1500 }, {
1501 .base = {
1502 .cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
1503 .cra_driver_name = "cpt_hmac_sha256_ecb_null",
1504 .cra_blocksize = 1,
1505 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1506 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1507 .cra_priority = 4001,
1508 .cra_alignmask = 0,
1509 .cra_module = THIS_MODULE,
1510 },
1511 .init = otx_cpt_aead_ecb_null_sha256_init,
1512 .exit = otx_cpt_aead_exit,
1513 .setkey = otx_cpt_aead_ecb_null_sha_setkey,
1514 .setauthsize = otx_cpt_aead_set_authsize,
1515 .encrypt = otx_cpt_aead_null_encrypt,
1516 .decrypt = otx_cpt_aead_null_decrypt,
1517 .ivsize = 0,
1518 .maxauthsize = SHA256_DIGEST_SIZE,
1519 }, {
1520 .base = {
1521 .cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
1522 .cra_driver_name = "cpt_hmac_sha384_ecb_null",
1523 .cra_blocksize = 1,
1524 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1525 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1526 .cra_priority = 4001,
1527 .cra_alignmask = 0,
1528 .cra_module = THIS_MODULE,
1529 },
1530 .init = otx_cpt_aead_ecb_null_sha384_init,
1531 .exit = otx_cpt_aead_exit,
1532 .setkey = otx_cpt_aead_ecb_null_sha_setkey,
1533 .setauthsize = otx_cpt_aead_set_authsize,
1534 .encrypt = otx_cpt_aead_null_encrypt,
1535 .decrypt = otx_cpt_aead_null_decrypt,
1536 .ivsize = 0,
1537 .maxauthsize = SHA384_DIGEST_SIZE,
1538 }, {
1539 .base = {
1540 .cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
1541 .cra_driver_name = "cpt_hmac_sha512_ecb_null",
1542 .cra_blocksize = 1,
1543 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1544 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1545 .cra_priority = 4001,
1546 .cra_alignmask = 0,
1547 .cra_module = THIS_MODULE,
1548 },
1549 .init = otx_cpt_aead_ecb_null_sha512_init,
1550 .exit = otx_cpt_aead_exit,
1551 .setkey = otx_cpt_aead_ecb_null_sha_setkey,
1552 .setauthsize = otx_cpt_aead_set_authsize,
1553 .encrypt = otx_cpt_aead_null_encrypt,
1554 .decrypt = otx_cpt_aead_null_decrypt,
1555 .ivsize = 0,
1556 .maxauthsize = SHA512_DIGEST_SIZE,
1557 }, {
1558 .base = {
1559 .cra_name = "rfc4106(gcm(aes))",
1560 .cra_driver_name = "cpt_rfc4106_gcm_aes",
1561 .cra_blocksize = 1,
1562 .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
1563 .cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
1564 .cra_priority = 4001,
1565 .cra_alignmask = 0,
1566 .cra_module = THIS_MODULE,
1567 },
1568 .init = otx_cpt_aead_gcm_aes_init,
1569 .exit = otx_cpt_aead_exit,
1570 .setkey = otx_cpt_aead_gcm_aes_setkey,
1571 .setauthsize = otx_cpt_aead_set_authsize,
1572 .encrypt = otx_cpt_aead_encrypt,
1573 .decrypt = otx_cpt_aead_decrypt,
1574 .ivsize = AES_GCM_IV_SIZE,
1575 .maxauthsize = AES_GCM_ICV_SIZE,
1576 } };
1577
1578 static inline int is_any_alg_used(void)
1579 {
1580 int i;
1581
1582 for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
1583 if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
1584 return true;
1585 for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
1586 if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
1587 return true;
1588 return false;
1589 }
1590
1591 static inline int cpt_register_algs(void)
1592 {
1593 int i, err = 0;
1594
1595 if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
1596 for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
1597 otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1598
1599 err = crypto_register_skciphers(otx_cpt_skciphers,
1600 ARRAY_SIZE(otx_cpt_skciphers));
1601 if (err)
1602 return err;
1603 }
1604
1605 for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
1606 otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
1607
1608 err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
1609 if (err) {
1610 crypto_unregister_skciphers(otx_cpt_skciphers,
1611 ARRAY_SIZE(otx_cpt_skciphers));
1612 return err;
1613 }
1614
1615 return 0;
1616 }
1617
1618 static inline void cpt_unregister_algs(void)
1619 {
1620 crypto_unregister_skciphers(otx_cpt_skciphers,
1621 ARRAY_SIZE(otx_cpt_skciphers));
1622 crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
1623 }
1624
1625 static int compare_func(const void *lptr, const void *rptr)
1626 {
1627 struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1628 struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1629
1630 if (ldesc->dev->devfn < rdesc->dev->devfn)
1631 return -1;
1632 if (ldesc->dev->devfn > rdesc->dev->devfn)
1633 return 1;
1634 return 0;
1635 }
1636
1637 static void swap_func(void *lptr, void *rptr, int size)
1638 {
1639 struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
1640 struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
1641 struct cpt_device_desc desc;
1642
1643 desc = *ldesc;
1644 *ldesc = *rdesc;
1645 *rdesc = desc;
1646 }
1647
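/*
 * Called by the VF driver once a device is ready: the device is added to
 * the per-engine-type table and, when the expected number of SE devices
 * has been seen, the crypto algorithms above are registered exactly once.
 * The table is re-sorted by devfn after every insertion so that queue
 * selection in get_se_device() stays deterministic.
 */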
1648 int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
1649 enum otx_cptpf_type pf_type,
1650 enum otx_cptvf_type engine_type,
1651 int num_queues, int num_devices)
1652 {
1653 int ret = 0;
1654 int count;
1655
1656 mutex_lock(&mutex);
1657 switch (engine_type) {
1658 case OTX_CPT_SE_TYPES:
1659 count = atomic_read(&se_devices.count);
1660 if (count >= CPT_MAX_VF_NUM) {
1661 dev_err(&pdev->dev, "No space to add a new device\n");
1662 ret = -ENOSPC;
1663 goto err;
1664 }
1665 se_devices.desc[count].pf_type = pf_type;
1666 se_devices.desc[count].num_queues = num_queues;
1667 se_devices.desc[count++].dev = pdev;
1668 atomic_inc(&se_devices.count);
1669
1670 if (atomic_read(&se_devices.count) == num_devices &&
1671 is_crypto_registered == false) {
1672 if (cpt_register_algs()) {
1673 dev_err(&pdev->dev,
1674 "Error in registering crypto algorithms\n");
1675 ret = -EINVAL;
1676 goto err;
1677 }
1678 try_module_get(mod);
1679 is_crypto_registered = true;
1680 }
1681 sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
1682 compare_func, swap_func);
1683 break;
1684
1685 case OTX_CPT_AE_TYPES:
1686 count = atomic_read(&ae_devices.count);
1687 if (count >= CPT_MAX_VF_NUM) {
1688 dev_err(&pdev->dev, "No space to add a new device\n");
1689 ret = -ENOSPC;
1690 goto err;
1691 }
1692 ae_devices.desc[count].pf_type = pf_type;
1693 ae_devices.desc[count].num_queues = num_queues;
1694 ae_devices.desc[count++].dev = pdev;
1695 atomic_inc(&ae_devices.count);
1696 sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
1697 compare_func, swap_func);
1698 break;
1699
1700 default:
1701 dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
1702 ret = BAD_OTX_CPTVF_TYPE;
1703 }
1704 err:
1705 mutex_unlock(&mutex);
1706 return ret;
1707 }
1708
1709 void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
1710 enum otx_cptvf_type engine_type)
1711 {
1712 struct cpt_device_table *dev_tbl;
1713 bool dev_found = false;
1714 int i, j, count;
1715
1716 mutex_lock(&mutex);
1717
1718 dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
1719 count = atomic_read(&dev_tbl->count);
1720 for (i = 0; i < count; i++)
1721 if (pdev == dev_tbl->desc[i].dev) {
1722 for (j = i; j < count-1; j++)
1723 dev_tbl->desc[j] = dev_tbl->desc[j+1];
1724 dev_found = true;
1725 break;
1726 }
1727
1728 if (!dev_found) {
1729 dev_err(&pdev->dev, "%s device not found\n", __func__);
1730 goto exit;
1731 }
1732
1733 if (engine_type != OTX_CPT_AE_TYPES) {
1734 if (atomic_dec_and_test(&se_devices.count) &&
1735 !is_any_alg_used()) {
1736 cpt_unregister_algs();
1737 module_put(mod);
1738 is_crypto_registered = false;
1739 }
1740 } else
1741 atomic_dec(&ae_devices.count);
1742 exit:
1743 mutex_unlock(&mutex);
1744 }
1745