// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

#define HPRE_ENABLE_HPCORE_SHIFT 7

/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48

/* capability register mask of driver */
#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
#define HPRE_DRV_DH_MASK_CAP		BIT(1)
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)

static DEFINE_MUTEX(hpre_algs_lock);
static unsigned int hpre_available_devs;

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g, we compute the public key:
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else, if base is the counterpart's public key, we compute the
	 * shared secret:
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to Hisilicon HPRE UM
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
	struct crypto_kpp *soft_tfm;
};

struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
	struct crypto_kpp *soft_tfm;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre *hpre;
	unsigned int key_sz;
	bool crt_g2_mode;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
	/* for high performance core */
	u8 enable_hpcore;
	bool fallback;
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
	} areq;
	int err;
	hpre_cb cb;
	struct timespec64 req_time;
};

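/*
 * Request contexts are padded so the hpre_asym_request can be aligned to
 * both the DMA alignment and HPRE_ALIGN_SZ. For example, if
 * crypto_dma_align() returned 8, hpre_align_sz() would be
 * ((8 - 1) | (64 - 1)) + 1 = 64.
 */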
static inline unsigned int hpre_align_sz(void)
{
	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
}

static inline unsigned int hpre_align_pd(void)
{
	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static void hpre_dfx_add_req_time(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;

	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

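/*
 * Map the caller's buffer for the device: if the scatterlist is a single
 * entry of exactly key_sz bytes it is DMA-mapped in place, otherwise the
 * data is copied into a zero-padded coherent bounce buffer. DH source
 * data is always bounced so it can be left-padded to key_sz.
 */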
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is DH's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

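/*
 * Parse a completed SQE: recover the request pointer from the tag field
 * and check the hardware "done" and error-type bits in dw0. Returns 0 on
 * success, -EINVAL (with a rate-limited log) on any hardware error.
 */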
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	unsigned int err, done, alg;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	GENMASK(10, 0)
#define HPRE_SQE_DONE_MASK	GENMASK(1, 0)
#define HPRE_ALG_TYPE_MASK	GENMASK(4, 0)
	*kreq = (void *)le64_to_cpu(sqe->tag);

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
	       HPRE_HW_ERR_MASK;
	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;
	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HPRE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
			    alg, done, err);

	return -EINVAL;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all)
		hisi_qm_free_qps(&ctx->qp, 1);

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

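/*
 * Compare the request's round-trip time against the configured DFX
 * threshold (in microseconds): elapsed seconds are scaled by
 * HPRE_DFX_SEC_TO_US and the nanosecond remainder is divided by
 * HPRE_DFX_US_TO_NS.
 */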
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *sqe = resp;

	h_req = (struct hpre_asym_request *)le64_to_cpu(sqe->tag);
	if (unlikely(!h_req)) {
		pr_err("Failed to get request, and qp_id is %u\n", qp->qp_id);
		return;
	}

	h_req->cb(h_req->ctx, resp);
}

static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	struct hpre *hpre;

	qp = hpre_create_qp(type);
	if (!qp) {
		ctx->qp = NULL;
		return -ENODEV;
	}

	qp->req_cb = hpre_alg_cb;
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;
	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;

	return 0;
}

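/*
 * Common SQE setup for RSA and DH: check that the destination buffer can
 * hold key_sz bytes, locate the aligned hpre_asym_request inside the
 * crypto request context, zero the SQE, and seed the in/out addresses
 * with DMA_MAPPING_ERROR so hpre_hw_data_clr_all() can tell what was
 * actually mapped.
 */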
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	hpre_dfx_add_req_time(h_req);
	msg->tag = cpu_to_le64((uintptr_t)h_req);

	return 0;
}

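/*
 * Queue an SQE on the QP, retrying up to HPRE_TRY_SEND_TIMES while the
 * queue reports -EBUSY; each attempt and each busy result is counted in
 * the DFX statistics.
 */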
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}

static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req)
{
	struct kpp_request *fb_req = kpp_request_ctx(req);
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm);
	kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data);
	kpp_request_set_input(fb_req, req->src, req->src_len);
	kpp_request_set_output(fb_req, req->dst, req->dst_len);

	return fb_req;
}

static int hpre_dh_generate_public_key(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct kpp_request *fb_req;

	if (ctx->fallback) {
		fb_req = hpre_dh_prepare_fb_req(req);
		return crypto_kpp_generate_public_key(fb_req);
	}

	return hpre_dh_compute_value(req);
}

static int hpre_dh_compute_shared_secret(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct kpp_request *fb_req;

	if (ctx->fallback) {
		fb_req = hpre_dh_prepare_fb_req(req);
		return crypto_kpp_compute_shared_secret(fb_req);
	}

	return hpre_dh_compute_value(req);
}

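/*
 * Only the well-known MODP group sizes (groups 1, 2, 5, 14, 15 and 16,
 * i.e. 768 to 4096 bits) are offloaded; any other prime size falls back
 * to the software implementation in hpre_dh_set_secret().
 */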
static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2, don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (!ctx->qp)
		return;

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	if (!ctx->qp)
		goto set_soft_secret;

	if (hpre_is_dh_params_length_valid(params.p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		goto set_soft_secret;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	ctx->fallback = false;
	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
set_soft_secret:
	ctx->fallback = true;
	return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len);
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	if (ctx->fallback)
		return crypto_kpp_maxsize(ctx->dh.soft_tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	const char *alg = kpp_alg_name(tfm);
	unsigned int reqsize;
	int ret;

	ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->dh.soft_tfm)) {
		pr_err("Failed to alloc dh tfm!\n");
		return PTR_ERR(ctx->dh.soft_tfm);
	}

	crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm));

	reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(),
		      sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm));
	kpp_set_reqsize(tfm, reqsize);

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret && ret != -ENODEV) {
		crypto_free_kpp(ctx->dh.soft_tfm);
		return ret;
	} else if (ret == -ENODEV) {
		ctx->fallback = true;
	}

	return 0;
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
	crypto_free_kpp(ctx->dh.soft_tfm);
}

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH	1024
#define _RSA_2048BITS_KEY_WDTH	2048
#define _RSA_3072BITS_KEY_WDTH	3072
#define _RSA_4096BITS_KEY_WDTH	4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For unsupported key size and unavailable devices, use soft tfm instead */
	if (ctx->fallback) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For unsupported key size and unavailable devices, use soft tfm instead */
	if (ctx->fallback) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz)) {
		ctx->fallback = true;
		return 0;
	}

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

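/*
 * Pack the five CRT parameters into one coherent buffer in the layout the
 * hardware expects (low address first): dq, dp, q, p, qinv, each occupying
 * half of key_sz bytes, as indexed by HPRE_CRT_Q/P/INV.
 */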
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If is_clear_all is set, all resources of the QP will be released. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (!ctx->qp)
		return;

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Judge whether the key is in CRT format:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* For an N-CRT key, the five CRT parameters total less than 5 bytes */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	ctx->fallback = false;
	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	if (!ctx->qp)
		return 0;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	if (!ctx->qp)
		return 0;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For unsupported key size and unavailable devices, use soft tfm instead */
	if (ctx->fallback)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
			     hpre_align_pd());

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret && ret != -ENODEV) {
		crypto_free_akcipher(ctx->rsa.soft_tfm);
		return ret;
	} else if (ret == -ENODEV) {
		ctx->fallback = true;
	}

	return 0;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

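/*
 * Reverse the byte order in place: the ecc core keeps curve parameters as
 * little-endian u64 digits, while the hardware expects big-endian data.
 */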
static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		swap(data[j], data[i]);
	}
}

static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * HPRE supports key widths of 192/224/256/384/521 bits, which are rounded
 * up as follows:
 * bits <= 256: use 256; 256 < bits <= 384: use 384; 384 < bits <= 576: use 576.
 * If the parameter's bit width is smaller, the high-order bits are
 * zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}

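/*
 * Copy one curve parameter, stored as ndigits little-endian u64 digits,
 * into a cur_sz-byte buffer (the last digit may be partial, e.g. for
 * P-521), then convert the result to the big-endian layout the hardware
 * uses.
 */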
static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}

static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);
	return 0;
}

static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}

static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}

static bool hpre_key_is_zero(const char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}

static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}

static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	unsigned int sz, sz_shift, curve_sz;
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	struct ecdh params;
	int ret;

	if (ctx->fallback)
		return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len);

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate private key */
	if (!params.key || !params.key_size) {
		params.key = key;
		curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
		if (!curve_sz) {
			dev_err(dev, "Invalid curve size!\n");
			return -EINVAL;
		}

		params.key_size = curve_sz - 1;
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}

static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}

static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	/* Do unmap before data processing */
	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	hpre_dfx_add_req_time(h_req);
	msg->tag = cpu_to_le64((uintptr_t)h_req);
	return 0;
}

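/*
 * The source scatterlist holds the peer's point as gx || gy, each half
 * len / 2 bytes long. Both halves are copied into a coherent buffer and
 * left-padded with zeros to key_sz bytes apiece, since the hardware works
 * on fixed-width operands.
 */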
static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data includes gx and gy. */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;

	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static int hpre_ecdh_generate_public_key(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	int ret;

	if (ctx->fallback) {
		kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
		ret = crypto_kpp_generate_public_key(req);
		kpp_request_set_tfm(req, tfm);
		return ret;
	}

	return hpre_ecdh_compute_value(req);
}

static int hpre_ecdh_compute_shared_secret(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	int ret;

	if (ctx->fallback) {
		kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
		ret = crypto_kpp_compute_shared_secret(req);
		kpp_request_set_tfm(req, tfm);
		return ret;
	}

	return hpre_ecdh_compute_value(req);
}

static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	if (ctx->fallback)
		return crypto_kpp_maxsize(ctx->ecdh.soft_tfm);

	/* max size is the pub_key_size, which includes x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	const char *alg = kpp_alg_name(tfm);
	int ret;

	ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
	if (!ret) {
		kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
		return 0;
	} else if (ret && ret != -ENODEV) {
		return ret;
	}

	ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ecdh.soft_tfm)) {
		pr_err("Failed to alloc %s tfm!\n", alg);
		return PTR_ERR(ctx->ecdh.soft_tfm);
	}

	crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm));
	ctx->fallback = true;

	return 0;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	return hpre_ecdh_init_tfm(tfm);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;
	ctx->enable_hpcore = 1;

	return hpre_ecdh_init_tfm(tfm);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	return hpre_ecdh_init_tfm(tfm);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	if (ctx->fallback) {
		crypto_free_kpp(ctx->ecdh.soft_tfm);
		return;
	}

	hpre_ecc_clear_ctx(ctx, true);
}

static struct akcipher_alg rsa = {
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_generate_public_key,
	.compute_shared_secret = hpre_dh_compute_shared_secret,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_generate_public_key,
		.compute_shared_secret = hpre_ecdh_compute_shared_secret,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_generate_public_key,
		.compute_shared_secret = hpre_ecdh_compute_shared_secret,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_generate_public_key,
		.compute_shared_secret = hpre_ecdh_compute_shared_secret,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
		},
	}
};

static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

int hpre_algs_register(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&hpre_algs_lock);
	if (hpre_available_devs) {
		hpre_available_devs++;
		goto unlock;
	}

	ret = hpre_register_rsa(qm);
	if (ret)
		goto unlock;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	hpre_available_devs++;
	mutex_unlock(&hpre_algs_lock);

	return ret;

unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
unlock:
	mutex_unlock(&hpre_algs_lock);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	mutex_lock(&hpre_algs_lock);
	if (--hpre_available_devs)
		goto unlock;

	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);

unlock:
	mutex_unlock(&hpre_algs_lock);
}