1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/akcipher.h>
4 #include <crypto/curve25519.h>
5 #include <crypto/dh.h>
6 #include <crypto/ecc_curve.h>
7 #include <crypto/ecdh.h>
8 #include <crypto/rng.h>
9 #include <crypto/internal/akcipher.h>
10 #include <crypto/internal/kpp.h>
11 #include <crypto/internal/rsa.h>
12 #include <crypto/kpp.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/fips.h>
16 #include <linux/module.h>
17 #include <linux/time.h>
18 #include "hpre.h"
19
20 struct hpre_ctx;
21
22 #define HPRE_CRYPTO_ALG_PRI 1000
23 #define HPRE_ALIGN_SZ 64
24 #define HPRE_BITS_2_BYTES_SHIFT 3
25 #define HPRE_RSA_512BITS_KSZ 64
26 #define HPRE_RSA_1536BITS_KSZ 192
27 #define HPRE_CRT_PRMS 5
28 #define HPRE_CRT_Q 2
29 #define HPRE_CRT_P 3
30 #define HPRE_CRT_INV 4
31 #define HPRE_DH_G_FLAG 0x02
32 #define HPRE_TRY_SEND_TIMES 100
33 #define HPRE_INVLD_REQ_ID (-1)
34
35 #define HPRE_SQE_ALG_BITS 5
36 #define HPRE_SQE_DONE_SHIFT 30
37 #define HPRE_DH_MAX_P_SZ 512
38
39 #define HPRE_DFX_SEC_TO_US 1000000
40 #define HPRE_DFX_US_TO_NS 1000
41
42 #define HPRE_ENABLE_HPCORE_SHIFT 7
43
44 /* due to nist p521 */
45 #define HPRE_ECC_MAX_KSZ 66
46
47 /* size in bytes of the n prime */
48 #define HPRE_ECC_NIST_P192_N_SIZE 24
49 #define HPRE_ECC_NIST_P256_N_SIZE 32
50 #define HPRE_ECC_NIST_P384_N_SIZE 48
51
52 /* size in bytes */
53 #define HPRE_ECC_HW256_KSZ_B 32
54 #define HPRE_ECC_HW384_KSZ_B 48
55
56 /* capability register mask of driver */
57 #define HPRE_DRV_RSA_MASK_CAP BIT(0)
58 #define HPRE_DRV_DH_MASK_CAP BIT(1)
59 #define HPRE_DRV_ECDH_MASK_CAP BIT(2)
60 #define HPRE_DRV_X25519_MASK_CAP BIT(5)
61
62 static DEFINE_MUTEX(hpre_algs_lock);
63 static unsigned int hpre_available_devs;
64
65 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
66
67 struct hpre_rsa_ctx {
68 /* low address: e--->n */
69 char *pubkey;
70 dma_addr_t dma_pubkey;
71
72 /* low address: d--->n */
73 char *prikey;
74 dma_addr_t dma_prikey;
75
76 /* low address: dq->dp->q->p->qinv */
77 char *crt_prikey;
78 dma_addr_t dma_crt_prikey;
79
80 struct crypto_akcipher *soft_tfm;
81 };
82
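/*
 * DH key material layout (see hpre_dh_set_params()): xa_p is a single
 * DMA-coherent buffer of 2 * key_sz bytes holding the private value xa in
 * the low half and the prime p in the high half; g is a separate key_sz
 * buffer holding the generator left-padded with zeros. When g == 2 the
 * generator buffer is skipped and crt_g2_mode is used instead.
 */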
83 struct hpre_dh_ctx {
84 /*
85 * If base is g we compute the public key
86 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
87 * else if base is the counterpart public key we
88 * compute the shared secret
89 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
90 * low address: d--->n, please refer to Hisilicon HPRE UM
91 */
92 char *xa_p;
93 dma_addr_t dma_xa_p;
94
95 char *g; /* m */
96 dma_addr_t dma_g;
97 };
98
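/*
 * ECDH parameter layout (see hpre_ecdh_set_param()): p is one DMA-coherent
 * allocation of 8 * key_sz bytes laid out as p | a | k | b, and g points
 * 4 * key_sz bytes into the same allocation, holding the base point as
 * x | y. Each field is left-padded with zeros to key_sz bytes.
 */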
99 struct hpre_ecdh_ctx {
100 /* low address: p->a->k->b */
101 unsigned char *p;
102 dma_addr_t dma_p;
103
104 /* low address: x->y */
105 unsigned char *g;
106 dma_addr_t dma_g;
107 };
108
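/*
 * Curve25519 parameter layout (see hpre_curve25519_set_param()): p is one
 * DMA-coherent allocation of 4 * key_sz bytes laid out as p | a | k, and g
 * points 3 * key_sz bytes into it, holding the base point x coordinate.
 */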
109 struct hpre_curve25519_ctx {
110 /* low address: p->a->k */
111 unsigned char *p;
112 dma_addr_t dma_p;
113
114 /* gx coordinate */
115 unsigned char *g;
116 dma_addr_t dma_g;
117 };
118
119 struct hpre_ctx {
120 struct hisi_qp *qp;
121 struct device *dev;
122 struct hpre_asym_request **req_list;
123 struct hpre *hpre;
124 spinlock_t req_lock;
125 unsigned int key_sz;
126 bool crt_g2_mode;
127 struct idr req_idr;
128 union {
129 struct hpre_rsa_ctx rsa;
130 struct hpre_dh_ctx dh;
131 struct hpre_ecdh_ctx ecdh;
132 struct hpre_curve25519_ctx curve25519;
133 };
134 /* for ecc algorithms */
135 unsigned int curve_id;
136 /* for high performance core */
137 u8 enable_hpcore;
138 };
139
140 struct hpre_asym_request {
141 char *src;
142 char *dst;
143 struct hpre_sqe req;
144 struct hpre_ctx *ctx;
145 union {
146 struct akcipher_request *rsa;
147 struct kpp_request *dh;
148 struct kpp_request *ecdh;
149 struct kpp_request *curve25519;
150 } areq;
151 int err;
152 int req_id;
153 hpre_cb cb;
154 struct timespec64 req_time;
155 };
156
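/*
 * Request-context alignment helpers: hpre_align_sz() rounds the driver's
 * 64-byte alignment up to at least the DMA alignment reported by
 * crypto_dma_align(), and hpre_align_pd() is the extra padding reserved in
 * the tfm reqsize so that PTR_ALIGN(req_ctx, hpre_align_sz()) in the
 * request paths always stays inside the allocated area.
 */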
157 static inline unsigned int hpre_align_sz(void)
158 {
159 return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
160 }
161
162 static inline unsigned int hpre_align_pd(void)
163 {
164 return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
165 }
166
167 static int hpre_alloc_req_id(struct hpre_ctx *ctx)
168 {
169 unsigned long flags;
170 int id;
171
172 spin_lock_irqsave(&ctx->req_lock, flags);
173 id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
174 spin_unlock_irqrestore(&ctx->req_lock, flags);
175
176 return id;
177 }
178
179 static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
180 {
181 unsigned long flags;
182
183 spin_lock_irqsave(&ctx->req_lock, flags);
184 idr_remove(&ctx->req_idr, req_id);
185 spin_unlock_irqrestore(&ctx->req_lock, flags);
186 }
187
188 static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
189 {
190 struct hpre_ctx *ctx;
191 struct hpre_dfx *dfx;
192 int id;
193
194 ctx = hpre_req->ctx;
195 id = hpre_alloc_req_id(ctx);
196 if (unlikely(id < 0))
197 return -EINVAL;
198
199 ctx->req_list[id] = hpre_req;
200 hpre_req->req_id = id;
201
202 dfx = ctx->hpre->debug.dfx;
203 if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
204 ktime_get_ts64(&hpre_req->req_time);
205
206 return id;
207 }
208
209 static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
210 {
211 struct hpre_ctx *ctx = hpre_req->ctx;
212 int id = hpre_req->req_id;
213
214 if (hpre_req->req_id >= 0) {
215 hpre_req->req_id = HPRE_INVLD_REQ_ID;
216 ctx->req_list[id] = NULL;
217 hpre_free_req_id(ctx, id);
218 }
219 }
220
221 static struct hisi_qp *hpre_get_qp_and_start(u8 type)
222 {
223 struct hisi_qp *qp;
224 int ret;
225
226 qp = hpre_create_qp(type);
227 if (!qp) {
228 pr_err("Can not create hpre qp!\n");
229 return ERR_PTR(-ENODEV);
230 }
231
232 ret = hisi_qm_start_qp(qp, 0);
233 if (ret < 0) {
234 hisi_qm_free_qps(&qp, 1);
235 pci_err(qp->qm->pdev, "Can not start qp!\n");
236 return ERR_PTR(-EINVAL);
237 }
238
239 return qp;
240 }
241
242 static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
243 struct scatterlist *data, unsigned int len,
244 int is_src, dma_addr_t *tmp)
245 {
246 struct device *dev = hpre_req->ctx->dev;
247 enum dma_data_direction dma_dir;
248
249 if (is_src) {
250 hpre_req->src = NULL;
251 dma_dir = DMA_TO_DEVICE;
252 } else {
253 hpre_req->dst = NULL;
254 dma_dir = DMA_FROM_DEVICE;
255 }
256 *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
257 if (unlikely(dma_mapping_error(dev, *tmp))) {
258 dev_err(dev, "dma map data err!\n");
259 return -ENOMEM;
260 }
261
262 return 0;
263 }
264
265 static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
266 struct scatterlist *data, unsigned int len,
267 int is_src, dma_addr_t *tmp)
268 {
269 struct hpre_ctx *ctx = hpre_req->ctx;
270 struct device *dev = ctx->dev;
271 void *ptr;
272 int shift;
273
274 shift = ctx->key_sz - len;
275 if (unlikely(shift < 0))
276 return -EINVAL;
277
278 ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
279 if (unlikely(!ptr))
280 return -ENOMEM;
281
282 if (is_src) {
283 scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
284 hpre_req->src = ptr;
285 } else {
286 hpre_req->dst = ptr;
287 }
288
289 return 0;
290 }
291
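/*
 * Map the source or destination scatterlist for the hardware. A single-entry
 * scatterlist of exactly key_sz bytes is DMA-mapped in place; anything else
 * (and any DH source, which must be reformatted) is staged through a
 * DMA-coherent bounce buffer of key_sz bytes, with source data copied in
 * left-padded with zeros.
 */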
292 static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
293 struct scatterlist *data, unsigned int len,
294 int is_src, int is_dh)
295 {
296 struct hpre_sqe *msg = &hpre_req->req;
297 struct hpre_ctx *ctx = hpre_req->ctx;
298 dma_addr_t tmp = 0;
299 int ret;
300
301 /* when the data is dh's source, we should format it */
302 if ((sg_is_last(data) && len == ctx->key_sz) &&
303 ((is_dh && !is_src) || !is_dh))
304 ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
305 else
306 ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
307
308 if (unlikely(ret))
309 return ret;
310
311 if (is_src)
312 msg->in = cpu_to_le64(tmp);
313 else
314 msg->out = cpu_to_le64(tmp);
315
316 return 0;
317 }
318
319 static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
320 struct hpre_asym_request *req,
321 struct scatterlist *dst,
322 struct scatterlist *src)
323 {
324 struct device *dev = ctx->dev;
325 struct hpre_sqe *sqe = &req->req;
326 dma_addr_t tmp;
327
328 tmp = le64_to_cpu(sqe->in);
329 if (unlikely(dma_mapping_error(dev, tmp)))
330 return;
331
332 if (src) {
333 if (req->src)
334 dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
335 else
336 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
337 }
338
339 tmp = le64_to_cpu(sqe->out);
340 if (unlikely(dma_mapping_error(dev, tmp)))
341 return;
342
343 if (req->dst) {
344 if (dst)
345 scatterwalk_map_and_copy(req->dst, dst, 0,
346 ctx->key_sz, 1);
347 dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
348 } else {
349 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
350 }
351 }
352
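/*
 * Post-process one hardware completion: sqe->tag carries the request id,
 * and dw0 is decoded as alg type in bits [4:0], hardware error type in
 * bits [15:5] and the done field in bits [31:30].
 */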
353 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
354 void **kreq)
355 {
356 struct hpre_asym_request *req;
357 unsigned int err, done, alg;
358 int id;
359
360 #define HPRE_NO_HW_ERR 0
361 #define HPRE_HW_TASK_DONE 3
362 #define HPRE_HW_ERR_MASK GENMASK(10, 0)
363 #define HPRE_SQE_DONE_MASK GENMASK(1, 0)
364 #define HPRE_ALG_TYPE_MASK GENMASK(4, 0)
365 id = (int)le16_to_cpu(sqe->tag);
366 req = ctx->req_list[id];
367 hpre_rm_req_from_ctx(req);
368 *kreq = req;
369
370 err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
371 HPRE_HW_ERR_MASK;
372
373 done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
374 HPRE_SQE_DONE_MASK;
375
376 if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
377 return 0;
378
379 alg = le32_to_cpu(sqe->dw0) & HPRE_ALG_TYPE_MASK;
380 dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
381 alg, done, err);
382
383 return -EINVAL;
384 }
385
386 static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
387 {
388 struct hpre *hpre;
389
390 if (!ctx || !qp || qlen < 0)
391 return -EINVAL;
392
393 spin_lock_init(&ctx->req_lock);
394 ctx->qp = qp;
395 ctx->dev = &qp->qm->pdev->dev;
396
397 hpre = container_of(ctx->qp->qm, struct hpre, qm);
398 ctx->hpre = hpre;
399 ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
400 if (!ctx->req_list)
401 return -ENOMEM;
402 ctx->key_sz = 0;
403 ctx->crt_g2_mode = false;
404 idr_init(&ctx->req_idr);
405
406 return 0;
407 }
408
409 static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
410 {
411 if (is_clear_all) {
412 idr_destroy(&ctx->req_idr);
413 kfree(ctx->req_list);
414 hisi_qm_free_qps(&ctx->qp, 1);
415 }
416
417 ctx->crt_g2_mode = false;
418 ctx->key_sz = 0;
419 }
420
421 static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
422 u64 overtime_thrhld)
423 {
424 struct timespec64 reply_time;
425 u64 time_use_us;
426
427 ktime_get_ts64(&reply_time);
428 time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
429 HPRE_DFX_SEC_TO_US +
430 (reply_time.tv_nsec - req->req_time.tv_nsec) /
431 HPRE_DFX_US_TO_NS;
432
433 if (time_use_us <= overtime_thrhld)
434 return false;
435
436 return true;
437 }
438
439 static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
440 {
441 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
442 struct hpre_asym_request *req;
443 struct kpp_request *areq;
444 u64 overtime_thrhld;
445 int ret;
446
447 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
448 areq = req->areq.dh;
449 areq->dst_len = ctx->key_sz;
450
451 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
452 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
453 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
454
455 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
456 kpp_request_complete(areq, ret);
457 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
458 }
459
460 static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
461 {
462 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
463 struct hpre_asym_request *req;
464 struct akcipher_request *areq;
465 u64 overtime_thrhld;
466 int ret;
467
468 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
469
470 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
471 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
472 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
473
474 areq = req->areq.rsa;
475 areq->dst_len = ctx->key_sz;
476 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
477 akcipher_request_complete(areq, ret);
478 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
479 }
480
481 static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
482 {
483 struct hpre_ctx *ctx = qp->qp_ctx;
484 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
485 struct hpre_sqe *sqe = resp;
486 struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
487
488 if (unlikely(!req)) {
489 atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
490 return;
491 }
492
493 req->cb(ctx, resp);
494 }
495
496 static void hpre_stop_qp_and_put(struct hisi_qp *qp)
497 {
498 hisi_qm_stop_qp(qp);
499 hisi_qm_free_qps(&qp, 1);
500 }
501
502 static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
503 {
504 struct hisi_qp *qp;
505 int ret;
506
507 qp = hpre_get_qp_and_start(type);
508 if (IS_ERR(qp))
509 return PTR_ERR(qp);
510
511 qp->qp_ctx = ctx;
512 qp->req_cb = hpre_alg_cb;
513
514 ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
515 if (ret)
516 hpre_stop_qp_and_put(qp);
517
518 return ret;
519 }
520
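/*
 * Build the common part of an RSA/DH SQE: in/out are preset to
 * DMA_MAPPING_ERROR so the cleanup path can tell mapped from unmapped
 * addresses, task_len1 encodes the key size in 64-bit words minus one,
 * and the allocated request id is stored in the tag field.
 */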
521 static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
522 {
523 struct hpre_asym_request *h_req;
524 struct hpre_sqe *msg;
525 int req_id;
526 void *tmp;
527
528 if (is_rsa) {
529 struct akcipher_request *akreq = req;
530
531 if (akreq->dst_len < ctx->key_sz) {
532 akreq->dst_len = ctx->key_sz;
533 return -EOVERFLOW;
534 }
535
536 tmp = akcipher_request_ctx(akreq);
537 h_req = PTR_ALIGN(tmp, hpre_align_sz());
538 h_req->cb = hpre_rsa_cb;
539 h_req->areq.rsa = akreq;
540 msg = &h_req->req;
541 memset(msg, 0, sizeof(*msg));
542 } else {
543 struct kpp_request *kreq = req;
544
545 if (kreq->dst_len < ctx->key_sz) {
546 kreq->dst_len = ctx->key_sz;
547 return -EOVERFLOW;
548 }
549
550 tmp = kpp_request_ctx(kreq);
551 h_req = PTR_ALIGN(tmp, hpre_align_sz());
552 h_req->cb = hpre_dh_cb;
553 h_req->areq.dh = kreq;
554 msg = &h_req->req;
555 memset(msg, 0, sizeof(*msg));
556 msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
557 }
558
559 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
560 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
561 msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
562 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
563 h_req->ctx = ctx;
564
565 req_id = hpre_add_req_to_ctx(h_req);
566 if (req_id < 0)
567 return -EBUSY;
568
569 msg->tag = cpu_to_le16((u16)req_id);
570
571 return 0;
572 }
573
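/*
 * Queue one SQE to the hardware, retrying up to HPRE_TRY_SEND_TIMES when
 * the queue reports -EBUSY; the DFX counters track sends, busy retries and
 * final failures.
 */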
574 static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
575 {
576 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
577 int ctr = 0;
578 int ret;
579
580 do {
581 atomic64_inc(&dfx[HPRE_SEND_CNT].value);
582 spin_lock_bh(&ctx->req_lock);
583 ret = hisi_qp_send(ctx->qp, msg);
584 spin_unlock_bh(&ctx->req_lock);
585 if (ret != -EBUSY)
586 break;
587 atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
588 } while (ctr++ < HPRE_TRY_SEND_TIMES);
589
590 if (likely(!ret))
591 return ret;
592
593 if (ret != -EBUSY)
594 atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
595
596 return ret;
597 }
598
599 static int hpre_dh_compute_value(struct kpp_request *req)
600 {
601 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
602 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
603 void *tmp = kpp_request_ctx(req);
604 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
605 struct hpre_sqe *msg = &hpre_req->req;
606 int ret;
607
608 ret = hpre_msg_request_set(ctx, req, false);
609 if (unlikely(ret))
610 return ret;
611
612 if (req->src) {
613 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
614 if (unlikely(ret))
615 goto clear_all;
616 } else {
617 msg->in = cpu_to_le64(ctx->dh.dma_g);
618 }
619
620 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
621 if (unlikely(ret))
622 goto clear_all;
623
624 if (ctx->crt_g2_mode && !req->src)
625 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
626 else
627 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
628
629 /* success */
630 ret = hpre_send(ctx, msg);
631 if (likely(!ret))
632 return -EINPROGRESS;
633
634 clear_all:
635 hpre_rm_req_from_ctx(hpre_req);
636 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
637
638 return ret;
639 }
640
641 static int hpre_is_dh_params_length_valid(unsigned int key_sz)
642 {
643 #define _HPRE_DH_GRP1 768
644 #define _HPRE_DH_GRP2 1024
645 #define _HPRE_DH_GRP5 1536
646 #define _HPRE_DH_GRP14 2048
647 #define _HPRE_DH_GRP15 3072
648 #define _HPRE_DH_GRP16 4096
649 switch (key_sz) {
650 case _HPRE_DH_GRP1:
651 case _HPRE_DH_GRP2:
652 case _HPRE_DH_GRP5:
653 case _HPRE_DH_GRP14:
654 case _HPRE_DH_GRP15:
655 case _HPRE_DH_GRP16:
656 return 0;
657 default:
658 return -EINVAL;
659 }
660 }
661
662 static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
663 {
664 struct device *dev = ctx->dev;
665 unsigned int sz;
666
667 if (params->p_size > HPRE_DH_MAX_P_SZ)
668 return -EINVAL;
669
670 if (hpre_is_dh_params_length_valid(params->p_size <<
671 HPRE_BITS_2_BYTES_SHIFT))
672 return -EINVAL;
673
674 sz = ctx->key_sz = params->p_size;
675 ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
676 &ctx->dh.dma_xa_p, GFP_KERNEL);
677 if (!ctx->dh.xa_p)
678 return -ENOMEM;
679
680 memcpy(ctx->dh.xa_p + sz, params->p, sz);
681
682 /* If g equals 2 don't copy it */
683 if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
684 ctx->crt_g2_mode = true;
685 return 0;
686 }
687
688 ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
689 if (!ctx->dh.g) {
690 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
691 ctx->dh.dma_xa_p);
692 ctx->dh.xa_p = NULL;
693 return -ENOMEM;
694 }
695
696 memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
697
698 return 0;
699 }
700
701 static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
702 {
703 struct device *dev = ctx->dev;
704 unsigned int sz = ctx->key_sz;
705
706 if (is_clear_all)
707 hisi_qm_stop_qp(ctx->qp);
708
709 if (ctx->dh.g) {
710 dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
711 ctx->dh.g = NULL;
712 }
713
714 if (ctx->dh.xa_p) {
715 memzero_explicit(ctx->dh.xa_p, sz);
716 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
717 ctx->dh.dma_xa_p);
718 ctx->dh.xa_p = NULL;
719 }
720
721 hpre_ctx_clear(ctx, is_clear_all);
722 }
723
724 static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
725 unsigned int len)
726 {
727 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
728 struct dh params;
729 int ret;
730
731 if (crypto_dh_decode_key(buf, len, &params) < 0)
732 return -EINVAL;
733
734 /* Free old secret if any */
735 hpre_dh_clear_ctx(ctx, false);
736
737 ret = hpre_dh_set_params(ctx, &params);
738 if (ret < 0)
739 goto err_clear_ctx;
740
741 memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
742 params.key_size);
743
744 return 0;
745
746 err_clear_ctx:
747 hpre_dh_clear_ctx(ctx, false);
748 return ret;
749 }
750
751 static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
752 {
753 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
754
755 return ctx->key_sz;
756 }
757
758 static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
759 {
760 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
761
762 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
763
764 return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
765 }
766
767 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
768 {
769 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
770
771 hpre_dh_clear_ctx(ctx, true);
772 }
773
774 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
775 {
776 while (!**ptr && *len) {
777 (*ptr)++;
778 (*len)--;
779 }
780 }
781
782 static bool hpre_rsa_key_size_is_support(unsigned int len)
783 {
784 unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
785
786 #define _RSA_1024BITS_KEY_WDTH 1024
787 #define _RSA_2048BITS_KEY_WDTH 2048
788 #define _RSA_3072BITS_KEY_WDTH 3072
789 #define _RSA_4096BITS_KEY_WDTH 4096
790
791 switch (bits) {
792 case _RSA_1024BITS_KEY_WDTH:
793 case _RSA_2048BITS_KEY_WDTH:
794 case _RSA_3072BITS_KEY_WDTH:
795 case _RSA_4096BITS_KEY_WDTH:
796 return true;
797 default:
798 return false;
799 }
800 }
801
802 static int hpre_rsa_enc(struct akcipher_request *req)
803 {
804 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
805 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
806 void *tmp = akcipher_request_ctx(req);
807 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
808 struct hpre_sqe *msg = &hpre_req->req;
809 int ret;
810
811 /* For 512 and 1536 bits key size, use soft tfm instead */
812 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
813 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
814 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
815 ret = crypto_akcipher_encrypt(req);
816 akcipher_request_set_tfm(req, tfm);
817 return ret;
818 }
819
820 if (unlikely(!ctx->rsa.pubkey))
821 return -EINVAL;
822
823 ret = hpre_msg_request_set(ctx, req, true);
824 if (unlikely(ret))
825 return ret;
826
827 msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
828 msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
829
830 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
831 if (unlikely(ret))
832 goto clear_all;
833
834 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
835 if (unlikely(ret))
836 goto clear_all;
837
838 /* success */
839 ret = hpre_send(ctx, msg);
840 if (likely(!ret))
841 return -EINPROGRESS;
842
843 clear_all:
844 hpre_rm_req_from_ctx(hpre_req);
845 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
846
847 return ret;
848 }
849
850 static int hpre_rsa_dec(struct akcipher_request *req)
851 {
852 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
853 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
854 void *tmp = akcipher_request_ctx(req);
855 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
856 struct hpre_sqe *msg = &hpre_req->req;
857 int ret;
858
859 /* For 512 and 1536 bits key size, use soft tfm instead */
860 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
861 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
862 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
863 ret = crypto_akcipher_decrypt(req);
864 akcipher_request_set_tfm(req, tfm);
865 return ret;
866 }
867
868 if (unlikely(!ctx->rsa.prikey))
869 return -EINVAL;
870
871 ret = hpre_msg_request_set(ctx, req, true);
872 if (unlikely(ret))
873 return ret;
874
875 if (ctx->crt_g2_mode) {
876 msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
877 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
878 HPRE_ALG_NC_CRT);
879 } else {
880 msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
881 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
882 HPRE_ALG_NC_NCRT);
883 }
884
885 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
886 if (unlikely(ret))
887 goto clear_all;
888
889 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
890 if (unlikely(ret))
891 goto clear_all;
892
893 /* success */
894 ret = hpre_send(ctx, msg);
895 if (likely(!ret))
896 return -EINPROGRESS;
897
898 clear_all:
899 hpre_rm_req_from_ctx(hpre_req);
900 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
901
902 return ret;
903 }
904
905 static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
906 size_t vlen, bool private)
907 {
908 const char *ptr = value;
909
910 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
911
912 ctx->key_sz = vlen;
913
914 /* if invalid key size provided, we use software tfm */
915 if (!hpre_rsa_key_size_is_support(ctx->key_sz))
916 return 0;
917
918 ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
919 &ctx->rsa.dma_pubkey,
920 GFP_KERNEL);
921 if (!ctx->rsa.pubkey)
922 return -ENOMEM;
923
924 if (private) {
925 ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
926 &ctx->rsa.dma_prikey,
927 GFP_KERNEL);
928 if (!ctx->rsa.prikey) {
929 dma_free_coherent(ctx->dev, vlen << 1,
930 ctx->rsa.pubkey,
931 ctx->rsa.dma_pubkey);
932 ctx->rsa.pubkey = NULL;
933 return -ENOMEM;
934 }
935 memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
936 }
937 memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
938
939 /* Using hardware HPRE to do RSA */
940 return 1;
941 }
942
943 static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
944 size_t vlen)
945 {
946 const char *ptr = value;
947
948 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
949
950 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
951 return -EINVAL;
952
953 memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
954
955 return 0;
956 }
957
958 static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
959 size_t vlen)
960 {
961 const char *ptr = value;
962
963 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
964
965 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
966 return -EINVAL;
967
968 memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
969
970 return 0;
971 }
972
973 static int hpre_crt_para_get(char *para, size_t para_sz,
974 const char *raw, size_t raw_sz)
975 {
976 const char *ptr = raw;
977 size_t len = raw_sz;
978
979 hpre_rsa_drop_leading_zeros(&ptr, &len);
980 if (!len || len > para_sz)
981 return -EINVAL;
982
983 memcpy(para + para_sz - len, ptr, len);
984
985 return 0;
986 }
987
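/*
 * CRT private key layout: five half-key-size fields stored back to back as
 * dq | dp | q | p | qinv (matching HPRE_CRT_Q/P/INV), each left-padded with
 * zeros by hpre_crt_para_get().
 */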
988 static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
989 {
990 unsigned int hlf_ksz = ctx->key_sz >> 1;
991 struct device *dev = ctx->dev;
992 u64 offset;
993 int ret;
994
995 ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
996 &ctx->rsa.dma_crt_prikey,
997 GFP_KERNEL);
998 if (!ctx->rsa.crt_prikey)
999 return -ENOMEM;
1000
1001 ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
1002 rsa_key->dq, rsa_key->dq_sz);
1003 if (ret)
1004 goto free_key;
1005
1006 offset = hlf_ksz;
1007 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1008 rsa_key->dp, rsa_key->dp_sz);
1009 if (ret)
1010 goto free_key;
1011
1012 offset = hlf_ksz * HPRE_CRT_Q;
1013 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1014 rsa_key->q, rsa_key->q_sz);
1015 if (ret)
1016 goto free_key;
1017
1018 offset = hlf_ksz * HPRE_CRT_P;
1019 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1020 rsa_key->p, rsa_key->p_sz);
1021 if (ret)
1022 goto free_key;
1023
1024 offset = hlf_ksz * HPRE_CRT_INV;
1025 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1026 rsa_key->qinv, rsa_key->qinv_sz);
1027 if (ret)
1028 goto free_key;
1029
1030 ctx->crt_g2_mode = true;
1031
1032 return 0;
1033
1034 free_key:
1035 offset = hlf_ksz * HPRE_CRT_PRMS;
1036 memzero_explicit(ctx->rsa.crt_prikey, offset);
1037 dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
1038 ctx->rsa.dma_crt_prikey);
1039 ctx->rsa.crt_prikey = NULL;
1040 ctx->crt_g2_mode = false;
1041
1042 return ret;
1043 }
1044
1045 /* If it is clear all, all the resources of the QP will be cleaned. */
1046 static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
1047 {
1048 unsigned int half_key_sz = ctx->key_sz >> 1;
1049 struct device *dev = ctx->dev;
1050
1051 if (is_clear_all)
1052 hisi_qm_stop_qp(ctx->qp);
1053
1054 if (ctx->rsa.pubkey) {
1055 dma_free_coherent(dev, ctx->key_sz << 1,
1056 ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
1057 ctx->rsa.pubkey = NULL;
1058 }
1059
1060 if (ctx->rsa.crt_prikey) {
1061 memzero_explicit(ctx->rsa.crt_prikey,
1062 half_key_sz * HPRE_CRT_PRMS);
1063 dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
1064 ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
1065 ctx->rsa.crt_prikey = NULL;
1066 }
1067
1068 if (ctx->rsa.prikey) {
1069 memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
1070 dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
1071 ctx->rsa.dma_prikey);
1072 ctx->rsa.prikey = NULL;
1073 }
1074
1075 hpre_ctx_clear(ctx, is_clear_all);
1076 }
1077
1078 /*
1079 * we should judge whether it is a CRT key or not:
1080 * CRT: return true, N-CRT: return false.
1081 */
1082 static bool hpre_is_crt_key(struct rsa_key *key)
1083 {
1084 u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
1085 key->qinv_sz;
1086
1087 #define LEN_OF_NCRT_PARA 5
1088
1089 /* an N-CRT key has fewer than 5 CRT parameters */
1090 return len > LEN_OF_NCRT_PARA;
1091 }
1092
1093 static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
1094 unsigned int keylen, bool private)
1095 {
1096 struct rsa_key rsa_key;
1097 int ret;
1098
1099 hpre_rsa_clear_ctx(ctx, false);
1100
1101 if (private)
1102 ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1103 else
1104 ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1105 if (ret < 0)
1106 return ret;
1107
1108 ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
1109 if (ret <= 0)
1110 return ret;
1111
1112 if (private) {
1113 ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1114 if (ret < 0)
1115 goto free;
1116
1117 if (hpre_is_crt_key(&rsa_key)) {
1118 ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
1119 if (ret < 0)
1120 goto free;
1121 }
1122 }
1123
1124 ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1125 if (ret < 0)
1126 goto free;
1127
1128 if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
1129 ret = -EINVAL;
1130 goto free;
1131 }
1132
1133 return 0;
1134
1135 free:
1136 hpre_rsa_clear_ctx(ctx, false);
1137 return ret;
1138 }
1139
1140 static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1141 unsigned int keylen)
1142 {
1143 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1144 int ret;
1145
1146 ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
1147 if (ret)
1148 return ret;
1149
1150 return hpre_rsa_setkey(ctx, key, keylen, false);
1151 }
1152
1153 static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1154 unsigned int keylen)
1155 {
1156 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1157 int ret;
1158
1159 ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
1160 if (ret)
1161 return ret;
1162
1163 return hpre_rsa_setkey(ctx, key, keylen, true);
1164 }
1165
1166 static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
1167 {
1168 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1169
1170 /* For 512 and 1536 bits key size, use soft tfm instead */
1171 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
1172 ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
1173 return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
1174
1175 return ctx->key_sz;
1176 }
1177
1178 static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
1179 {
1180 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1181 int ret;
1182
1183 ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
1184 if (IS_ERR(ctx->rsa.soft_tfm)) {
1185 pr_err("Can not alloc_akcipher!\n");
1186 return PTR_ERR(ctx->rsa.soft_tfm);
1187 }
1188
1189 akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
1190 hpre_align_pd());
1191
1192 ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
1193 if (ret)
1194 crypto_free_akcipher(ctx->rsa.soft_tfm);
1195
1196 return ret;
1197 }
1198
1199 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
1200 {
1201 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1202
1203 hpre_rsa_clear_ctx(ctx, true);
1204 crypto_free_akcipher(ctx->rsa.soft_tfm);
1205 }
1206
1207 static void hpre_key_to_big_end(u8 *data, int len)
1208 {
1209 int i, j;
1210
1211 for (i = 0; i < len / 2; i++) {
1212 j = len - i - 1;
1213 swap(data[j], data[i]);
1214 }
1215 }
1216
1217 static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
1218 bool is_ecdh)
1219 {
1220 struct device *dev = ctx->dev;
1221 unsigned int sz = ctx->key_sz;
1222 unsigned int shift = sz << 1;
1223
1224 if (is_clear_all)
1225 hisi_qm_stop_qp(ctx->qp);
1226
1227 if (is_ecdh && ctx->ecdh.p) {
1228 /* ecdh: p->a->k->b */
1229 memzero_explicit(ctx->ecdh.p + shift, sz);
1230 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1231 ctx->ecdh.p = NULL;
1232 } else if (!is_ecdh && ctx->curve25519.p) {
1233 /* curve25519: p->a->k */
1234 memzero_explicit(ctx->curve25519.p + shift, sz);
1235 dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
1236 ctx->curve25519.dma_p);
1237 ctx->curve25519.p = NULL;
1238 }
1239
1240 hpre_ctx_clear(ctx, is_clear_all);
1241 }
1242
1243 /*
1244 * Key widths of 192/224/256/384/521 bits are supported by HPRE,
1245 * and the width is rounded up as follows:
1246 * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
1247 * If the parameter bit width is insufficient, the high-order zeros are
1248 * filled in by software, so TASK_LENGTH1 is 0x3/0x5/0x8.
1249 */
1250 static unsigned int hpre_ecdh_supported_curve(unsigned short id)
1251 {
1252 switch (id) {
1253 case ECC_CURVE_NIST_P192:
1254 case ECC_CURVE_NIST_P256:
1255 return HPRE_ECC_HW256_KSZ_B;
1256 case ECC_CURVE_NIST_P384:
1257 return HPRE_ECC_HW384_KSZ_B;
1258 default:
1259 break;
1260 }
1261
1262 return 0;
1263 }
1264
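/*
 * Copy a curve parameter, given as an array of little-endian 64-bit digits,
 * into a cur_sz-byte buffer and convert it to the big-endian byte order the
 * hardware expects; only the significant bytes of the top digit are copied.
 */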
1265 static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
1266 {
1267 unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
1268 u8 i = 0;
1269
1270 while (i < ndigits - 1) {
1271 memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
1272 i++;
1273 }
1274
1275 memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
1276 hpre_key_to_big_end((u8 *)addr, cur_sz);
1277 }
1278
1279 static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
1280 unsigned int cur_sz)
1281 {
1282 unsigned int shifta = ctx->key_sz << 1;
1283 unsigned int shiftb = ctx->key_sz << 2;
1284 void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
1285 void *a = ctx->ecdh.p + shifta - cur_sz;
1286 void *b = ctx->ecdh.p + shiftb - cur_sz;
1287 void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
1288 void *y = ctx->ecdh.g + shifta - cur_sz;
1289 const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
1290 char *n;
1291
1292 if (unlikely(!curve))
1293 return -EINVAL;
1294
1295 n = kzalloc(ctx->key_sz, GFP_KERNEL);
1296 if (!n)
1297 return -ENOMEM;
1298
1299 fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
1300 fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
1301 fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
1302 fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
1303 fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
1304 fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
1305
1306 if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
1307 kfree(n);
1308 return -EINVAL;
1309 }
1310
1311 kfree(n);
1312 return 0;
1313 }
1314
1315 static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
1316 {
1317 switch (id) {
1318 case ECC_CURVE_NIST_P192:
1319 return HPRE_ECC_NIST_P192_N_SIZE;
1320 case ECC_CURVE_NIST_P256:
1321 return HPRE_ECC_NIST_P256_N_SIZE;
1322 case ECC_CURVE_NIST_P384:
1323 return HPRE_ECC_NIST_P384_N_SIZE;
1324 default:
1325 break;
1326 }
1327
1328 return 0;
1329 }
1330
1331 static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
1332 {
1333 struct device *dev = ctx->dev;
1334 unsigned int sz, shift, curve_sz;
1335 int ret;
1336
1337 ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
1338 if (!ctx->key_sz)
1339 return -EINVAL;
1340
1341 curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1342 if (!curve_sz || params->key_size > curve_sz)
1343 return -EINVAL;
1344
1345 sz = ctx->key_sz;
1346
1347 if (!ctx->ecdh.p) {
1348 ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
1349 GFP_KERNEL);
1350 if (!ctx->ecdh.p)
1351 return -ENOMEM;
1352 }
1353
1354 shift = sz << 2;
1355 ctx->ecdh.g = ctx->ecdh.p + shift;
1356 ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
1357
1358 ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
1359 if (ret) {
1360 dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
1361 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1362 ctx->ecdh.p = NULL;
1363 return ret;
1364 }
1365
1366 return 0;
1367 }
1368
1369 static bool hpre_key_is_zero(char *key, unsigned short key_sz)
1370 {
1371 int i;
1372
1373 for (i = 0; i < key_sz; i++)
1374 if (key[i])
1375 return false;
1376
1377 return true;
1378 }
1379
1380 static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
1381 {
1382 struct device *dev = ctx->dev;
1383 int ret;
1384
1385 ret = crypto_get_default_rng();
1386 if (ret) {
1387 dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
1388 return ret;
1389 }
1390
1391 ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
1392 params->key_size);
1393 crypto_put_default_rng();
1394 if (ret)
1395 dev_err(dev, "failed to get rng, ret = %d!\n", ret);
1396
1397 return ret;
1398 }
1399
1400 static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1401 unsigned int len)
1402 {
1403 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1404 unsigned int sz, sz_shift, curve_sz;
1405 struct device *dev = ctx->dev;
1406 char key[HPRE_ECC_MAX_KSZ];
1407 struct ecdh params;
1408 int ret;
1409
1410 if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
1411 dev_err(dev, "failed to decode ecdh key!\n");
1412 return -EINVAL;
1413 }
1414
1415 /* Use stdrng to generate private key */
1416 if (!params.key || !params.key_size) {
1417 params.key = key;
1418 curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1419 if (!curve_sz) {
1420 dev_err(dev, "Invalid curve size!\n");
1421 return -EINVAL;
1422 }
1423
1424 params.key_size = curve_sz - 1;
1425 ret = ecdh_gen_privkey(ctx, &params);
1426 if (ret)
1427 return ret;
1428 }
1429
1430 if (hpre_key_is_zero(params.key, params.key_size)) {
1431 dev_err(dev, "Invalid hpre key!\n");
1432 return -EINVAL;
1433 }
1434
1435 hpre_ecc_clear_ctx(ctx, false, true);
1436
1437 ret = hpre_ecdh_set_param(ctx, &params);
1438 if (ret < 0) {
1439 dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
1440 return ret;
1441 }
1442
1443 sz = ctx->key_sz;
1444 sz_shift = (sz << 1) + sz - params.key_size;
1445 memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
1446
1447 return 0;
1448 }
1449
1450 static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
1451 struct hpre_asym_request *req,
1452 struct scatterlist *dst,
1453 struct scatterlist *src)
1454 {
1455 struct device *dev = ctx->dev;
1456 struct hpre_sqe *sqe = &req->req;
1457 dma_addr_t dma;
1458
1459 dma = le64_to_cpu(sqe->in);
1460 if (unlikely(dma_mapping_error(dev, dma)))
1461 return;
1462
1463 if (src && req->src)
1464 dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
1465
1466 dma = le64_to_cpu(sqe->out);
1467 if (unlikely(dma_mapping_error(dev, dma)))
1468 return;
1469
1470 if (req->dst)
1471 dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
1472 if (dst)
1473 dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
1474 }
1475
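/*
 * ECDH completion: the hardware writes the result point as x | y, each
 * left-padded to key_sz bytes; the two memmove() calls below pack it into
 * curve_sz-byte x and y values before the request is completed.
 */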
1476 static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
1477 {
1478 unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1479 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1480 struct hpre_asym_request *req = NULL;
1481 struct kpp_request *areq;
1482 u64 overtime_thrhld;
1483 char *p;
1484 int ret;
1485
1486 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1487 areq = req->areq.ecdh;
1488 areq->dst_len = ctx->key_sz << 1;
1489
1490 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1491 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1492 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1493
1494 p = sg_virt(areq->dst);
1495 memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
1496 memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
1497
1498 hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1499 kpp_request_complete(areq, ret);
1500
1501 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1502 }
1503
1504 static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
1505 struct kpp_request *req)
1506 {
1507 struct hpre_asym_request *h_req;
1508 struct hpre_sqe *msg;
1509 int req_id;
1510 void *tmp;
1511
1512 if (req->dst_len < ctx->key_sz << 1) {
1513 req->dst_len = ctx->key_sz << 1;
1514 return -EINVAL;
1515 }
1516
1517 tmp = kpp_request_ctx(req);
1518 h_req = PTR_ALIGN(tmp, hpre_align_sz());
1519 h_req->cb = hpre_ecdh_cb;
1520 h_req->areq.ecdh = req;
1521 msg = &h_req->req;
1522 memset(msg, 0, sizeof(*msg));
1523 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1524 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1525 msg->key = cpu_to_le64(ctx->ecdh.dma_p);
1526
1527 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1528 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1529 h_req->ctx = ctx;
1530
1531 req_id = hpre_add_req_to_ctx(h_req);
1532 if (req_id < 0)
1533 return -EBUSY;
1534
1535 msg->tag = cpu_to_le16((u16)req_id);
1536 return 0;
1537 }
1538
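/*
 * Stage the peer public key for the hardware: the scatterlist carries
 * x || y (len / 2 bytes each), which is copied into the upper half of a
 * 4 * key_sz coherent buffer and then split into two key_sz fields with
 * zero padding on the left.
 */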
1539 static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
1540 struct scatterlist *data, unsigned int len)
1541 {
1542 struct hpre_sqe *msg = &hpre_req->req;
1543 struct hpre_ctx *ctx = hpre_req->ctx;
1544 struct device *dev = ctx->dev;
1545 unsigned int tmpshift;
1546 dma_addr_t dma = 0;
1547 void *ptr;
1548 int shift;
1549
1550 /* Src_data includes gx and gy. */
1551 shift = ctx->key_sz - (len >> 1);
1552 if (unlikely(shift < 0))
1553 return -EINVAL;
1554
1555 ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
1556 if (unlikely(!ptr))
1557 return -ENOMEM;
1558
1559 tmpshift = ctx->key_sz << 1;
1560 scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
1561 memcpy(ptr + shift, ptr + tmpshift, len >> 1);
1562 memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
1563
1564 hpre_req->src = ptr;
1565 msg->in = cpu_to_le64(dma);
1566 return 0;
1567 }
1568
1569 static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
1570 struct scatterlist *data, unsigned int len)
1571 {
1572 struct hpre_sqe *msg = &hpre_req->req;
1573 struct hpre_ctx *ctx = hpre_req->ctx;
1574 struct device *dev = ctx->dev;
1575 dma_addr_t dma;
1576
1577 if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
1578 dev_err(dev, "data or data length is illegal!\n");
1579 return -EINVAL;
1580 }
1581
1582 hpre_req->dst = NULL;
1583 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1584 if (unlikely(dma_mapping_error(dev, dma))) {
1585 dev_err(dev, "dma map data err!\n");
1586 return -ENOMEM;
1587 }
1588
1589 msg->out = cpu_to_le64(dma);
1590 return 0;
1591 }
1592
1593 static int hpre_ecdh_compute_value(struct kpp_request *req)
1594 {
1595 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1596 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1597 struct device *dev = ctx->dev;
1598 void *tmp = kpp_request_ctx(req);
1599 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
1600 struct hpre_sqe *msg = &hpre_req->req;
1601 int ret;
1602
1603 ret = hpre_ecdh_msg_request_set(ctx, req);
1604 if (unlikely(ret)) {
1605 dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
1606 return ret;
1607 }
1608
1609 if (req->src) {
1610 ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
1611 if (unlikely(ret)) {
1612 dev_err(dev, "failed to init src data, ret = %d!\n", ret);
1613 goto clear_all;
1614 }
1615 } else {
1616 msg->in = cpu_to_le64(ctx->ecdh.dma_g);
1617 }
1618
1619 ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
1620 if (unlikely(ret)) {
1621 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1622 goto clear_all;
1623 }
1624
1625 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
1626 msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;
1627
1628 ret = hpre_send(ctx, msg);
1629 if (likely(!ret))
1630 return -EINPROGRESS;
1631
1632 clear_all:
1633 hpre_rm_req_from_ctx(hpre_req);
1634 hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1635 return ret;
1636 }
1637
1638 static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
1639 {
1640 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1641
1642 /* max size is the pub_key_size, include x and y */
1643 return ctx->key_sz << 1;
1644 }
1645
1646 static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
1647 {
1648 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1649
1650 ctx->curve_id = ECC_CURVE_NIST_P192;
1651
1652 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
1653
1654 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1655 }
1656
1657 static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
1658 {
1659 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1660
1661 ctx->curve_id = ECC_CURVE_NIST_P256;
1662 ctx->enable_hpcore = 1;
1663
1664 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
1665
1666 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1667 }
1668
1669 static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
1670 {
1671 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1672
1673 ctx->curve_id = ECC_CURVE_NIST_P384;
1674
1675 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
1676
1677 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1678 }
1679
1680 static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
1681 {
1682 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1683
1684 hpre_ecc_clear_ctx(ctx, true, true);
1685 }
1686
1687 static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
1688 unsigned int len)
1689 {
1690 u8 secret[CURVE25519_KEY_SIZE] = { 0 };
1691 unsigned int sz = ctx->key_sz;
1692 const struct ecc_curve *curve;
1693 unsigned int shift = sz << 1;
1694 void *p;
1695
1696 /*
1697 * The key from 'buf' is in little-endian, we should preprocess it as
1698 * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
1699 * then convert it to big endian. Only in this way, the result can be
1700 * the same as the software curve-25519 that exists in crypto.
1701 */
1702 memcpy(secret, buf, len);
1703 curve25519_clamp_secret(secret);
1704 hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
1705
1706 p = ctx->curve25519.p + sz - len;
1707
1708 curve = ecc_get_curve25519();
1709
1710 /* fill curve parameters */
1711 fill_curve_param(p, curve->p, len, curve->g.ndigits);
1712 fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
1713 memcpy(p + shift, secret, len);
1714 fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
1715 memzero_explicit(secret, CURVE25519_KEY_SIZE);
1716 }
1717
1718 static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
1719 unsigned int len)
1720 {
1721 struct device *dev = ctx->dev;
1722 unsigned int sz = ctx->key_sz;
1723 unsigned int shift = sz << 1;
1724
1725 /* p->a->k->gx */
1726 if (!ctx->curve25519.p) {
1727 ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
1728 &ctx->curve25519.dma_p,
1729 GFP_KERNEL);
1730 if (!ctx->curve25519.p)
1731 return -ENOMEM;
1732 }
1733
1734 ctx->curve25519.g = ctx->curve25519.p + shift + sz;
1735 ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
1736
1737 hpre_curve25519_fill_curve(ctx, buf, len);
1738
1739 return 0;
1740 }
1741
1742 static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
1743 unsigned int len)
1744 {
1745 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1746 struct device *dev = ctx->dev;
1747 int ret = -EINVAL;
1748
1749 if (len != CURVE25519_KEY_SIZE ||
1750 !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1751 dev_err(dev, "key is null or key len is not 32 bytes!\n");
1752 return ret;
1753 }
1754
1755 /* Free old secret if any */
1756 hpre_ecc_clear_ctx(ctx, false, false);
1757
1758 ctx->key_sz = CURVE25519_KEY_SIZE;
1759 ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
1760 if (ret) {
1761 dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
1762 hpre_ecc_clear_ctx(ctx, false, false);
1763 return ret;
1764 }
1765
1766 return 0;
1767 }
1768
1769 static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
1770 struct hpre_asym_request *req,
1771 struct scatterlist *dst,
1772 struct scatterlist *src)
1773 {
1774 struct device *dev = ctx->dev;
1775 struct hpre_sqe *sqe = &req->req;
1776 dma_addr_t dma;
1777
1778 dma = le64_to_cpu(sqe->in);
1779 if (unlikely(dma_mapping_error(dev, dma)))
1780 return;
1781
1782 if (src && req->src)
1783 dma_free_coherent(dev, ctx->key_sz, req->src, dma);
1784
1785 dma = le64_to_cpu(sqe->out);
1786 if (unlikely(dma_mapping_error(dev, dma)))
1787 return;
1788
1789 if (req->dst)
1790 dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
1791 if (dst)
1792 dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
1793 }
1794
1795 static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
1796 {
1797 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1798 struct hpre_asym_request *req = NULL;
1799 struct kpp_request *areq;
1800 u64 overtime_thrhld;
1801 int ret;
1802
1803 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1804 areq = req->areq.curve25519;
1805 areq->dst_len = ctx->key_sz;
1806
1807 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1808 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1809 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1810
1811 hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
1812
1813 hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1814 kpp_request_complete(areq, ret);
1815
1816 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1817 }
1818
1819 static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
1820 struct kpp_request *req)
1821 {
1822 struct hpre_asym_request *h_req;
1823 struct hpre_sqe *msg;
1824 int req_id;
1825 void *tmp;
1826
1827 if (unlikely(req->dst_len < ctx->key_sz)) {
1828 req->dst_len = ctx->key_sz;
1829 return -EINVAL;
1830 }
1831
1832 tmp = kpp_request_ctx(req);
1833 h_req = PTR_ALIGN(tmp, hpre_align_sz());
1834 h_req->cb = hpre_curve25519_cb;
1835 h_req->areq.curve25519 = req;
1836 msg = &h_req->req;
1837 memset(msg, 0, sizeof(*msg));
1838 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1839 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1840 msg->key = cpu_to_le64(ctx->curve25519.dma_p);
1841
1842 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1843 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1844 h_req->ctx = ctx;
1845
1846 req_id = hpre_add_req_to_ctx(h_req);
1847 if (req_id < 0)
1848 return -EBUSY;
1849
1850 msg->tag = cpu_to_le16((u16)req_id);
1851 return 0;
1852 }
1853
1854 static void hpre_curve25519_src_modulo_p(u8 *ptr)
1855 {
1856 int i;
1857
1858 for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
1859 ptr[i] = 0;
1860
1861 /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
1862 ptr[i] -= 0xed;
1863 }
1864
1865 static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
1866 struct scatterlist *data, unsigned int len)
1867 {
1868 struct hpre_sqe *msg = &hpre_req->req;
1869 struct hpre_ctx *ctx = hpre_req->ctx;
1870 struct device *dev = ctx->dev;
1871 u8 p[CURVE25519_KEY_SIZE] = { 0 };
1872 const struct ecc_curve *curve;
1873 dma_addr_t dma = 0;
1874 u8 *ptr;
1875
1876 if (len != CURVE25519_KEY_SIZE) {
1877 dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
1878 return -EINVAL;
1879 }
1880
1881 ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
1882 if (unlikely(!ptr))
1883 return -ENOMEM;
1884
1885 scatterwalk_map_and_copy(ptr, data, 0, len, 0);
1886
1887 if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1888 dev_err(dev, "gx is null!\n");
1889 goto err;
1890 }
1891
1892 /*
1893 * Src_data (gx) is in little-endian order; the MSB of its final byte
1894 * must be masked as described in RFC 7748, then the value is converted
1895 * to big-endian form so that hisi_hpre can use the data.
1896 */
1897 ptr[31] &= 0x7f;
1898 hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
1899
1900 curve = ecc_get_curve25519();
1901
1902 fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
1903
1904 /*
1905 * When src_data lies in the range (2^255 - 19) to (2^255 - 1), it is out
1906 * of p; reduce it modulo p and then use the result.
1907 */
1908 if (memcmp(ptr, p, ctx->key_sz) == 0) {
1909 dev_err(dev, "gx is p!\n");
1910 goto err;
1911 } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
1912 hpre_curve25519_src_modulo_p(ptr);
1913 }
1914
1915 hpre_req->src = ptr;
1916 msg->in = cpu_to_le64(dma);
1917 return 0;
1918
1919 err:
1920 dma_free_coherent(dev, ctx->key_sz, ptr, dma);
1921 return -EINVAL;
1922 }
1923
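/*
 * Map the destination buffer for the device to write the result into.
 * The destination must be a single scatterlist entry of exactly
 * ctx->key_sz bytes and is DMA-mapped in place.
 */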
static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

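/*
 * Entry point for both KPP operations: when req->src is NULL the base
 * point kept in the tfm context (ctx->curve25519.dma_g) is used and the
 * public key is generated; otherwise req->src carries the peer's public
 * key and the shared secret is computed.  On success the request
 * completes asynchronously via hpre_curve25519_cb().
 */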
static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

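/*
 * The KPP request context is over-allocated by hpre_align_pd() so that
 * the embedded hpre_asym_request can be aligned to hpre_align_sz() with
 * PTR_ALIGN(), matching the alignment done in the request-set and
 * compute paths above.
 */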
static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

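/*
 * The algorithm templates below advertise the HPRE implementations to
 * the crypto API.  A kernel consumer never calls this driver directly;
 * it goes through the generic akcipher/KPP interfaces and, if the HPRE
 * priority wins, ends up here.  A minimal sketch for curve25519 public
 * key generation (hypothetical buffer names, error handling and async
 * completion waiting elided):
 *
 *	struct crypto_kpp *tfm = crypto_alloc_kpp("curve25519", 0, 0);
 *	struct kpp_request *kreq = kpp_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_kpp_set_secret(tfm, secret_buf, CURVE25519_KEY_SIZE);
 *	kpp_request_set_input(kreq, NULL, 0);
 *	kpp_request_set_output(kreq, pub_sg, CURVE25519_KEY_SIZE);
 *	crypto_kpp_generate_public_key(kreq);
 *
 *	kpp_request_free(kreq);
 *	crypto_free_kpp(tfm);
 */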
static struct akcipher_alg rsa = {
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

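/*
 * Each register/unregister pair first checks the device capability mask
 * (HPRE_DRV_*_MASK_CAP), so an algorithm is only exposed to the crypto
 * API when the underlying HPRE hardware actually supports it.
 */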
static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

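/*
 * Algorithm registration is reference counted across HPRE devices under
 * hpre_algs_lock: only the first device to come up registers the
 * algorithms with the crypto API, later devices just bump
 * hpre_available_devs, and the algorithms are unregistered again when
 * the last device goes away.
 */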
int hpre_algs_register(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&hpre_algs_lock);
	if (hpre_available_devs) {
		hpre_available_devs++;
		goto unlock;
	}

	ret = hpre_register_rsa(qm);
	if (ret)
		goto unlock;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	hpre_available_devs++;
	mutex_unlock(&hpre_algs_lock);

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
unlock:
	mutex_unlock(&hpre_algs_lock);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	mutex_lock(&hpre_algs_lock);
	if (--hpre_available_devs)
		goto unlock;

	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);

unlock:
	mutex_unlock(&hpre_algs_lock);
}