1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/akcipher.h>
4 #include <crypto/curve25519.h>
5 #include <crypto/dh.h>
6 #include <crypto/ecc_curve.h>
7 #include <crypto/ecdh.h>
8 #include <crypto/rng.h>
9 #include <crypto/internal/akcipher.h>
10 #include <crypto/internal/kpp.h>
11 #include <crypto/internal/rsa.h>
12 #include <crypto/kpp.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/fips.h>
16 #include <linux/module.h>
17 #include <linux/time.h>
18 #include "hpre.h"
19
20 struct hpre_ctx;
21
22 #define HPRE_CRYPTO_ALG_PRI 1000
23 #define HPRE_ALIGN_SZ 64
24 #define HPRE_BITS_2_BYTES_SHIFT 3
25 #define HPRE_RSA_512BITS_KSZ 64
26 #define HPRE_RSA_1536BITS_KSZ 192
27 #define HPRE_CRT_PRMS 5
28 #define HPRE_CRT_Q 2
29 #define HPRE_CRT_P 3
30 #define HPRE_CRT_INV 4
31 #define HPRE_DH_G_FLAG 0x02
32 #define HPRE_TRY_SEND_TIMES 100
33 #define HPRE_INVLD_REQ_ID (-1)
34
35 #define HPRE_SQE_ALG_BITS 5
36 #define HPRE_SQE_DONE_SHIFT 30
37 #define HPRE_DH_MAX_P_SZ 512
38
39 #define HPRE_DFX_SEC_TO_US 1000000
40 #define HPRE_DFX_US_TO_NS 1000
41
42 #define HPRE_ENABLE_HPCORE_SHIFT 7
43
44 /* 66 bytes, enough for NIST P-521 */
45 #define HPRE_ECC_MAX_KSZ 66
46
47 /* size in bytes of the n prime */
48 #define HPRE_ECC_NIST_P192_N_SIZE 24
49 #define HPRE_ECC_NIST_P256_N_SIZE 32
50 #define HPRE_ECC_NIST_P384_N_SIZE 48
51
52 /* size in bytes */
53 #define HPRE_ECC_HW256_KSZ_B 32
54 #define HPRE_ECC_HW384_KSZ_B 48
55
56 /* capability register mask of driver */
57 #define HPRE_DRV_RSA_MASK_CAP BIT(0)
58 #define HPRE_DRV_DH_MASK_CAP BIT(1)
59 #define HPRE_DRV_ECDH_MASK_CAP BIT(2)
60 #define HPRE_DRV_X25519_MASK_CAP BIT(5)
61
62 static DEFINE_MUTEX(hpre_algs_lock);
63 static unsigned int hpre_available_devs;
64
65 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
66
67 struct hpre_rsa_ctx {
68 /* low address: e--->n */
69 char *pubkey;
70 dma_addr_t dma_pubkey;
71
72 /* low address: d--->n */
73 char *prikey;
74 dma_addr_t dma_prikey;
75
76 /* low address: dq->dp->q->p->qinv */
77 char *crt_prikey;
78 dma_addr_t dma_crt_prikey;
79
80 struct crypto_akcipher *soft_tfm;
81 };
82
83 struct hpre_dh_ctx {
84 /*
85 * If base is g we compute the public key
86 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
87 * else if base is the counterpart public key we
88 * compute the shared secret
89 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
90 * low address: d--->n, please refer to Hisilicon HPRE UM
91 */
92 char *xa_p;
93 dma_addr_t dma_xa_p;
94
95 char *g; /* m */
96 dma_addr_t dma_g;
97 };
98
99 struct hpre_ecdh_ctx {
100 /* low address: p->a->k->b */
101 unsigned char *p;
102 dma_addr_t dma_p;
103
104 /* low address: x->y */
105 unsigned char *g;
106 dma_addr_t dma_g;
107 };
108
109 struct hpre_curve25519_ctx {
110 /* low address: p->a->k */
111 unsigned char *p;
112 dma_addr_t dma_p;
113
114 /* gx coordinate */
115 unsigned char *g;
116 dma_addr_t dma_g;
117 };
118
119 struct hpre_ctx {
120 struct hisi_qp *qp;
121 struct device *dev;
122 struct hpre_asym_request **req_list;
123 struct hpre *hpre;
124 spinlock_t req_lock;
125 unsigned int key_sz;
126 bool crt_g2_mode;
127 struct idr req_idr;
128 union {
129 struct hpre_rsa_ctx rsa;
130 struct hpre_dh_ctx dh;
131 struct hpre_ecdh_ctx ecdh;
132 struct hpre_curve25519_ctx curve25519;
133 };
134 /* for ecc algorithms */
135 unsigned int curve_id;
136 /* for high performance core */
137 u8 enable_hpcore;
138 };
139
140 struct hpre_asym_request {
141 char *src;
142 char *dst;
143 struct hpre_sqe req;
144 struct hpre_ctx *ctx;
145 union {
146 struct akcipher_request *rsa;
147 struct kpp_request *dh;
148 struct kpp_request *ecdh;
149 struct kpp_request *curve25519;
150 } areq;
151 int err;
152 int req_id;
153 hpre_cb cb;
154 struct timespec64 req_time;
155 };
156
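/*
 * Request-context alignment helpers: assuming both crypto_dma_align() and
 * HPRE_ALIGN_SZ are powers of two, the expression below evaluates to the
 * larger of the two, e.g. max(64, 128) = 128 when crypto_dma_align() is 128.
 */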
157 static inline unsigned int hpre_align_sz(void)
158 {
159 return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
160 }
161
162 static inline unsigned int hpre_align_pd(void)
163 {
164 return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
165 }
166
167 static int hpre_alloc_req_id(struct hpre_ctx *ctx)
168 {
169 unsigned long flags;
170 int id;
171
172 spin_lock_irqsave(&ctx->req_lock, flags);
173 id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
174 spin_unlock_irqrestore(&ctx->req_lock, flags);
175
176 return id;
177 }
178
179 static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
180 {
181 unsigned long flags;
182
183 spin_lock_irqsave(&ctx->req_lock, flags);
184 idr_remove(&ctx->req_idr, req_id);
185 spin_unlock_irqrestore(&ctx->req_lock, flags);
186 }
187
188 static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
189 {
190 struct hpre_ctx *ctx;
191 struct hpre_dfx *dfx;
192 int id;
193
194 ctx = hpre_req->ctx;
195 id = hpre_alloc_req_id(ctx);
196 if (unlikely(id < 0))
197 return -EINVAL;
198
199 ctx->req_list[id] = hpre_req;
200 hpre_req->req_id = id;
201
202 dfx = ctx->hpre->debug.dfx;
203 if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
204 ktime_get_ts64(&hpre_req->req_time);
205
206 return id;
207 }
208
209 static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
210 {
211 struct hpre_ctx *ctx = hpre_req->ctx;
212 int id = hpre_req->req_id;
213
214 if (hpre_req->req_id >= 0) {
215 hpre_req->req_id = HPRE_INVLD_REQ_ID;
216 ctx->req_list[id] = NULL;
217 hpre_free_req_id(ctx, id);
218 }
219 }
220
221 static struct hisi_qp *hpre_get_qp_and_start(u8 type)
222 {
223 struct hisi_qp *qp;
224 int ret;
225
226 qp = hpre_create_qp(type);
227 if (!qp) {
228 pr_err("Can not create hpre qp!\n");
229 return ERR_PTR(-ENODEV);
230 }
231
232 ret = hisi_qm_start_qp(qp, 0);
233 if (ret < 0) {
234 hisi_qm_free_qps(&qp, 1);
235 pci_err(qp->qm->pdev, "Can not start qp!\n");
236 return ERR_PTR(-EINVAL);
237 }
238
239 return qp;
240 }
241
242 static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
243 struct scatterlist *data, unsigned int len,
244 int is_src, dma_addr_t *tmp)
245 {
246 struct device *dev = hpre_req->ctx->dev;
247 enum dma_data_direction dma_dir;
248
249 if (is_src) {
250 hpre_req->src = NULL;
251 dma_dir = DMA_TO_DEVICE;
252 } else {
253 hpre_req->dst = NULL;
254 dma_dir = DMA_FROM_DEVICE;
255 }
256 *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
257 if (unlikely(dma_mapping_error(dev, *tmp))) {
258 dev_err(dev, "dma map data err!\n");
259 return -ENOMEM;
260 }
261
262 return 0;
263 }
264
265 static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
266 struct scatterlist *data, unsigned int len,
267 int is_src, dma_addr_t *tmp)
268 {
269 struct hpre_ctx *ctx = hpre_req->ctx;
270 struct device *dev = ctx->dev;
271 void *ptr;
272 int shift;
273
274 shift = ctx->key_sz - len;
275 if (unlikely(shift < 0))
276 return -EINVAL;
277
278 ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
279 if (unlikely(!ptr))
280 return -ENOMEM;
281
282 if (is_src) {
283 scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
284 hpre_req->src = ptr;
285 } else {
286 hpre_req->dst = ptr;
287 }
288
289 return 0;
290 }
291
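/*
 * Decide how the caller's buffer reaches the device: a single scatterlist
 * entry of exactly ctx->key_sz bytes (and not a DH source) is DMA-mapped
 * directly; anything else gets a freshly allocated coherent bounce buffer,
 * with source data copied in right-aligned to ctx->key_sz (high-order
 * bytes zero). The resulting DMA address lands in sqe->in or sqe->out
 * depending on is_src.
 */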
292 static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
293 struct scatterlist *data, unsigned int len,
294 int is_src, int is_dh)
295 {
296 struct hpre_sqe *msg = &hpre_req->req;
297 struct hpre_ctx *ctx = hpre_req->ctx;
298 dma_addr_t tmp = 0;
299 int ret;
300
301 /* when the data is DH's source, it must be formatted (copied into a key-sized buffer) */
302 if ((sg_is_last(data) && len == ctx->key_sz) &&
303 ((is_dh && !is_src) || !is_dh))
304 ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
305 else
306 ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
307
308 if (unlikely(ret))
309 return ret;
310
311 if (is_src)
312 msg->in = cpu_to_le64(tmp);
313 else
314 msg->out = cpu_to_le64(tmp);
315
316 return 0;
317 }
318
319 static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
320 struct hpre_asym_request *req,
321 struct scatterlist *dst,
322 struct scatterlist *src)
323 {
324 struct device *dev = ctx->dev;
325 struct hpre_sqe *sqe = &req->req;
326 dma_addr_t tmp;
327
328 tmp = le64_to_cpu(sqe->in);
329 if (unlikely(dma_mapping_error(dev, tmp)))
330 return;
331
332 if (src) {
333 if (req->src)
334 dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
335 else
336 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
337 }
338
339 tmp = le64_to_cpu(sqe->out);
340 if (unlikely(dma_mapping_error(dev, tmp)))
341 return;
342
343 if (req->dst) {
344 if (dst)
345 scatterwalk_map_and_copy(req->dst, dst, 0,
346 ctx->key_sz, 1);
347 dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
348 } else {
349 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
350 }
351 }
352
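/*
 * Post-process one completed sqe: look up the request via the tag echoed
 * back by the hardware, detach it from the context, and decode dw0, whose
 * layout per the masks below is bits [4:0] algorithm type, bits [15:5]
 * hardware error type and bits [31:30] done status. Return 0 only when
 * the error field is HPRE_NO_HW_ERR and done equals HPRE_HW_TASK_DONE.
 */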
353 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
354 void **kreq)
355 {
356 struct hpre_asym_request *req;
357 unsigned int err, done, alg;
358 int id;
359
360 #define HPRE_NO_HW_ERR 0
361 #define HPRE_HW_TASK_DONE 3
362 #define HREE_HW_ERR_MASK GENMASK(10, 0)
363 #define HREE_SQE_DONE_MASK GENMASK(1, 0)
364 #define HREE_ALG_TYPE_MASK GENMASK(4, 0)
365 id = (int)le16_to_cpu(sqe->tag);
366 req = ctx->req_list[id];
367 hpre_rm_req_from_ctx(req);
368 *kreq = req;
369
370 err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
371 HREE_HW_ERR_MASK;
372
373 done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
374 HREE_SQE_DONE_MASK;
375
376 if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
377 return 0;
378
379 alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
380 dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
381 alg, done, err);
382
383 return -EINVAL;
384 }
385
386 static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
387 {
388 struct hpre *hpre;
389
390 if (!ctx || !qp || qlen < 0)
391 return -EINVAL;
392
393 spin_lock_init(&ctx->req_lock);
394 ctx->qp = qp;
395 ctx->dev = &qp->qm->pdev->dev;
396
397 hpre = container_of(ctx->qp->qm, struct hpre, qm);
398 ctx->hpre = hpre;
399 ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
400 if (!ctx->req_list)
401 return -ENOMEM;
402 ctx->key_sz = 0;
403 ctx->crt_g2_mode = false;
404 idr_init(&ctx->req_idr);
405
406 return 0;
407 }
408
409 static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
410 {
411 if (is_clear_all) {
412 idr_destroy(&ctx->req_idr);
413 kfree(ctx->req_list);
414 hisi_qm_free_qps(&ctx->qp, 1);
415 }
416
417 ctx->crt_g2_mode = false;
418 ctx->key_sz = 0;
419 }
420
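/*
 * DFX helper: report whether a request exceeded the configured latency
 * threshold. The elapsed time is computed in microseconds as
 * (sec_delta * 1000000) + (nsec_delta / 1000); e.g. a request issued at
 * 1.000000000 s and completed at 1.000250000 s counts as 250 us.
 */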
421 static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
422 u64 overtime_thrhld)
423 {
424 struct timespec64 reply_time;
425 u64 time_use_us;
426
427 ktime_get_ts64(&reply_time);
428 time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
429 HPRE_DFX_SEC_TO_US +
430 (reply_time.tv_nsec - req->req_time.tv_nsec) /
431 HPRE_DFX_US_TO_NS;
432
433 if (time_use_us <= overtime_thrhld)
434 return false;
435
436 return true;
437 }
438
439 static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
440 {
441 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
442 struct hpre_asym_request *req;
443 struct kpp_request *areq;
444 u64 overtime_thrhld;
445 int ret;
446
447 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
448 areq = req->areq.dh;
449 areq->dst_len = ctx->key_sz;
450
451 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
452 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
453 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
454
455 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
456 kpp_request_complete(areq, ret);
457 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
458 }
459
460 static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
461 {
462 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
463 struct hpre_asym_request *req;
464 struct akcipher_request *areq;
465 u64 overtime_thrhld;
466 int ret;
467
468 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
469
470 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
471 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
472 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
473
474 areq = req->areq.rsa;
475 areq->dst_len = ctx->key_sz;
476 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
477 akcipher_request_complete(areq, ret);
478 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
479 }
480
481 static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
482 {
483 struct hpre_ctx *ctx = qp->qp_ctx;
484 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
485 struct hpre_sqe *sqe = resp;
486 struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
487
488 if (unlikely(!req)) {
489 atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
490 return;
491 }
492
493 req->cb(ctx, resp);
494 }
495
496 static void hpre_stop_qp_and_put(struct hisi_qp *qp)
497 {
498 hisi_qm_stop_qp(qp);
499 hisi_qm_free_qps(&qp, 1);
500 }
501
502 static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
503 {
504 struct hisi_qp *qp;
505 int ret;
506
507 qp = hpre_get_qp_and_start(type);
508 if (IS_ERR(qp))
509 return PTR_ERR(qp);
510
511 qp->qp_ctx = ctx;
512 qp->req_cb = hpre_alg_cb;
513
514 ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
515 if (ret)
516 hpre_stop_qp_and_put(qp);
517
518 return ret;
519 }
520
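/*
 * Common sqe setup for RSA and DH: verify the destination can hold a full
 * key-sized result, poison sqe->in/out with DMA_MAPPING_ERROR so that
 * hpre_hw_data_clr_all() can tell what was actually mapped, set task_len1
 * to the key size in 64-bit words minus one (0x1f for a 2048-bit key),
 * and record the allocated request id in the sqe tag.
 */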
521 static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
522 {
523 struct hpre_asym_request *h_req;
524 struct hpre_sqe *msg;
525 int req_id;
526 void *tmp;
527
528 if (is_rsa) {
529 struct akcipher_request *akreq = req;
530
531 if (akreq->dst_len < ctx->key_sz) {
532 akreq->dst_len = ctx->key_sz;
533 return -EOVERFLOW;
534 }
535
536 tmp = akcipher_request_ctx(akreq);
537 h_req = PTR_ALIGN(tmp, hpre_align_sz());
538 h_req->cb = hpre_rsa_cb;
539 h_req->areq.rsa = akreq;
540 msg = &h_req->req;
541 memset(msg, 0, sizeof(*msg));
542 } else {
543 struct kpp_request *kreq = req;
544
545 if (kreq->dst_len < ctx->key_sz) {
546 kreq->dst_len = ctx->key_sz;
547 return -EOVERFLOW;
548 }
549
550 tmp = kpp_request_ctx(kreq);
551 h_req = PTR_ALIGN(tmp, hpre_align_sz());
552 h_req->cb = hpre_dh_cb;
553 h_req->areq.dh = kreq;
554 msg = &h_req->req;
555 memset(msg, 0, sizeof(*msg));
556 msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
557 }
558
559 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
560 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
561 msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
562 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
563 h_req->ctx = ctx;
564
565 req_id = hpre_add_req_to_ctx(h_req);
566 if (req_id < 0)
567 return -EBUSY;
568
569 msg->tag = cpu_to_le16((u16)req_id);
570
571 return 0;
572 }
573
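/*
 * Queue one sqe, retrying up to HPRE_TRY_SEND_TIMES while the queue pair
 * reports -EBUSY; the send/busy/fail DFX counters are updated along the way.
 */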
574 static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
575 {
576 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
577 int ctr = 0;
578 int ret;
579
580 do {
581 atomic64_inc(&dfx[HPRE_SEND_CNT].value);
582 spin_lock_bh(&ctx->req_lock);
583 ret = hisi_qp_send(ctx->qp, msg);
584 spin_unlock_bh(&ctx->req_lock);
585 if (ret != -EBUSY)
586 break;
587 atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
588 } while (ctr++ < HPRE_TRY_SEND_TIMES);
589
590 if (likely(!ret))
591 return ret;
592
593 if (ret != -EBUSY)
594 atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
595
596 return ret;
597 }
598
599 static int hpre_dh_compute_value(struct kpp_request *req)
600 {
601 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
602 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
603 void *tmp = kpp_request_ctx(req);
604 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
605 struct hpre_sqe *msg = &hpre_req->req;
606 int ret;
607
608 ret = hpre_msg_request_set(ctx, req, false);
609 if (unlikely(ret))
610 return ret;
611
612 if (req->src) {
613 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
614 if (unlikely(ret))
615 goto clear_all;
616 } else {
617 msg->in = cpu_to_le64(ctx->dh.dma_g);
618 }
619
620 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
621 if (unlikely(ret))
622 goto clear_all;
623
624 if (ctx->crt_g2_mode && !req->src)
625 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
626 else
627 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
628
629 /* success */
630 ret = hpre_send(ctx, msg);
631 if (likely(!ret))
632 return -EINPROGRESS;
633
634 clear_all:
635 hpre_rm_req_from_ctx(hpre_req);
636 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
637
638 return ret;
639 }
640
641 static int hpre_is_dh_params_length_valid(unsigned int key_sz)
642 {
643 #define _HPRE_DH_GRP1 768
644 #define _HPRE_DH_GRP2 1024
645 #define _HPRE_DH_GRP5 1536
646 #define _HPRE_DH_GRP14 2048
647 #define _HPRE_DH_GRP15 3072
648 #define _HPRE_DH_GRP16 4096
649 switch (key_sz) {
650 case _HPRE_DH_GRP1:
651 case _HPRE_DH_GRP2:
652 case _HPRE_DH_GRP5:
653 case _HPRE_DH_GRP14:
654 case _HPRE_DH_GRP15:
655 case _HPRE_DH_GRP16:
656 return 0;
657 default:
658 return -EINVAL;
659 }
660 }
661
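/*
 * DH operands live in one coherent buffer of 2 * p_size bytes: the private
 * key xa is placed (right-aligned) in the low half and the prime p in the
 * high half, hence the name xa_p. The generator g gets its own buffer,
 * right-aligned to p_size, unless g == 2, in which case crt_g2_mode is set
 * and the HPRE_ALG_DH_G2 operation is used instead of copying g.
 */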
662 static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
663 {
664 struct device *dev = ctx->dev;
665 unsigned int sz;
666
667 if (params->p_size > HPRE_DH_MAX_P_SZ)
668 return -EINVAL;
669
670 if (hpre_is_dh_params_length_valid(params->p_size <<
671 HPRE_BITS_2_BYTES_SHIFT))
672 return -EINVAL;
673
674 sz = ctx->key_sz = params->p_size;
675 ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
676 &ctx->dh.dma_xa_p, GFP_KERNEL);
677 if (!ctx->dh.xa_p)
678 return -ENOMEM;
679
680 memcpy(ctx->dh.xa_p + sz, params->p, sz);
681
682 /* If g equals 2 don't copy it */
683 if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
684 ctx->crt_g2_mode = true;
685 return 0;
686 }
687
688 ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
689 if (!ctx->dh.g) {
690 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
691 ctx->dh.dma_xa_p);
692 ctx->dh.xa_p = NULL;
693 return -ENOMEM;
694 }
695
696 memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
697
698 return 0;
699 }
700
701 static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
702 {
703 struct device *dev = ctx->dev;
704 unsigned int sz = ctx->key_sz;
705
706 if (is_clear_all)
707 hisi_qm_stop_qp(ctx->qp);
708
709 if (ctx->dh.g) {
710 dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
711 ctx->dh.g = NULL;
712 }
713
714 if (ctx->dh.xa_p) {
715 memzero_explicit(ctx->dh.xa_p, sz);
716 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
717 ctx->dh.dma_xa_p);
718 ctx->dh.xa_p = NULL;
719 }
720
721 hpre_ctx_clear(ctx, is_clear_all);
722 }
723
724 static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
725 unsigned int len)
726 {
727 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
728 struct dh params;
729 int ret;
730
731 if (crypto_dh_decode_key(buf, len, &params) < 0)
732 return -EINVAL;
733
734 /* Free old secret if any */
735 hpre_dh_clear_ctx(ctx, false);
736
737 ret = hpre_dh_set_params(ctx, &params);
738 if (ret < 0)
739 goto err_clear_ctx;
740
741 memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
742 params.key_size);
743
744 return 0;
745
746 err_clear_ctx:
747 hpre_dh_clear_ctx(ctx, false);
748 return ret;
749 }
750
751 static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
752 {
753 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
754
755 return ctx->key_sz;
756 }
757
758 static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
759 {
760 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
761
762 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
763
764 return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
765 }
766
767 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
768 {
769 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
770
771 hpre_dh_clear_ctx(ctx, true);
772 }
773
774 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
775 {
776 while (!**ptr && *len) {
777 (*ptr)++;
778 (*len)--;
779 }
780 }
781
782 static bool hpre_rsa_key_size_is_support(unsigned int len)
783 {
784 unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
785
786 #define _RSA_1024BITS_KEY_WDTH 1024
787 #define _RSA_2048BITS_KEY_WDTH 2048
788 #define _RSA_3072BITS_KEY_WDTH 3072
789 #define _RSA_4096BITS_KEY_WDTH 4096
790
791 switch (bits) {
792 case _RSA_1024BITS_KEY_WDTH:
793 case _RSA_2048BITS_KEY_WDTH:
794 case _RSA_3072BITS_KEY_WDTH:
795 case _RSA_4096BITS_KEY_WDTH:
796 return true;
797 default:
798 return false;
799 }
800 }
801
802 static int hpre_rsa_enc(struct akcipher_request *req)
803 {
804 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
805 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
806 void *tmp = akcipher_request_ctx(req);
807 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
808 struct hpre_sqe *msg = &hpre_req->req;
809 int ret;
810
811 /* For 512 and 1536 bits key size, use soft tfm instead */
812 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
813 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
814 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
815 ret = crypto_akcipher_encrypt(req);
816 akcipher_request_set_tfm(req, tfm);
817 return ret;
818 }
819
820 if (unlikely(!ctx->rsa.pubkey))
821 return -EINVAL;
822
823 ret = hpre_msg_request_set(ctx, req, true);
824 if (unlikely(ret))
825 return ret;
826
827 msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
828 msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
829
830 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
831 if (unlikely(ret))
832 goto clear_all;
833
834 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
835 if (unlikely(ret))
836 goto clear_all;
837
838 /* success */
839 ret = hpre_send(ctx, msg);
840 if (likely(!ret))
841 return -EINPROGRESS;
842
843 clear_all:
844 hpre_rm_req_from_ctx(hpre_req);
845 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
846
847 return ret;
848 }
849
850 static int hpre_rsa_dec(struct akcipher_request *req)
851 {
852 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
853 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
854 void *tmp = akcipher_request_ctx(req);
855 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
856 struct hpre_sqe *msg = &hpre_req->req;
857 int ret;
858
859 /* For 512 and 1536 bits key size, use soft tfm instead */
860 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
861 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
862 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
863 ret = crypto_akcipher_decrypt(req);
864 akcipher_request_set_tfm(req, tfm);
865 return ret;
866 }
867
868 if (unlikely(!ctx->rsa.prikey))
869 return -EINVAL;
870
871 ret = hpre_msg_request_set(ctx, req, true);
872 if (unlikely(ret))
873 return ret;
874
875 if (ctx->crt_g2_mode) {
876 msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
877 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
878 HPRE_ALG_NC_CRT);
879 } else {
880 msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
881 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
882 HPRE_ALG_NC_NCRT);
883 }
884
885 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
886 if (unlikely(ret))
887 goto clear_all;
888
889 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
890 if (unlikely(ret))
891 goto clear_all;
892
893 /* success */
894 ret = hpre_send(ctx, msg);
895 if (likely(!ret))
896 return -EINPROGRESS;
897
898 clear_all:
899 hpre_rm_req_from_ctx(hpre_req);
900 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
901
902 return ret;
903 }
904
905 static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
906 size_t vlen, bool private)
907 {
908 const char *ptr = value;
909
910 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
911
912 ctx->key_sz = vlen;
913
914 /* if an invalid key size is provided, fall back to the software tfm */
915 if (!hpre_rsa_key_size_is_support(ctx->key_sz))
916 return 0;
917
918 ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
919 &ctx->rsa.dma_pubkey,
920 GFP_KERNEL);
921 if (!ctx->rsa.pubkey)
922 return -ENOMEM;
923
924 if (private) {
925 ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
926 &ctx->rsa.dma_prikey,
927 GFP_KERNEL);
928 if (!ctx->rsa.prikey) {
929 dma_free_coherent(ctx->dev, vlen << 1,
930 ctx->rsa.pubkey,
931 ctx->rsa.dma_pubkey);
932 ctx->rsa.pubkey = NULL;
933 return -ENOMEM;
934 }
935 memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
936 }
937 memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
938
939 /* Using hardware HPRE to do RSA */
940 return 1;
941 }
942
943 static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
944 size_t vlen)
945 {
946 const char *ptr = value;
947
948 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
949
950 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
951 return -EINVAL;
952
953 memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
954
955 return 0;
956 }
957
958 static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
959 size_t vlen)
960 {
961 const char *ptr = value;
962
963 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
964
965 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
966 return -EINVAL;
967
968 memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
969
970 return 0;
971 }
972
973 static int hpre_crt_para_get(char *para, size_t para_sz,
974 const char *raw, size_t raw_sz)
975 {
976 const char *ptr = raw;
977 size_t len = raw_sz;
978
979 hpre_rsa_drop_leading_zeros(&ptr, &len);
980 if (!len || len > para_sz)
981 return -EINVAL;
982
983 memcpy(para + para_sz - len, ptr, len);
984
985 return 0;
986 }
987
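/*
 * Build the CRT private key blob referenced by the sqe: five fields of
 * half the key size each, laid out as dq | dp | q | p | qinv (see the
 * HPRE_CRT_* offsets), every value right-aligned within its field by
 * hpre_crt_para_get(). On failure the whole buffer is zeroized and freed.
 */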
988 static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
989 {
990 unsigned int hlf_ksz = ctx->key_sz >> 1;
991 struct device *dev = ctx->dev;
992 u64 offset;
993 int ret;
994
995 ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
996 &ctx->rsa.dma_crt_prikey,
997 GFP_KERNEL);
998 if (!ctx->rsa.crt_prikey)
999 return -ENOMEM;
1000
1001 ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
1002 rsa_key->dq, rsa_key->dq_sz);
1003 if (ret)
1004 goto free_key;
1005
1006 offset = hlf_ksz;
1007 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1008 rsa_key->dp, rsa_key->dp_sz);
1009 if (ret)
1010 goto free_key;
1011
1012 offset = hlf_ksz * HPRE_CRT_Q;
1013 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1014 rsa_key->q, rsa_key->q_sz);
1015 if (ret)
1016 goto free_key;
1017
1018 offset = hlf_ksz * HPRE_CRT_P;
1019 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1020 rsa_key->p, rsa_key->p_sz);
1021 if (ret)
1022 goto free_key;
1023
1024 offset = hlf_ksz * HPRE_CRT_INV;
1025 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1026 rsa_key->qinv, rsa_key->qinv_sz);
1027 if (ret)
1028 goto free_key;
1029
1030 ctx->crt_g2_mode = true;
1031
1032 return 0;
1033
1034 free_key:
1035 offset = hlf_ksz * HPRE_CRT_PRMS;
1036 memzero_explicit(ctx->rsa.crt_prikey, offset);
1037 dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
1038 ctx->rsa.dma_crt_prikey);
1039 ctx->rsa.crt_prikey = NULL;
1040 ctx->crt_g2_mode = false;
1041
1042 return ret;
1043 }
1044
1045 /* If it is clear all, all the resources of the QP will be cleaned. */
1046 static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
1047 {
1048 unsigned int half_key_sz = ctx->key_sz >> 1;
1049 struct device *dev = ctx->dev;
1050
1051 if (is_clear_all)
1052 hisi_qm_stop_qp(ctx->qp);
1053
1054 if (ctx->rsa.pubkey) {
1055 dma_free_coherent(dev, ctx->key_sz << 1,
1056 ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
1057 ctx->rsa.pubkey = NULL;
1058 }
1059
1060 if (ctx->rsa.crt_prikey) {
1061 memzero_explicit(ctx->rsa.crt_prikey,
1062 half_key_sz * HPRE_CRT_PRMS);
1063 dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
1064 ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
1065 ctx->rsa.crt_prikey = NULL;
1066 }
1067
1068 if (ctx->rsa.prikey) {
1069 memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
1070 dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
1071 ctx->rsa.dma_prikey);
1072 ctx->rsa.prikey = NULL;
1073 }
1074
1075 hpre_ctx_clear(ctx, is_clear_all);
1076 }
1077
1078 /*
1079 * we should judge whether the key is CRT or not:
1080 * CRT: return true, N-CRT: return false.
1081 */
1082 static bool hpre_is_crt_key(struct rsa_key *key)
1083 {
1084 u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
1085 key->qinv_sz;
1086
1087 #define LEN_OF_NCRT_PARA 5
1088
1089 /* An N-CRT key carries fewer than 5 CRT parameters */
1090 return len > LEN_OF_NCRT_PARA;
1091 }
1092
1093 static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
1094 unsigned int keylen, bool private)
1095 {
1096 struct rsa_key rsa_key;
1097 int ret;
1098
1099 hpre_rsa_clear_ctx(ctx, false);
1100
1101 if (private)
1102 ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1103 else
1104 ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1105 if (ret < 0)
1106 return ret;
1107
1108 ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
1109 if (ret <= 0)
1110 return ret;
1111
1112 if (private) {
1113 ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1114 if (ret < 0)
1115 goto free;
1116
1117 if (hpre_is_crt_key(&rsa_key)) {
1118 ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
1119 if (ret < 0)
1120 goto free;
1121 }
1122 }
1123
1124 ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1125 if (ret < 0)
1126 goto free;
1127
1128 if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
1129 ret = -EINVAL;
1130 goto free;
1131 }
1132
1133 return 0;
1134
1135 free:
1136 hpre_rsa_clear_ctx(ctx, false);
1137 return ret;
1138 }
1139
1140 static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1141 unsigned int keylen)
1142 {
1143 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1144 int ret;
1145
1146 ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
1147 if (ret)
1148 return ret;
1149
1150 return hpre_rsa_setkey(ctx, key, keylen, false);
1151 }
1152
1153 static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1154 unsigned int keylen)
1155 {
1156 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1157 int ret;
1158
1159 ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
1160 if (ret)
1161 return ret;
1162
1163 return hpre_rsa_setkey(ctx, key, keylen, true);
1164 }
1165
1166 static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
1167 {
1168 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1169
1170 /* For 512 and 1536 bits key size, use soft tfm instead */
1171 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
1172 ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
1173 return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
1174
1175 return ctx->key_sz;
1176 }
1177
1178 static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
1179 {
1180 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1181 int ret;
1182
1183 ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
1184 if (IS_ERR(ctx->rsa.soft_tfm)) {
1185 pr_err("Can not alloc_akcipher!\n");
1186 return PTR_ERR(ctx->rsa.soft_tfm);
1187 }
1188
1189 akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
1190 hpre_align_pd());
1191
1192 ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
1193 if (ret)
1194 crypto_free_akcipher(ctx->rsa.soft_tfm);
1195
1196 return ret;
1197 }
1198
1199 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
1200 {
1201 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1202
1203 hpre_rsa_clear_ctx(ctx, true);
1204 crypto_free_akcipher(ctx->rsa.soft_tfm);
1205 }
1206
1207 static void hpre_key_to_big_end(u8 *data, int len)
1208 {
1209 int i, j;
1210
1211 for (i = 0; i < len / 2; i++) {
1212 j = len - i - 1;
1213 swap(data[j], data[i]);
1214 }
1215 }
1216
1217 static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
1218 bool is_ecdh)
1219 {
1220 struct device *dev = ctx->dev;
1221 unsigned int sz = ctx->key_sz;
1222 unsigned int shift = sz << 1;
1223
1224 if (is_clear_all)
1225 hisi_qm_stop_qp(ctx->qp);
1226
1227 if (is_ecdh && ctx->ecdh.p) {
1228 /* ecdh: p->a->k->b */
1229 memzero_explicit(ctx->ecdh.p + shift, sz);
1230 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1231 ctx->ecdh.p = NULL;
1232 } else if (!is_ecdh && ctx->curve25519.p) {
1233 /* curve25519: p->a->k */
1234 memzero_explicit(ctx->curve25519.p + shift, sz);
1235 dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
1236 ctx->curve25519.dma_p);
1237 ctx->curve25519.p = NULL;
1238 }
1239
1240 hpre_ctx_clear(ctx, is_clear_all);
1241 }
1242
1243 /*
1244 * Key widths of 192/224/256/384/521 bits are supported by HPRE, and the
1245 * width is rounded up as follows:
1246 * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
1247 * If the parameter bit width is smaller than the rounded width, the
1248 * high-order bits are zero-filled by software, so TASK_LENGTH1 is 0x3/0x5/0x8.
1249 */
1250 static unsigned int hpre_ecdh_supported_curve(unsigned short id)
1251 {
1252 switch (id) {
1253 case ECC_CURVE_NIST_P192:
1254 case ECC_CURVE_NIST_P256:
1255 return HPRE_ECC_HW256_KSZ_B;
1256 case ECC_CURVE_NIST_P384:
1257 return HPRE_ECC_HW384_KSZ_B;
1258 default:
1259 break;
1260 }
1261
1262 return 0;
1263 }
1264
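/*
 * Serialize one multi-precision curve parameter: copy the little-endian
 * u64 digits into 'addr' (the last digit may be partial when cur_sz is not
 * a multiple of 8, e.g. the 66-byte P-521 case), then byte-swap the whole
 * buffer so the result is a big-endian integer of cur_sz bytes.
 */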
1265 static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
1266 {
1267 unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
1268 u8 i = 0;
1269
1270 while (i < ndigits - 1) {
1271 memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
1272 i++;
1273 }
1274
1275 memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
1276 hpre_key_to_big_end((u8 *)addr, cur_sz);
1277 }
1278
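/*
 * Fill the ECDH parameter area for the chosen NIST curve. In the coherent
 * buffer, p, a and b occupy the fields at offsets 0, key_sz and 3 * key_sz
 * (the field at 2 * key_sz is reserved for the private key k), followed by
 * the base point as x | y; every value is right-aligned to cur_sz. A
 * full-length private key that is not smaller than the curve order n is
 * rejected.
 */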
1279 static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
1280 unsigned int cur_sz)
1281 {
1282 unsigned int shifta = ctx->key_sz << 1;
1283 unsigned int shiftb = ctx->key_sz << 2;
1284 void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
1285 void *a = ctx->ecdh.p + shifta - cur_sz;
1286 void *b = ctx->ecdh.p + shiftb - cur_sz;
1287 void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
1288 void *y = ctx->ecdh.g + shifta - cur_sz;
1289 const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
1290 char *n;
1291
1292 if (unlikely(!curve))
1293 return -EINVAL;
1294
1295 n = kzalloc(ctx->key_sz, GFP_KERNEL);
1296 if (!n)
1297 return -ENOMEM;
1298
1299 fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
1300 fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
1301 fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
1302 fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
1303 fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
1304 fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
1305
1306 if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
1307 kfree(n);
1308 return -EINVAL;
1309 }
1310
1311 kfree(n);
1312 return 0;
1313 }
1314
1315 static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
1316 {
1317 switch (id) {
1318 case ECC_CURVE_NIST_P192:
1319 return HPRE_ECC_NIST_P192_N_SIZE;
1320 case ECC_CURVE_NIST_P256:
1321 return HPRE_ECC_NIST_P256_N_SIZE;
1322 case ECC_CURVE_NIST_P384:
1323 return HPRE_ECC_NIST_P384_N_SIZE;
1324 default:
1325 break;
1326 }
1327
1328 return 0;
1329 }
1330
1331 static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
1332 {
1333 struct device *dev = ctx->dev;
1334 unsigned int sz, shift, curve_sz;
1335 int ret;
1336
1337 ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
1338 if (!ctx->key_sz)
1339 return -EINVAL;
1340
1341 curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1342 if (!curve_sz || params->key_size > curve_sz)
1343 return -EINVAL;
1344
1345 sz = ctx->key_sz;
1346
1347 if (!ctx->ecdh.p) {
1348 ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
1349 GFP_KERNEL);
1350 if (!ctx->ecdh.p)
1351 return -ENOMEM;
1352 }
1353
1354 shift = sz << 2;
1355 ctx->ecdh.g = ctx->ecdh.p + shift;
1356 ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
1357
1358 ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
1359 if (ret) {
1360 dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
1361 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1362 ctx->ecdh.p = NULL;
1363 return ret;
1364 }
1365
1366 return 0;
1367 }
1368
1369 static bool hpre_key_is_zero(char *key, unsigned short key_sz)
1370 {
1371 int i;
1372
1373 for (i = 0; i < key_sz; i++)
1374 if (key[i])
1375 return false;
1376
1377 return true;
1378 }
1379
1380 static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
1381 {
1382 struct device *dev = ctx->dev;
1383 int ret;
1384
1385 ret = crypto_get_default_rng();
1386 if (ret) {
1387 dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
1388 return ret;
1389 }
1390
1391 ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
1392 params->key_size);
1393 crypto_put_default_rng();
1394 if (ret)
1395 dev_err(dev, "failed to get rng, ret = %d!\n", ret);
1396
1397 return ret;
1398 }
1399
1400 static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1401 unsigned int len)
1402 {
1403 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1404 unsigned int sz, sz_shift, curve_sz;
1405 struct device *dev = ctx->dev;
1406 char key[HPRE_ECC_MAX_KSZ];
1407 struct ecdh params;
1408 int ret;
1409
1410 if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
1411 dev_err(dev, "failed to decode ecdh key!\n");
1412 return -EINVAL;
1413 }
1414
1415 /* Use stdrng to generate private key */
1416 if (!params.key || !params.key_size) {
1417 params.key = key;
1418 curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1419 if (!curve_sz) {
1420 dev_err(dev, "Invalid curve size!\n");
1421 return -EINVAL;
1422 }
1423
1424 params.key_size = curve_sz - 1;
1425 ret = ecdh_gen_privkey(ctx, &params);
1426 if (ret)
1427 return ret;
1428 }
1429
1430 if (hpre_key_is_zero(params.key, params.key_size)) {
1431 dev_err(dev, "Invalid hpre key!\n");
1432 return -EINVAL;
1433 }
1434
1435 hpre_ecc_clear_ctx(ctx, false, true);
1436
1437 ret = hpre_ecdh_set_param(ctx, &params);
1438 if (ret < 0) {
1439 dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
1440 return ret;
1441 }
1442
1443 sz = ctx->key_sz;
1444 sz_shift = (sz << 1) + sz - params.key_size;
1445 memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
1446
1447 return 0;
1448 }
1449
1450 static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
1451 struct hpre_asym_request *req,
1452 struct scatterlist *dst,
1453 struct scatterlist *src)
1454 {
1455 struct device *dev = ctx->dev;
1456 struct hpre_sqe *sqe = &req->req;
1457 dma_addr_t dma;
1458
1459 dma = le64_to_cpu(sqe->in);
1460 if (unlikely(dma_mapping_error(dev, dma)))
1461 return;
1462
1463 if (src && req->src)
1464 dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
1465
1466 dma = le64_to_cpu(sqe->out);
1467 if (unlikely(dma_mapping_error(dev, dma)))
1468 return;
1469
1470 if (req->dst)
1471 dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
1472 if (dst)
1473 dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
1474 }
1475
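/*
 * ECDH completion callback: the result point is written as two key_sz-wide,
 * zero-padded coordinates (as the memmove() offsets below assume), and the
 * two memmove() calls compact them into curve_sz bytes of x immediately
 * followed by curve_sz bytes of y before the request is completed.
 */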
1476 static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
1477 {
1478 unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1479 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1480 struct hpre_asym_request *req = NULL;
1481 struct kpp_request *areq;
1482 u64 overtime_thrhld;
1483 char *p;
1484 int ret;
1485
1486 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1487 areq = req->areq.ecdh;
1488 areq->dst_len = ctx->key_sz << 1;
1489
1490 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1491 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1492 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1493
1494 /* Do unmap before data processing */
1495 hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1496
1497 p = sg_virt(areq->dst);
1498 memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
1499 memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
1500
1501 kpp_request_complete(areq, ret);
1502
1503 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1504 }
1505
1506 static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
1507 struct kpp_request *req)
1508 {
1509 struct hpre_asym_request *h_req;
1510 struct hpre_sqe *msg;
1511 int req_id;
1512 void *tmp;
1513
1514 if (req->dst_len < ctx->key_sz << 1) {
1515 req->dst_len = ctx->key_sz << 1;
1516 return -EINVAL;
1517 }
1518
1519 tmp = kpp_request_ctx(req);
1520 h_req = PTR_ALIGN(tmp, hpre_align_sz());
1521 h_req->cb = hpre_ecdh_cb;
1522 h_req->areq.ecdh = req;
1523 msg = &h_req->req;
1524 memset(msg, 0, sizeof(*msg));
1525 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1526 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1527 msg->key = cpu_to_le64(ctx->ecdh.dma_p);
1528
1529 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1530 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1531 h_req->ctx = ctx;
1532
1533 req_id = hpre_add_req_to_ctx(h_req);
1534 if (req_id < 0)
1535 return -EBUSY;
1536
1537 msg->tag = cpu_to_le16((u16)req_id);
1538 return 0;
1539 }
1540
1541 static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
1542 struct scatterlist *data, unsigned int len)
1543 {
1544 struct hpre_sqe *msg = &hpre_req->req;
1545 struct hpre_ctx *ctx = hpre_req->ctx;
1546 struct device *dev = ctx->dev;
1547 unsigned int tmpshift;
1548 dma_addr_t dma = 0;
1549 void *ptr;
1550 int shift;
1551
1552 /* Src_data includes gx and gy. */
1553 shift = ctx->key_sz - (len >> 1);
1554 if (unlikely(shift < 0))
1555 return -EINVAL;
1556
1557 ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
1558 if (unlikely(!ptr))
1559 return -ENOMEM;
1560
1561 tmpshift = ctx->key_sz << 1;
1562 scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
1563 memcpy(ptr + shift, ptr + tmpshift, len >> 1);
1564 memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
1565
1566 hpre_req->src = ptr;
1567 msg->in = cpu_to_le64(dma);
1568 return 0;
1569 }
1570
1571 static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
1572 struct scatterlist *data, unsigned int len)
1573 {
1574 struct hpre_sqe *msg = &hpre_req->req;
1575 struct hpre_ctx *ctx = hpre_req->ctx;
1576 struct device *dev = ctx->dev;
1577 dma_addr_t dma;
1578
1579 if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
1580 dev_err(dev, "data or data length is illegal!\n");
1581 return -EINVAL;
1582 }
1583
1584 hpre_req->dst = NULL;
1585 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1586 if (unlikely(dma_mapping_error(dev, dma))) {
1587 dev_err(dev, "dma map data err!\n");
1588 return -ENOMEM;
1589 }
1590
1591 msg->out = cpu_to_le64(dma);
1592 return 0;
1593 }
1594
1595 static int hpre_ecdh_compute_value(struct kpp_request *req)
1596 {
1597 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1598 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1599 struct device *dev = ctx->dev;
1600 void *tmp = kpp_request_ctx(req);
1601 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
1602 struct hpre_sqe *msg = &hpre_req->req;
1603 int ret;
1604
1605 ret = hpre_ecdh_msg_request_set(ctx, req);
1606 if (unlikely(ret)) {
1607 dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
1608 return ret;
1609 }
1610
1611 if (req->src) {
1612 ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
1613 if (unlikely(ret)) {
1614 dev_err(dev, "failed to init src data, ret = %d!\n", ret);
1615 goto clear_all;
1616 }
1617 } else {
1618 msg->in = cpu_to_le64(ctx->ecdh.dma_g);
1619 }
1620
1621 ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
1622 if (unlikely(ret)) {
1623 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1624 goto clear_all;
1625 }
1626
1627 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
1628 msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT;
1629
1630 ret = hpre_send(ctx, msg);
1631 if (likely(!ret))
1632 return -EINPROGRESS;
1633
1634 clear_all:
1635 hpre_rm_req_from_ctx(hpre_req);
1636 hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1637 return ret;
1638 }
1639
1640 static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
1641 {
1642 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1643
1644 /* max size is the pub_key_size, including both x and y */
1645 return ctx->key_sz << 1;
1646 }
1647
1648 static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
1649 {
1650 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1651
1652 ctx->curve_id = ECC_CURVE_NIST_P192;
1653
1654 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
1655
1656 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1657 }
1658
1659 static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
1660 {
1661 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1662
1663 ctx->curve_id = ECC_CURVE_NIST_P256;
1664 ctx->enable_hpcore = 1;
1665
1666 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
1667
1668 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1669 }
1670
1671 static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
1672 {
1673 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1674
1675 ctx->curve_id = ECC_CURVE_NIST_P384;
1676
1677 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
1678
1679 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1680 }
1681
1682 static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
1683 {
1684 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1685
1686 hpre_ecc_clear_ctx(ctx, true, true);
1687 }
1688
1689 static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
1690 unsigned int len)
1691 {
1692 u8 secret[CURVE25519_KEY_SIZE] = { 0 };
1693 unsigned int sz = ctx->key_sz;
1694 const struct ecc_curve *curve;
1695 unsigned int shift = sz << 1;
1696 void *p;
1697
1698 /*
1699 * The key from 'buf' is in little-endian order; preprocess it as
1700 * described in RFC 7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
1701 * then convert it to big-endian. Only then does the result match the
1702 * software curve25519 implementation in the crypto subsystem.
1703 */
1704 memcpy(secret, buf, len);
1705 curve25519_clamp_secret(secret);
1706 hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
1707
1708 p = ctx->curve25519.p + sz - len;
1709
1710 curve = ecc_get_curve25519();
1711
1712 /* fill curve parameters */
1713 fill_curve_param(p, curve->p, len, curve->g.ndigits);
1714 fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
1715 memcpy(p + shift, secret, len);
1716 fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
1717 memzero_explicit(secret, CURVE25519_KEY_SIZE);
1718 }
1719
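/*
 * The curve25519 parameter area is one coherent buffer of 4 * key_sz bytes
 * laid out as p | a | k | gx (filled by hpre_curve25519_fill_curve()), so
 * ctx->curve25519.g simply points 3 * key_sz bytes into it; the clamped,
 * big-endian private key occupies the k slot.
 */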
1720 static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
1721 unsigned int len)
1722 {
1723 struct device *dev = ctx->dev;
1724 unsigned int sz = ctx->key_sz;
1725 unsigned int shift = sz << 1;
1726
1727 /* p->a->k->gx */
1728 if (!ctx->curve25519.p) {
1729 ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
1730 &ctx->curve25519.dma_p,
1731 GFP_KERNEL);
1732 if (!ctx->curve25519.p)
1733 return -ENOMEM;
1734 }
1735
1736 ctx->curve25519.g = ctx->curve25519.p + shift + sz;
1737 ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
1738
1739 hpre_curve25519_fill_curve(ctx, buf, len);
1740
1741 return 0;
1742 }
1743
1744 static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
1745 unsigned int len)
1746 {
1747 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1748 struct device *dev = ctx->dev;
1749 int ret = -EINVAL;
1750
1751 if (len != CURVE25519_KEY_SIZE ||
1752 !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1753 dev_err(dev, "key is null or key len is not 32bytes!\n");
1754 return ret;
1755 }
1756
1757 /* Free old secret if any */
1758 hpre_ecc_clear_ctx(ctx, false, false);
1759
1760 ctx->key_sz = CURVE25519_KEY_SIZE;
1761 ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
1762 if (ret) {
1763 dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
1764 hpre_ecc_clear_ctx(ctx, false, false);
1765 return ret;
1766 }
1767
1768 return 0;
1769 }
1770
1771 static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
1772 struct hpre_asym_request *req,
1773 struct scatterlist *dst,
1774 struct scatterlist *src)
1775 {
1776 struct device *dev = ctx->dev;
1777 struct hpre_sqe *sqe = &req->req;
1778 dma_addr_t dma;
1779
1780 dma = le64_to_cpu(sqe->in);
1781 if (unlikely(dma_mapping_error(dev, dma)))
1782 return;
1783
1784 if (src && req->src)
1785 dma_free_coherent(dev, ctx->key_sz, req->src, dma);
1786
1787 dma = le64_to_cpu(sqe->out);
1788 if (unlikely(dma_mapping_error(dev, dma)))
1789 return;
1790
1791 if (req->dst)
1792 dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
1793 if (dst)
1794 dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
1795 }
1796
1797 static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
1798 {
1799 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1800 struct hpre_asym_request *req = NULL;
1801 struct kpp_request *areq;
1802 u64 overtime_thrhld;
1803 int ret;
1804
1805 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1806 areq = req->areq.curve25519;
1807 areq->dst_len = ctx->key_sz;
1808
1809 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1810 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1811 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1812
1813 /* Do unmap before data processing */
1814 hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1815
1816 hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
1817
1818 kpp_request_complete(areq, ret);
1819
1820 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1821 }
1822
1823 static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
1824 struct kpp_request *req)
1825 {
1826 struct hpre_asym_request *h_req;
1827 struct hpre_sqe *msg;
1828 int req_id;
1829 void *tmp;
1830
1831 if (unlikely(req->dst_len < ctx->key_sz)) {
1832 req->dst_len = ctx->key_sz;
1833 return -EINVAL;
1834 }
1835
1836 tmp = kpp_request_ctx(req);
1837 h_req = PTR_ALIGN(tmp, hpre_align_sz());
1838 h_req->cb = hpre_curve25519_cb;
1839 h_req->areq.curve25519 = req;
1840 msg = &h_req->req;
1841 memset(msg, 0, sizeof(*msg));
1842 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1843 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1844 msg->key = cpu_to_le64(ctx->curve25519.dma_p);
1845
1846 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1847 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1848 h_req->ctx = ctx;
1849
1850 req_id = hpre_add_req_to_ctx(h_req);
1851 if (req_id < 0)
1852 return -EBUSY;
1853
1854 msg->tag = cpu_to_le16((u16)req_id);
1855 return 0;
1856 }
1857
1858 static void hpre_curve25519_src_modulo_p(u8 *ptr)
1859 {
1860 int i;
1861
1862 for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
1863 ptr[i] = 0;
1864
1865 /* The reduced value is ptr's last byte minus 0xed (the last byte of p) */
1866 ptr[i] -= 0xed;
1867 }
1868
1869 static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
1870 struct scatterlist *data, unsigned int len)
1871 {
1872 struct hpre_sqe *msg = &hpre_req->req;
1873 struct hpre_ctx *ctx = hpre_req->ctx;
1874 struct device *dev = ctx->dev;
1875 u8 p[CURVE25519_KEY_SIZE] = { 0 };
1876 const struct ecc_curve *curve;
1877 dma_addr_t dma = 0;
1878 u8 *ptr;
1879
1880 if (len != CURVE25519_KEY_SIZE) {
1881 dev_err(dev, "source_data len is not 32 bytes, len = %u!\n", len);
1882 return -EINVAL;
1883 }
1884
1885 ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
1886 if (unlikely(!ptr))
1887 return -ENOMEM;
1888
1889 scatterwalk_map_and_copy(ptr, data, 0, len, 0);
1890
1891 if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1892 dev_err(dev, "gx is null!\n");
1893 goto err;
1894 }
1895
1896 /*
1897 * Src_data (gx) is in little-endian order; the MSB of the final byte is
1898 * masked as described in RFC 7748, and the data is then transformed to
1899 * big-endian form so that hisi_hpre can use it.
1900 */
1901 ptr[31] &= 0x7f;
1902 hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
1903
1904 curve = ecc_get_curve25519();
1905
1906 fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
1907
1908 /*
1909 * When src_data lies in the range (2^255 - 19) to (2^255 - 1) it is not
1910 * smaller than p, so reduce it modulo p before using it.
1911 */
1912 if (memcmp(ptr, p, ctx->key_sz) == 0) {
1913 dev_err(dev, "gx is p!\n");
1914 goto err;
1915 } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
1916 hpre_curve25519_src_modulo_p(ptr);
1917 }
1918
1919 hpre_req->src = ptr;
1920 msg->in = cpu_to_le64(dma);
1921 return 0;
1922
1923 err:
1924 dma_free_coherent(dev, ctx->key_sz, ptr, dma);
1925 return -EINVAL;
1926 }
1927
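/*
 * DMA-map the single-entry destination scatterlist that will receive
 * the big-endian result from the hardware.
 */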
1928 static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
1929 struct scatterlist *data, unsigned int len)
1930 {
1931 struct hpre_sqe *msg = &hpre_req->req;
1932 struct hpre_ctx *ctx = hpre_req->ctx;
1933 struct device *dev = ctx->dev;
1934 dma_addr_t dma;
1935
1936 if (!data || !sg_is_last(data) || len != ctx->key_sz) {
1937 dev_err(dev, "data or data length is illegal!\n");
1938 return -EINVAL;
1939 }
1940
1941 hpre_req->dst = NULL;
1942 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1943 if (unlikely(dma_mapping_error(dev, dma))) {
1944 dev_err(dev, "dma map data err!\n");
1945 return -ENOMEM;
1946 }
1947
1948 msg->out = cpu_to_le64(dma);
1949 return 0;
1950 }
1951
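/*
 * Common entry point for X25519 public-key generation and shared-secret
 * computation: when req->src is NULL the pre-mapped base point g is used
 * as input, otherwise the peer's public key from req->src is mapped.
 */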
1952 static int hpre_curve25519_compute_value(struct kpp_request *req)
1953 {
1954 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1955 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1956 struct device *dev = ctx->dev;
1957 void *tmp = kpp_request_ctx(req);
1958 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
1959 struct hpre_sqe *msg = &hpre_req->req;
1960 int ret;
1961
1962 ret = hpre_curve25519_msg_request_set(ctx, req);
1963 if (unlikely(ret)) {
1964 dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
1965 return ret;
1966 }
1967
1968 if (req->src) {
1969 ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
1970 if (unlikely(ret)) {
1971 dev_err(dev, "failed to init src data, ret = %d!\n",
1972 ret);
1973 goto clear_all;
1974 }
1975 } else {
1976 msg->in = cpu_to_le64(ctx->curve25519.dma_g);
1977 }
1978
1979 ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
1980 if (unlikely(ret)) {
1981 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1982 goto clear_all;
1983 }
1984
1985 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
1986 ret = hpre_send(ctx, msg);
1987 if (likely(!ret))
1988 return -EINPROGRESS;
1989
1990 clear_all:
1991 hpre_rm_req_from_ctx(hpre_req);
1992 hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1993 return ret;
1994 }
1995
1996 static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
1997 {
1998 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1999
2000 return ctx->key_sz;
2001 }
2002
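/*
 * Set the per-request context size and bind the tfm to an HPRE queue
 * pair that handles the ECC algorithm class.
 */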
2003 static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
2004 {
2005 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
2006
2007 kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
2008
2009 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
2010 }
2011
2012 static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
2013 {
2014 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
2015
2016 hpre_ecc_clear_ctx(ctx, true, false);
2017 }
2018
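/* Crypto API algorithm templates backed by the HPRE accelerator */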
2019 static struct akcipher_alg rsa = {
2020 .encrypt = hpre_rsa_enc,
2021 .decrypt = hpre_rsa_dec,
2022 .set_pub_key = hpre_rsa_setpubkey,
2023 .set_priv_key = hpre_rsa_setprivkey,
2024 .max_size = hpre_rsa_max_size,
2025 .init = hpre_rsa_init_tfm,
2026 .exit = hpre_rsa_exit_tfm,
2027 .base = {
2028 .cra_ctxsize = sizeof(struct hpre_ctx),
2029 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2030 .cra_name = "rsa",
2031 .cra_driver_name = "hpre-rsa",
2032 .cra_module = THIS_MODULE,
2033 },
2034 };
2035
2036 static struct kpp_alg dh = {
2037 .set_secret = hpre_dh_set_secret,
2038 .generate_public_key = hpre_dh_compute_value,
2039 .compute_shared_secret = hpre_dh_compute_value,
2040 .max_size = hpre_dh_max_size,
2041 .init = hpre_dh_init_tfm,
2042 .exit = hpre_dh_exit_tfm,
2043 .base = {
2044 .cra_ctxsize = sizeof(struct hpre_ctx),
2045 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2046 .cra_name = "dh",
2047 .cra_driver_name = "hpre-dh",
2048 .cra_module = THIS_MODULE,
2049 },
2050 };
2051
2052 static struct kpp_alg ecdh_curves[] = {
2053 {
2054 .set_secret = hpre_ecdh_set_secret,
2055 .generate_public_key = hpre_ecdh_compute_value,
2056 .compute_shared_secret = hpre_ecdh_compute_value,
2057 .max_size = hpre_ecdh_max_size,
2058 .init = hpre_ecdh_nist_p192_init_tfm,
2059 .exit = hpre_ecdh_exit_tfm,
2060 .base = {
2061 .cra_ctxsize = sizeof(struct hpre_ctx),
2062 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2063 .cra_name = "ecdh-nist-p192",
2064 .cra_driver_name = "hpre-ecdh-nist-p192",
2065 .cra_module = THIS_MODULE,
2066 },
2067 }, {
2068 .set_secret = hpre_ecdh_set_secret,
2069 .generate_public_key = hpre_ecdh_compute_value,
2070 .compute_shared_secret = hpre_ecdh_compute_value,
2071 .max_size = hpre_ecdh_max_size,
2072 .init = hpre_ecdh_nist_p256_init_tfm,
2073 .exit = hpre_ecdh_exit_tfm,
2074 .base = {
2075 .cra_ctxsize = sizeof(struct hpre_ctx),
2076 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2077 .cra_name = "ecdh-nist-p256",
2078 .cra_driver_name = "hpre-ecdh-nist-p256",
2079 .cra_module = THIS_MODULE,
2080 },
2081 }, {
2082 .set_secret = hpre_ecdh_set_secret,
2083 .generate_public_key = hpre_ecdh_compute_value,
2084 .compute_shared_secret = hpre_ecdh_compute_value,
2085 .max_size = hpre_ecdh_max_size,
2086 .init = hpre_ecdh_nist_p384_init_tfm,
2087 .exit = hpre_ecdh_exit_tfm,
2088 .base = {
2089 .cra_ctxsize = sizeof(struct hpre_ctx),
2090 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2091 .cra_name = "ecdh-nist-p384",
2092 .cra_driver_name = "hpre-ecdh-nist-p384",
2093 .cra_module = THIS_MODULE,
2094 },
2095 }
2096 };
2097
2098 static struct kpp_alg curve25519_alg = {
2099 .set_secret = hpre_curve25519_set_secret,
2100 .generate_public_key = hpre_curve25519_compute_value,
2101 .compute_shared_secret = hpre_curve25519_compute_value,
2102 .max_size = hpre_curve25519_max_size,
2103 .init = hpre_curve25519_init_tfm,
2104 .exit = hpre_curve25519_exit_tfm,
2105 .base = {
2106 .cra_ctxsize = sizeof(struct hpre_ctx),
2107 .cra_priority = HPRE_CRYPTO_ALG_PRI,
2108 .cra_name = "curve25519",
2109 .cra_driver_name = "hpre-curve25519",
2110 .cra_module = THIS_MODULE,
2111 },
2112 };
2113
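/* Per-algorithm (un)registration helpers, gated on the device capability bits */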
2114 static int hpre_register_rsa(struct hisi_qm *qm)
2115 {
2116 int ret;
2117
2118 if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
2119 return 0;
2120
2121 rsa.base.cra_flags = 0;
2122 ret = crypto_register_akcipher(&rsa);
2123 if (ret)
2124 dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
2125
2126 return ret;
2127 }
2128
2129 static void hpre_unregister_rsa(struct hisi_qm *qm)
2130 {
2131 if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
2132 return;
2133
2134 crypto_unregister_akcipher(&rsa);
2135 }
2136
2137 static int hpre_register_dh(struct hisi_qm *qm)
2138 {
2139 int ret;
2140
2141 if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
2142 return 0;
2143
2144 ret = crypto_register_kpp(&dh);
2145 if (ret)
2146 dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
2147
2148 return ret;
2149 }
2150
2151 static void hpre_unregister_dh(struct hisi_qm *qm)
2152 {
2153 if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
2154 return;
2155
2156 crypto_unregister_kpp(&dh);
2157 }
2158
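/*
 * Register every supported NIST ECDH curve; on failure, unregister the
 * curves that were already registered.
 */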
2159 static int hpre_register_ecdh(struct hisi_qm *qm)
2160 {
2161 int ret, i;
2162
2163 if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
2164 return 0;
2165
2166 for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
2167 ret = crypto_register_kpp(&ecdh_curves[i]);
2168 if (ret) {
2169 dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
2170 ecdh_curves[i].base.cra_name, ret);
2171 goto unreg_kpp;
2172 }
2173 }
2174
2175 return 0;
2176
2177 unreg_kpp:
2178 for (--i; i >= 0; --i)
2179 crypto_unregister_kpp(&ecdh_curves[i]);
2180
2181 return ret;
2182 }
2183
2184 static void hpre_unregister_ecdh(struct hisi_qm *qm)
2185 {
2186 int i;
2187
2188 if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
2189 return;
2190
2191 for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
2192 crypto_unregister_kpp(&ecdh_curves[i]);
2193 }
2194
2195 static int hpre_register_x25519(struct hisi_qm *qm)
2196 {
2197 int ret;
2198
2199 if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
2200 return 0;
2201
2202 ret = crypto_register_kpp(&curve25519_alg);
2203 if (ret)
2204 dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
2205
2206 return ret;
2207 }
2208
2209 static void hpre_unregister_x25519(struct hisi_qm *qm)
2210 {
2211 if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
2212 return;
2213
2214 crypto_unregister_kpp(&curve25519_alg);
2215 }
2216
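/*
 * Algorithms are registered only when the first HPRE device comes up;
 * additional devices just increment the reference count.
 */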
2217 int hpre_algs_register(struct hisi_qm *qm)
2218 {
2219 int ret = 0;
2220
2221 mutex_lock(&hpre_algs_lock);
2222 if (hpre_available_devs) {
2223 hpre_available_devs++;
2224 goto unlock;
2225 }
2226
2227 ret = hpre_register_rsa(qm);
2228 if (ret)
2229 goto unlock;
2230
2231 ret = hpre_register_dh(qm);
2232 if (ret)
2233 goto unreg_rsa;
2234
2235 ret = hpre_register_ecdh(qm);
2236 if (ret)
2237 goto unreg_dh;
2238
2239 ret = hpre_register_x25519(qm);
2240 if (ret)
2241 goto unreg_ecdh;
2242
2243 hpre_available_devs++;
2244 mutex_unlock(&hpre_algs_lock);
2245
2246 return ret;
2247
2248 unreg_ecdh:
2249 hpre_unregister_ecdh(qm);
2250 unreg_dh:
2251 hpre_unregister_dh(qm);
2252 unreg_rsa:
2253 hpre_unregister_rsa(qm);
2254 unlock:
2255 mutex_unlock(&hpre_algs_lock);
2256 return ret;
2257 }
2258
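/*
 * Drop one device reference and unregister the algorithms when the last
 * HPRE device goes away.
 */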
2259 void hpre_algs_unregister(struct hisi_qm *qm)
2260 {
2261 mutex_lock(&hpre_algs_lock);
2262 if (--hpre_available_devs)
2263 goto unlock;
2264
2265 hpre_unregister_x25519(qm);
2266 hpre_unregister_ecdh(qm);
2267 hpre_unregister_dh(qm);
2268 hpre_unregister_rsa(qm);
2269
2270 unlock:
2271 mutex_unlock(&hpre_algs_lock);
2272 }
2273