1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Cryptographic API.
4 *
5 * s390 implementation of the AES Cipher Algorithm with protected keys.
6 *
7 * s390 Version:
8 * Copyright IBM Corp. 2017, 2025
9 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Harald Freudenberger <freude@de.ibm.com>
11 */
12
13 #define pr_fmt(fmt) "paes_s390: " fmt
14
15 #include <linux/atomic.h>
16 #include <linux/cpufeature.h>
17 #include <linux/delay.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/miscdevice.h>
21 #include <linux/module.h>
22 #include <linux/mutex.h>
23 #include <linux/spinlock.h>
24 #include <crypto/aes.h>
25 #include <crypto/algapi.h>
26 #include <crypto/engine.h>
27 #include <crypto/internal/skcipher.h>
28 #include <crypto/xts.h>
29 #include <asm/cpacf.h>
30 #include <asm/pkey.h>
31
32 /*
33 * Key blobs smaller/bigger than these defines are rejected
34 * by the common code even before the individual setkey function
35 * is called. As paes can handle different kinds of key blobs
36 * and padding is also possible, the limits need to be generous.
37 */
38 #define PAES_MIN_KEYSIZE 16
39 #define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE
40 #define PAES_256_PROTKEY_SIZE (32 + 32) /* key + verification pattern */
41 #define PXTS_256_PROTKEY_SIZE (32 + 32 + 32) /* k1 + k2 + verification pattern */
42
43 static bool pkey_clrkey_allowed;
44 module_param_named(clrkey, pkey_clrkey_allowed, bool, 0444);
45 MODULE_PARM_DESC(clrkey, "Allow clear key material (default N)");
46
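/* shared page of pre-computed counter blocks for ctr mode, protected by ctrblk_lock */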
47 static u8 *ctrblk;
48 static DEFINE_MUTEX(ctrblk_lock);
49
50 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
51
52 static struct crypto_engine *paes_crypto_engine;
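/* max number of requests the crypto engine may hold in its internal queue */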
53 #define MAX_QLEN 10
54
55 /*
56 * protected key specific stuff
57 */
58
59 struct paes_protkey {
60 u32 type;
61 u32 len;
62 u8 protkey[PXTS_256_PROTKEY_SIZE];
63 };
64
65 #define PK_STATE_NO_KEY 0
66 #define PK_STATE_CONVERT_IN_PROGRESS 1
67 #define PK_STATE_VALID 2
68
69 struct s390_paes_ctx {
70 /* source key material used to derive a protected key from */
71 u8 keybuf[PAES_MAX_KEYSIZE];
72 unsigned int keylen;
73
74 /* cpacf function code to use with this protected key type */
75 long fc;
76
77 /* nr of requests enqueued via crypto engine which use this tfm ctx */
78 atomic_t via_engine_ctr;
79
80 /* spinlock to atomic read/update all the following fields */
81 spinlock_t pk_lock;
82
83 /* see PK_STATE* defines above, < 0 holds convert failure rc */
84 int pk_state;
85 /* if state is valid, pk holds the protected key */
86 struct paes_protkey pk;
87 };
88
89 struct s390_pxts_ctx {
90 /* source key material used to derive a protected key from */
91 u8 keybuf[2 * PAES_MAX_KEYSIZE];
92 unsigned int keylen;
93
94 /* cpacf function code to use with this protected key type */
95 long fc;
96
97 /* nr of requests enqueued via crypto engine which use this tfm ctx */
98 atomic_t via_engine_ctr;
99
100 /* spinlock to atomic read/update all the following fields */
101 spinlock_t pk_lock;
102
103 /* see PK_STATE* defines above, < 0 holds convert failure rc */
104 int pk_state;
105 /* if state is valid, pk[] hold(s) the protected key(s) */
106 struct paes_protkey pk[2];
107 };
108
109 /*
110 * make_clrkey_token() - wrap the raw key ck with pkey clearkey token
111 * information.
112 * @returns the size of the clearkey token
113 */
static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
115 {
116 struct clrkey_token {
117 u8 type;
118 u8 res0[3];
119 u8 version;
120 u8 res1[3];
121 u32 keytype;
122 u32 len;
123 u8 key[];
124 } __packed *token = (struct clrkey_token *)dest;
125
126 token->type = 0x00;
127 token->version = 0x02;
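	/* 16/24/32 byte clear key maps to keytype 1/2/3 (AES 128/192/256) */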
128 token->keytype = (cklen - 8) >> 3;
129 token->len = cklen;
130 memcpy(token->key, ck, cklen);
131
132 return sizeof(*token) + cklen;
133 }
134
135 /*
136 * paes_ctx_setkey() - Set key value into context, maybe construct
137 * a clear key token digestible by pkey from a clear key value.
138 */
static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx,
140 const u8 *key, unsigned int keylen)
141 {
142 if (keylen > sizeof(ctx->keybuf))
143 return -EINVAL;
144
145 switch (keylen) {
146 case 16:
147 case 24:
148 case 32:
149 /* clear key value, prepare pkey clear key token in keybuf */
150 memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
151 ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf);
152 break;
153 default:
154 /* other key material, let pkey handle this */
155 memcpy(ctx->keybuf, key, keylen);
156 ctx->keylen = keylen;
157 break;
158 }
159
160 return 0;
161 }
162
163 /*
164 * pxts_ctx_setkey() - Set key value into context, maybe construct
165 * a clear key token digestible by pkey from a clear key value.
166 */
static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx,
168 const u8 *key, unsigned int keylen)
169 {
170 size_t cklen = keylen / 2;
171
172 if (keylen > sizeof(ctx->keybuf))
173 return -EINVAL;
174
175 switch (keylen) {
176 case 32:
177 case 64:
178 /* clear key value, prepare pkey clear key tokens in keybuf */
179 memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
180 ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf);
181 ctx->keylen += make_clrkey_token(key + cklen, cklen,
182 ctx->keybuf + ctx->keylen);
183 break;
184 default:
185 /* other key material, let pkey handle this */
186 memcpy(ctx->keybuf, key, keylen);
187 ctx->keylen = keylen;
188 break;
189 }
190
191 return 0;
192 }
193
194 /*
195 * Convert the raw key material into a protected key via PKEY api.
196 * This function may sleep - don't call in non-sleeping context.
197 */
static inline int convert_key(const u8 *key, unsigned int keylen,
199 struct paes_protkey *pk, bool tested)
200 {
201 u32 xflags = PKEY_XFLAG_NOMEMALLOC;
202 int rc, i;
203
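	/* for tested tfms reject clear key material unless allowed via the 'clrkey' module parameter */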
204 if (tested && !pkey_clrkey_allowed)
205 xflags |= PKEY_XFLAG_NOCLEARKEY;
206
207 pk->len = sizeof(pk->protkey);
208
209 /*
210 * In case of a busy card retry with increasing delay
211 * of 200, 400, 800 and 1600 ms - in total 3 s.
212 */
213 for (rc = -EIO, i = 0; rc && i < 5; i++) {
214 if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
215 rc = -EINTR;
216 goto out;
217 }
218 rc = pkey_key2protkey(key, keylen,
219 pk->protkey, &pk->len, &pk->type,
220 xflags);
221 }
222
223 out:
224 pr_debug("rc=%d\n", rc);
225 return rc;
226 }
227
/*
 * (Re-)Convert the raw key material from the ctx into a protected key
 * via the convert_key() function. Update the pk_state, pk_type, pk_len
 * and the protected key in the tfm context.
 * Please note this function may be invoked concurrently with the very
 * same tfm context. The pk_lock spinlock in the context ensures an
 * atomic update of the pk and the pk state but does not guarantee any
 * order of update. So a freshly converted valid protected key may get
 * overwritten with an 'old' expired key value. This does no harm: the
 * cpacf instructions detect such an invalid key and refuse to operate
 * with it, and the calling code then triggers a (re-)conversion. This
 * may lead to unnecessary additional conversions but never to invalid
 * data on en- or decrypt operations.
 */
static int paes_convert_key(struct s390_paes_ctx *ctx, bool tested)
243 {
244 struct paes_protkey pk;
245 int rc;
246
247 spin_lock_bh(&ctx->pk_lock);
248 ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
249 spin_unlock_bh(&ctx->pk_lock);
250
251 rc = convert_key(ctx->keybuf, ctx->keylen, &pk, tested);
252
253 /* update context */
254 spin_lock_bh(&ctx->pk_lock);
255 if (rc) {
256 ctx->pk_state = rc;
257 } else {
258 ctx->pk_state = PK_STATE_VALID;
259 ctx->pk = pk;
260 }
261 spin_unlock_bh(&ctx->pk_lock);
262
263 memzero_explicit(&pk, sizeof(pk));
264 pr_debug("rc=%d\n", rc);
265 return rc;
266 }
267
268 /*
269 * (Re-)Convert the raw xts key material from the ctx into a
270 * protected key via convert_key() function. Update the pk_state,
271 * pk_type, pk_len and the protected key in the tfm context.
272 * See also comments on function paes_convert_key.
273 */
static int pxts_convert_key(struct s390_pxts_ctx *ctx, bool tested)
275 {
276 struct paes_protkey pk0, pk1;
277 size_t split_keylen;
278 int rc;
279
280 spin_lock_bh(&ctx->pk_lock);
281 ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
282 spin_unlock_bh(&ctx->pk_lock);
283
284 rc = convert_key(ctx->keybuf, ctx->keylen, &pk0, tested);
285 if (rc)
286 goto out;
287
288 switch (pk0.type) {
289 case PKEY_KEYTYPE_AES_128:
290 case PKEY_KEYTYPE_AES_256:
291 /* second keytoken required */
292 if (ctx->keylen % 2) {
293 rc = -EINVAL;
294 goto out;
295 }
296 split_keylen = ctx->keylen / 2;
297 rc = convert_key(ctx->keybuf + split_keylen,
298 split_keylen, &pk1, tested);
299 if (rc)
300 goto out;
301 if (pk0.type != pk1.type) {
302 rc = -EINVAL;
303 goto out;
304 }
305 break;
306 case PKEY_KEYTYPE_AES_XTS_128:
307 case PKEY_KEYTYPE_AES_XTS_256:
308 /* single key */
309 pk1.type = 0;
310 break;
311 default:
312 /* unsupported protected keytype */
313 rc = -EINVAL;
314 goto out;
315 }
316
317 out:
318 /* update context */
319 spin_lock_bh(&ctx->pk_lock);
320 if (rc) {
321 ctx->pk_state = rc;
322 } else {
323 ctx->pk_state = PK_STATE_VALID;
324 ctx->pk[0] = pk0;
325 ctx->pk[1] = pk1;
326 }
327 spin_unlock_bh(&ctx->pk_lock);
328
329 memzero_explicit(&pk0, sizeof(pk0));
330 memzero_explicit(&pk1, sizeof(pk1));
331 pr_debug("rc=%d\n", rc);
332 return rc;
333 }
334
335 /*
336 * PAES ECB implementation
337 */
338
339 struct ecb_param {
340 u8 key[PAES_256_PROTKEY_SIZE];
341 } __packed;
342
343 struct s390_pecb_req_ctx {
344 unsigned long modifier;
345 struct skcipher_walk walk;
346 bool param_init_done;
347 struct ecb_param param;
348 };
349
static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
351 unsigned int key_len)
352 {
353 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
354 bool tested = crypto_skcipher_tested(tfm);
355 long fc;
356 int rc;
357
358 /* set raw key into context */
359 rc = paes_ctx_setkey(ctx, in_key, key_len);
360 if (rc)
361 goto out;
362
363 /* convert key into protected key */
364 rc = paes_convert_key(ctx, tested);
365 if (rc)
366 goto out;
367
368 /* Pick the correct function code based on the protected key type */
369 switch (ctx->pk.type) {
370 case PKEY_KEYTYPE_AES_128:
371 fc = CPACF_KM_PAES_128;
372 break;
373 case PKEY_KEYTYPE_AES_192:
374 fc = CPACF_KM_PAES_192;
375 break;
376 case PKEY_KEYTYPE_AES_256:
377 fc = CPACF_KM_PAES_256;
378 break;
379 default:
380 fc = 0;
381 break;
382 }
383 ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
384
385 rc = fc ? 0 : -EINVAL;
386
387 out:
388 pr_debug("rc=%d\n", rc);
389 return rc;
390 }
391
static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx,
393 struct s390_pecb_req_ctx *req_ctx,
394 bool tested, bool maysleep)
395 {
396 struct ecb_param *param = &req_ctx->param;
397 struct skcipher_walk *walk = &req_ctx->walk;
398 unsigned int nbytes, n, k;
399 int pk_state, rc = 0;
400
401 if (!req_ctx->param_init_done) {
402 /* fetch and check protected key state */
403 spin_lock_bh(&ctx->pk_lock);
404 pk_state = ctx->pk_state;
405 switch (pk_state) {
406 case PK_STATE_NO_KEY:
407 rc = -ENOKEY;
408 break;
409 case PK_STATE_CONVERT_IN_PROGRESS:
410 rc = -EKEYEXPIRED;
411 break;
412 case PK_STATE_VALID:
413 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
414 req_ctx->param_init_done = true;
415 break;
416 default:
417 rc = pk_state < 0 ? pk_state : -EIO;
418 break;
419 }
420 spin_unlock_bh(&ctx->pk_lock);
421 }
422 if (rc)
423 goto out;
424
425 /*
426 * Note that in case of partial processing or failure the walk
427 * is NOT unmapped here. So a follow up task may reuse the walk
428 * or in case of unrecoverable failure needs to unmap it.
429 */
430 while ((nbytes = walk->nbytes) != 0) {
431 /* only use complete blocks */
432 n = nbytes & ~(AES_BLOCK_SIZE - 1);
433 k = cpacf_km(ctx->fc | req_ctx->modifier, param,
434 walk->dst.virt.addr, walk->src.virt.addr, n);
435 if (k)
436 rc = skcipher_walk_done(walk, nbytes - k);
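		/* k < n: cpacf did not process all bytes, protected key presumably expired, (re-)convert and retry */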
437 if (k < n) {
438 if (!maysleep) {
439 rc = -EKEYEXPIRED;
440 goto out;
441 }
442 rc = paes_convert_key(ctx, tested);
443 if (rc)
444 goto out;
445 spin_lock_bh(&ctx->pk_lock);
446 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
447 spin_unlock_bh(&ctx->pk_lock);
448 }
449 }
450
451 out:
452 pr_debug("rc=%d\n", rc);
453 return rc;
454 }
455
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
457 {
458 struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
459 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
460 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
461 struct skcipher_walk *walk = &req_ctx->walk;
462 bool tested = crypto_skcipher_tested(tfm);
463 int rc;
464
465 /*
466 * Attempt synchronous encryption first. If it fails, schedule the request
467 * asynchronously via the crypto engine. To preserve execution order,
468 * once a request is queued to the engine, further requests using the same
469 * tfm will also be routed through the engine.
470 */
471
472 rc = skcipher_walk_virt(walk, req, false);
473 if (rc)
474 goto out;
475
476 req_ctx->modifier = modifier;
477 req_ctx->param_init_done = false;
478
479 /* Try synchronous operation if no active engine usage */
480 if (!atomic_read(&ctx->via_engine_ctr)) {
481 rc = ecb_paes_do_crypt(ctx, req_ctx, tested, false);
482 if (rc == 0)
483 goto out;
484 }
485
/*
 * If the sync operation failed with an expired key, or there are
 * already requests enqueued via the engine, fall back to async.
 * Mark the tfm as using the engine to serialize requests.
 */
491 if (rc == 0 || rc == -EKEYEXPIRED) {
492 atomic_inc(&ctx->via_engine_ctr);
493 rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
494 if (rc != -EINPROGRESS)
495 atomic_dec(&ctx->via_engine_ctr);
496 }
497
498 if (rc != -EINPROGRESS)
499 skcipher_walk_done(walk, rc);
500
501 out:
502 if (rc != -EINPROGRESS)
503 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
504 pr_debug("rc=%d\n", rc);
505 return rc;
506 }
507
static int ecb_paes_encrypt(struct skcipher_request *req)
509 {
510 return ecb_paes_crypt(req, 0);
511 }
512
static int ecb_paes_decrypt(struct skcipher_request *req)
514 {
515 return ecb_paes_crypt(req, CPACF_DECRYPT);
516 }
517
static int ecb_paes_init(struct crypto_skcipher *tfm)
519 {
520 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
521
522 memset(ctx, 0, sizeof(*ctx));
523 spin_lock_init(&ctx->pk_lock);
524
525 crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx));
526
527 return 0;
528 }
529
static void ecb_paes_exit(struct crypto_skcipher *tfm)
531 {
532 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
533
534 memzero_explicit(ctx, sizeof(*ctx));
535 }
536
static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq)
538 {
539 struct skcipher_request *req = skcipher_request_cast(areq);
540 struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
541 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
542 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
543 struct skcipher_walk *walk = &req_ctx->walk;
544 bool tested = crypto_skcipher_tested(tfm);
545 int rc;
546
547 /* walk has already been prepared */
548
549 rc = ecb_paes_do_crypt(ctx, req_ctx, tested, true);
550 if (rc == -EKEYEXPIRED) {
551 /*
552 * Protected key expired, conversion is in process.
553 * Trigger a re-schedule of this request by returning
554 * -ENOSPC ("hardware queue is full") to the crypto engine.
 * To avoid immediate re-invocation of this callback,
556 * tell the scheduler to voluntarily give up the CPU here.
557 */
558 cond_resched();
559 pr_debug("rescheduling request\n");
560 return -ENOSPC;
561 } else if (rc) {
562 skcipher_walk_done(walk, rc);
563 }
564
565 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
566 pr_debug("request complete with rc=%d\n", rc);
567 local_bh_disable();
568 atomic_dec(&ctx->via_engine_ctr);
569 crypto_finalize_skcipher_request(engine, req, rc);
570 local_bh_enable();
571 return rc;
572 }
573
574 static struct skcipher_engine_alg ecb_paes_alg = {
575 .base = {
576 .base.cra_name = "ecb(paes)",
577 .base.cra_driver_name = "ecb-paes-s390",
578 .base.cra_priority = 401, /* combo: aes + ecb + 1 */
579 .base.cra_blocksize = AES_BLOCK_SIZE,
580 .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
581 .base.cra_module = THIS_MODULE,
582 .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list),
583 .init = ecb_paes_init,
584 .exit = ecb_paes_exit,
585 .min_keysize = PAES_MIN_KEYSIZE,
586 .max_keysize = PAES_MAX_KEYSIZE,
587 .setkey = ecb_paes_setkey,
588 .encrypt = ecb_paes_encrypt,
589 .decrypt = ecb_paes_decrypt,
590 },
591 .op = {
592 .do_one_request = ecb_paes_do_one_request,
593 },
594 };
595
596 /*
597 * PAES CBC implementation
598 */
599
600 struct cbc_param {
601 u8 iv[AES_BLOCK_SIZE];
602 u8 key[PAES_256_PROTKEY_SIZE];
603 } __packed;
604
605 struct s390_pcbc_req_ctx {
606 unsigned long modifier;
607 struct skcipher_walk walk;
608 bool param_init_done;
609 struct cbc_param param;
610 };
611
static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
613 unsigned int key_len)
614 {
615 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
616 bool tested = crypto_skcipher_tested(tfm);
617 long fc;
618 int rc;
619
620 /* set raw key into context */
621 rc = paes_ctx_setkey(ctx, in_key, key_len);
622 if (rc)
623 goto out;
624
625 /* convert raw key into protected key */
626 rc = paes_convert_key(ctx, tested);
627 if (rc)
628 goto out;
629
630 /* Pick the correct function code based on the protected key type */
631 switch (ctx->pk.type) {
632 case PKEY_KEYTYPE_AES_128:
633 fc = CPACF_KMC_PAES_128;
634 break;
635 case PKEY_KEYTYPE_AES_192:
636 fc = CPACF_KMC_PAES_192;
637 break;
638 case PKEY_KEYTYPE_AES_256:
639 fc = CPACF_KMC_PAES_256;
640 break;
641 default:
642 fc = 0;
643 break;
644 }
645 ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
646
647 rc = fc ? 0 : -EINVAL;
648
649 out:
650 pr_debug("rc=%d\n", rc);
651 return rc;
652 }
653
static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx,
655 struct s390_pcbc_req_ctx *req_ctx,
656 bool tested, bool maysleep)
657 {
658 struct cbc_param *param = &req_ctx->param;
659 struct skcipher_walk *walk = &req_ctx->walk;
660 unsigned int nbytes, n, k;
661 int pk_state, rc = 0;
662
663 if (!req_ctx->param_init_done) {
664 /* fetch and check protected key state */
665 spin_lock_bh(&ctx->pk_lock);
666 pk_state = ctx->pk_state;
667 switch (pk_state) {
668 case PK_STATE_NO_KEY:
669 rc = -ENOKEY;
670 break;
671 case PK_STATE_CONVERT_IN_PROGRESS:
672 rc = -EKEYEXPIRED;
673 break;
674 case PK_STATE_VALID:
675 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
676 req_ctx->param_init_done = true;
677 break;
678 default:
679 rc = pk_state < 0 ? pk_state : -EIO;
680 break;
681 }
682 spin_unlock_bh(&ctx->pk_lock);
683 }
684 if (rc)
685 goto out;
686
687 memcpy(param->iv, walk->iv, AES_BLOCK_SIZE);
688
689 /*
690 * Note that in case of partial processing or failure the walk
691 * is NOT unmapped here. So a follow up task may reuse the walk
692 * or in case of unrecoverable failure needs to unmap it.
693 */
694 while ((nbytes = walk->nbytes) != 0) {
695 /* only use complete blocks */
696 n = nbytes & ~(AES_BLOCK_SIZE - 1);
697 k = cpacf_kmc(ctx->fc | req_ctx->modifier, param,
698 walk->dst.virt.addr, walk->src.virt.addr, n);
699 if (k) {
700 memcpy(walk->iv, param->iv, AES_BLOCK_SIZE);
701 rc = skcipher_walk_done(walk, nbytes - k);
702 }
703 if (k < n) {
704 if (!maysleep) {
705 rc = -EKEYEXPIRED;
706 goto out;
707 }
708 rc = paes_convert_key(ctx, tested);
709 if (rc)
710 goto out;
711 spin_lock_bh(&ctx->pk_lock);
712 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
713 spin_unlock_bh(&ctx->pk_lock);
714 }
715 }
716
717 out:
718 pr_debug("rc=%d\n", rc);
719 return rc;
720 }
721
static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
723 {
724 struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
725 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
726 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
727 struct skcipher_walk *walk = &req_ctx->walk;
728 bool tested = crypto_skcipher_tested(tfm);
729 int rc;
730
731 /*
732 * Attempt synchronous encryption first. If it fails, schedule the request
733 * asynchronously via the crypto engine. To preserve execution order,
734 * once a request is queued to the engine, further requests using the same
735 * tfm will also be routed through the engine.
736 */
737
738 rc = skcipher_walk_virt(walk, req, false);
739 if (rc)
740 goto out;
741
742 req_ctx->modifier = modifier;
743 req_ctx->param_init_done = false;
744
745 /* Try synchronous operation if no active engine usage */
746 if (!atomic_read(&ctx->via_engine_ctr)) {
747 rc = cbc_paes_do_crypt(ctx, req_ctx, tested, false);
748 if (rc == 0)
749 goto out;
750 }
751
/*
 * If the sync operation failed with an expired key, or there are
 * already requests enqueued via the engine, fall back to async.
 * Mark the tfm as using the engine to serialize requests.
 */
757 if (rc == 0 || rc == -EKEYEXPIRED) {
758 atomic_inc(&ctx->via_engine_ctr);
759 rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
760 if (rc != -EINPROGRESS)
761 atomic_dec(&ctx->via_engine_ctr);
762 }
763
764 if (rc != -EINPROGRESS)
765 skcipher_walk_done(walk, rc);
766
767 out:
768 if (rc != -EINPROGRESS)
769 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
770 pr_debug("rc=%d\n", rc);
771 return rc;
772 }
773
static int cbc_paes_encrypt(struct skcipher_request *req)
775 {
776 return cbc_paes_crypt(req, 0);
777 }
778
static int cbc_paes_decrypt(struct skcipher_request *req)
780 {
781 return cbc_paes_crypt(req, CPACF_DECRYPT);
782 }
783
static int cbc_paes_init(struct crypto_skcipher *tfm)
785 {
786 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
787
788 memset(ctx, 0, sizeof(*ctx));
789 spin_lock_init(&ctx->pk_lock);
790
791 crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx));
792
793 return 0;
794 }
795
static void cbc_paes_exit(struct crypto_skcipher *tfm)
797 {
798 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
799
800 memzero_explicit(ctx, sizeof(*ctx));
801 }
802
static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq)
804 {
805 struct skcipher_request *req = skcipher_request_cast(areq);
806 struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
807 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
808 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
809 struct skcipher_walk *walk = &req_ctx->walk;
810 bool tested = crypto_skcipher_tested(tfm);
811 int rc;
812
813 /* walk has already been prepared */
814
815 rc = cbc_paes_do_crypt(ctx, req_ctx, tested, true);
816 if (rc == -EKEYEXPIRED) {
817 /*
818 * Protected key expired, conversion is in process.
819 * Trigger a re-schedule of this request by returning
820 * -ENOSPC ("hardware queue is full") to the crypto engine.
 * To avoid immediate re-invocation of this callback,
822 * tell the scheduler to voluntarily give up the CPU here.
823 */
824 cond_resched();
825 pr_debug("rescheduling request\n");
826 return -ENOSPC;
827 } else if (rc) {
828 skcipher_walk_done(walk, rc);
829 }
830
831 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
832 pr_debug("request complete with rc=%d\n", rc);
833 local_bh_disable();
834 atomic_dec(&ctx->via_engine_ctr);
835 crypto_finalize_skcipher_request(engine, req, rc);
836 local_bh_enable();
837 return rc;
838 }
839
840 static struct skcipher_engine_alg cbc_paes_alg = {
841 .base = {
842 .base.cra_name = "cbc(paes)",
843 .base.cra_driver_name = "cbc-paes-s390",
.base.cra_priority = 402, /* ecb-paes-s390 + 1 */
845 .base.cra_blocksize = AES_BLOCK_SIZE,
846 .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
847 .base.cra_module = THIS_MODULE,
848 .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list),
849 .init = cbc_paes_init,
850 .exit = cbc_paes_exit,
851 .min_keysize = PAES_MIN_KEYSIZE,
852 .max_keysize = PAES_MAX_KEYSIZE,
853 .ivsize = AES_BLOCK_SIZE,
854 .setkey = cbc_paes_setkey,
855 .encrypt = cbc_paes_encrypt,
856 .decrypt = cbc_paes_decrypt,
857 },
858 .op = {
859 .do_one_request = cbc_paes_do_one_request,
860 },
861 };
862
863 /*
864 * PAES CTR implementation
865 */
866
867 struct ctr_param {
868 u8 key[PAES_256_PROTKEY_SIZE];
869 } __packed;
870
871 struct s390_pctr_req_ctx {
872 unsigned long modifier;
873 struct skcipher_walk walk;
874 bool param_init_done;
875 struct ctr_param param;
876 };
877
static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
879 unsigned int key_len)
880 {
881 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
882 bool tested = crypto_skcipher_tested(tfm);
883 long fc;
884 int rc;
885
886 /* set raw key into context */
887 rc = paes_ctx_setkey(ctx, in_key, key_len);
888 if (rc)
889 goto out;
890
891 /* convert raw key into protected key */
892 rc = paes_convert_key(ctx, tested);
893 if (rc)
894 goto out;
895
896 /* Pick the correct function code based on the protected key type */
897 switch (ctx->pk.type) {
898 case PKEY_KEYTYPE_AES_128:
899 fc = CPACF_KMCTR_PAES_128;
900 break;
901 case PKEY_KEYTYPE_AES_192:
902 fc = CPACF_KMCTR_PAES_192;
903 break;
904 case PKEY_KEYTYPE_AES_256:
905 fc = CPACF_KMCTR_PAES_256;
906 break;
907 default:
908 fc = 0;
909 break;
910 }
911 ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
912
913 rc = fc ? 0 : -EINVAL;
914
915 out:
916 pr_debug("rc=%d\n", rc);
917 return rc;
918 }
919
static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
921 {
922 unsigned int i, n;
923
924 /* only use complete blocks, max. PAGE_SIZE */
925 memcpy(ctrptr, iv, AES_BLOCK_SIZE);
926 n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
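	/* fill the buffer with consecutive counter block values derived from iv */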
927 for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
928 memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
929 crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
930 ctrptr += AES_BLOCK_SIZE;
931 }
932 return n;
933 }
934
static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
936 struct s390_pctr_req_ctx *req_ctx,
937 bool tested, bool maysleep)
938 {
939 struct ctr_param *param = &req_ctx->param;
940 struct skcipher_walk *walk = &req_ctx->walk;
941 u8 buf[AES_BLOCK_SIZE], *ctrptr;
942 unsigned int nbytes, n, k;
943 int pk_state, locked, rc = 0;
944
945 if (!req_ctx->param_init_done) {
946 /* fetch and check protected key state */
947 spin_lock_bh(&ctx->pk_lock);
948 pk_state = ctx->pk_state;
949 switch (pk_state) {
950 case PK_STATE_NO_KEY:
951 rc = -ENOKEY;
952 break;
953 case PK_STATE_CONVERT_IN_PROGRESS:
954 rc = -EKEYEXPIRED;
955 break;
956 case PK_STATE_VALID:
957 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
958 req_ctx->param_init_done = true;
959 break;
960 default:
961 rc = pk_state < 0 ? pk_state : -EIO;
962 break;
963 }
964 spin_unlock_bh(&ctx->pk_lock);
965 }
966 if (rc)
967 goto out;
968
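	/* try to grab the shared ctrblk page; if unavailable, process block by block via walk->iv */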
969 locked = mutex_trylock(&ctrblk_lock);
970
971 /*
972 * Note that in case of partial processing or failure the walk
973 * is NOT unmapped here. So a follow up task may reuse the walk
974 * or in case of unrecoverable failure needs to unmap it.
975 */
976 while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
977 n = AES_BLOCK_SIZE;
978 if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
979 n = __ctrblk_init(ctrblk, walk->iv, nbytes);
980 ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
981 k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr,
982 walk->src.virt.addr, n, ctrptr);
983 if (k) {
984 if (ctrptr == ctrblk)
985 memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
986 AES_BLOCK_SIZE);
987 crypto_inc(walk->iv, AES_BLOCK_SIZE);
988 rc = skcipher_walk_done(walk, nbytes - k);
989 }
990 if (k < n) {
991 if (!maysleep) {
992 if (locked)
993 mutex_unlock(&ctrblk_lock);
994 rc = -EKEYEXPIRED;
995 goto out;
996 }
997 rc = paes_convert_key(ctx, tested);
998 if (rc) {
999 if (locked)
1000 mutex_unlock(&ctrblk_lock);
1001 goto out;
1002 }
1003 spin_lock_bh(&ctx->pk_lock);
1004 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
1005 spin_unlock_bh(&ctx->pk_lock);
1006 }
1007 }
1008 if (locked)
1009 mutex_unlock(&ctrblk_lock);
1010
1011 /* final block may be < AES_BLOCK_SIZE, copy only nbytes */
1012 if (nbytes) {
1013 memset(buf, 0, AES_BLOCK_SIZE);
1014 memcpy(buf, walk->src.virt.addr, nbytes);
1015 while (1) {
1016 if (cpacf_kmctr(ctx->fc, param, buf,
1017 buf, AES_BLOCK_SIZE,
1018 walk->iv) == AES_BLOCK_SIZE)
1019 break;
1020 if (!maysleep) {
1021 rc = -EKEYEXPIRED;
1022 goto out;
1023 }
1024 rc = paes_convert_key(ctx, tested);
1025 if (rc)
1026 goto out;
1027 spin_lock_bh(&ctx->pk_lock);
1028 memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
1029 spin_unlock_bh(&ctx->pk_lock);
1030 }
1031 memcpy(walk->dst.virt.addr, buf, nbytes);
1032 crypto_inc(walk->iv, AES_BLOCK_SIZE);
1033 rc = skcipher_walk_done(walk, 0);
1034 }
1035
1036 out:
1037 pr_debug("rc=%d\n", rc);
1038 return rc;
1039 }
1040
static int ctr_paes_crypt(struct skcipher_request *req)
1042 {
1043 struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
1044 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1045 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1046 struct skcipher_walk *walk = &req_ctx->walk;
1047 bool tested = crypto_skcipher_tested(tfm);
1048 int rc;
1049
1050 /*
1051 * Attempt synchronous encryption first. If it fails, schedule the request
1052 * asynchronously via the crypto engine. To preserve execution order,
1053 * once a request is queued to the engine, further requests using the same
1054 * tfm will also be routed through the engine.
1055 */
1056
1057 rc = skcipher_walk_virt(walk, req, false);
1058 if (rc)
1059 goto out;
1060
1061 req_ctx->param_init_done = false;
1062
1063 /* Try synchronous operation if no active engine usage */
1064 if (!atomic_read(&ctx->via_engine_ctr)) {
1065 rc = ctr_paes_do_crypt(ctx, req_ctx, tested, false);
1066 if (rc == 0)
1067 goto out;
1068 }
1069
/*
 * If the sync operation failed with an expired key, or there are
 * already requests enqueued via the engine, fall back to async.
 * Mark the tfm as using the engine to serialize requests.
 */
1075 if (rc == 0 || rc == -EKEYEXPIRED) {
1076 atomic_inc(&ctx->via_engine_ctr);
1077 rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
1078 if (rc != -EINPROGRESS)
1079 atomic_dec(&ctx->via_engine_ctr);
1080 }
1081
1082 if (rc != -EINPROGRESS)
1083 skcipher_walk_done(walk, rc);
1084
1085 out:
1086 if (rc != -EINPROGRESS)
1087 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1088 pr_debug("rc=%d\n", rc);
1089 return rc;
1090 }
1091
static int ctr_paes_init(struct crypto_skcipher *tfm)
1093 {
1094 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1095
1096 memset(ctx, 0, sizeof(*ctx));
1097 spin_lock_init(&ctx->pk_lock);
1098
1099 crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx));
1100
1101 return 0;
1102 }
1103
static void ctr_paes_exit(struct crypto_skcipher *tfm)
1105 {
1106 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1107
1108 memzero_explicit(ctx, sizeof(*ctx));
1109 }
1110
static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq)
1112 {
1113 struct skcipher_request *req = skcipher_request_cast(areq);
1114 struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
1115 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1116 struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
1117 struct skcipher_walk *walk = &req_ctx->walk;
1118 bool tested = crypto_skcipher_tested(tfm);
1119 int rc;
1120
1121 /* walk has already been prepared */
1122
1123 rc = ctr_paes_do_crypt(ctx, req_ctx, tested, true);
1124 if (rc == -EKEYEXPIRED) {
1125 /*
1126 * Protected key expired, conversion is in process.
1127 * Trigger a re-schedule of this request by returning
1128 * -ENOSPC ("hardware queue is full") to the crypto engine.
 * To avoid immediate re-invocation of this callback,
1130 * tell the scheduler to voluntarily give up the CPU here.
1131 */
1132 cond_resched();
1133 pr_debug("rescheduling request\n");
1134 return -ENOSPC;
1135 } else if (rc) {
1136 skcipher_walk_done(walk, rc);
1137 }
1138
1139 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1140 pr_debug("request complete with rc=%d\n", rc);
1141 local_bh_disable();
1142 atomic_dec(&ctx->via_engine_ctr);
1143 crypto_finalize_skcipher_request(engine, req, rc);
1144 local_bh_enable();
1145 return rc;
1146 }
1147
1148 static struct skcipher_engine_alg ctr_paes_alg = {
1149 .base = {
1150 .base.cra_name = "ctr(paes)",
1151 .base.cra_driver_name = "ctr-paes-s390",
1152 .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
1153 .base.cra_blocksize = 1,
1154 .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
1155 .base.cra_module = THIS_MODULE,
1156 .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list),
1157 .init = ctr_paes_init,
1158 .exit = ctr_paes_exit,
1159 .min_keysize = PAES_MIN_KEYSIZE,
1160 .max_keysize = PAES_MAX_KEYSIZE,
1161 .ivsize = AES_BLOCK_SIZE,
1162 .setkey = ctr_paes_setkey,
1163 .encrypt = ctr_paes_crypt,
1164 .decrypt = ctr_paes_crypt,
1165 .chunksize = AES_BLOCK_SIZE,
1166 },
1167 .op = {
1168 .do_one_request = ctr_paes_do_one_request,
1169 },
1170 };
1171
1172 /*
1173 * PAES XTS implementation
1174 */
1175
struct xts_full_km_param {
	u8 key[64];	/* protected key */
	u8 tweak[16];	/* xts tweak */
	u8 nap[16];	/* alpha power */
	u8 wkvp[32];	/* wrapping key verification pattern */
} __packed;
1182
1183 struct xts_km_param {
1184 u8 key[PAES_256_PROTKEY_SIZE];
1185 u8 init[16];
1186 } __packed;
1187
1188 struct xts_pcc_param {
1189 u8 key[PAES_256_PROTKEY_SIZE];
1190 u8 tweak[16];
1191 u8 block[16];
1192 u8 bit[16];
1193 u8 xts[16];
1194 } __packed;
1195
1196 struct s390_pxts_req_ctx {
1197 unsigned long modifier;
1198 struct skcipher_walk walk;
1199 bool param_init_done;
1200 union {
1201 struct xts_full_km_param full_km_param;
1202 struct xts_km_param km_param;
1203 } param;
1204 };
1205
static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
1207 unsigned int in_keylen)
1208 {
1209 struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1210 bool tested = crypto_skcipher_tested(tfm);
1211 u8 ckey[2 * AES_MAX_KEY_SIZE];
1212 unsigned int ckey_len;
1213 long fc;
1214 int rc;
1215
1216 if ((in_keylen == 32 || in_keylen == 64) &&
1217 xts_verify_key(tfm, in_key, in_keylen))
1218 return -EINVAL;
1219
1220 /* set raw key into context */
1221 rc = pxts_ctx_setkey(ctx, in_key, in_keylen);
1222 if (rc)
1223 goto out;
1224
1225 /* convert raw key(s) into protected key(s) */
1226 rc = pxts_convert_key(ctx, tested);
1227 if (rc)
1228 goto out;
1229
1230 /*
1231 * xts_verify_key verifies the key length is not odd and makes
1232 * sure that the two keys are not the same. This can be done
1233 * on the two protected keys as well - but not for full xts keys.
1234 */
1235 if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 ||
1236 ctx->pk[0].type == PKEY_KEYTYPE_AES_256) {
1237 ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
1238 AES_KEYSIZE_128 : AES_KEYSIZE_256;
1239 memcpy(ckey, ctx->pk[0].protkey, ckey_len);
1240 memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
1241 rc = xts_verify_key(tfm, ckey, 2 * ckey_len);
1242 memzero_explicit(ckey, sizeof(ckey));
1243 if (rc)
1244 goto out;
1245 }
1246
1247 /* Pick the correct function code based on the protected key type */
1248 switch (ctx->pk[0].type) {
1249 case PKEY_KEYTYPE_AES_128:
1250 fc = CPACF_KM_PXTS_128;
1251 break;
1252 case PKEY_KEYTYPE_AES_256:
1253 fc = CPACF_KM_PXTS_256;
1254 break;
1255 case PKEY_KEYTYPE_AES_XTS_128:
1256 fc = CPACF_KM_PXTS_128_FULL;
1257 break;
1258 case PKEY_KEYTYPE_AES_XTS_256:
1259 fc = CPACF_KM_PXTS_256_FULL;
1260 break;
1261 default:
1262 fc = 0;
1263 break;
1264 }
1265 ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
1266
1267 rc = fc ? 0 : -EINVAL;
1268
1269 out:
1270 pr_debug("rc=%d\n", rc);
1271 return rc;
1272 }
1273
static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx,
1275 struct s390_pxts_req_ctx *req_ctx,
1276 bool tested, bool maysleep)
1277 {
1278 struct xts_full_km_param *param = &req_ctx->param.full_km_param;
1279 struct skcipher_walk *walk = &req_ctx->walk;
1280 unsigned int keylen, offset, nbytes, n, k;
1281 int rc = 0;
1282
1283 /*
1284 * The calling function xts_paes_do_crypt() ensures the
1285 * protected key state is always PK_STATE_VALID when this
1286 * function is invoked.
1287 */
1288
1289 keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
1290 offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;
1291
1292 if (!req_ctx->param_init_done) {
1293 memset(param, 0, sizeof(*param));
1294 spin_lock_bh(&ctx->pk_lock);
1295 memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1296 memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
1297 spin_unlock_bh(&ctx->pk_lock);
1298 memcpy(param->tweak, walk->iv, sizeof(param->tweak));
1299 param->nap[0] = 0x01; /* initial alpha power (1, little-endian) */
1300 req_ctx->param_init_done = true;
1301 }
1302
1303 /*
1304 * Note that in case of partial processing or failure the walk
1305 * is NOT unmapped here. So a follow up task may reuse the walk
1306 * or in case of unrecoverable failure needs to unmap it.
1307 */
1308 while ((nbytes = walk->nbytes) != 0) {
1309 /* only use complete blocks */
1310 n = nbytes & ~(AES_BLOCK_SIZE - 1);
1311 k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
1312 walk->dst.virt.addr, walk->src.virt.addr, n);
1313 if (k)
1314 rc = skcipher_walk_done(walk, nbytes - k);
1315 if (k < n) {
1316 if (!maysleep) {
1317 rc = -EKEYEXPIRED;
1318 goto out;
1319 }
1320 rc = pxts_convert_key(ctx, tested);
1321 if (rc)
1322 goto out;
1323 spin_lock_bh(&ctx->pk_lock);
1324 memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1325 memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
1326 spin_unlock_bh(&ctx->pk_lock);
1327 }
1328 }
1329
1330 out:
1331 pr_debug("rc=%d\n", rc);
1332 return rc;
1333 }
1334
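/*
 * Prepare the KM parameter block for the 2-key xts variants: derive the
 * initial xts parameter from the tweak and the second protected key via
 * the PCC instruction and copy in the first protected key. A nonzero PCC
 * condition code indicates an invalid protected key and triggers a
 * (re-)conversion and retry.
 */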
static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
1336 struct xts_km_param *param,
1337 struct skcipher_walk *walk,
1338 unsigned int keylen,
1339 unsigned int offset,
1340 bool tested, bool maysleep)
1341 {
1342 struct xts_pcc_param pcc_param;
1343 unsigned long cc = 1;
1344 int rc = 0;
1345
1346 while (cc) {
1347 memset(&pcc_param, 0, sizeof(pcc_param));
1348 memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
1349 spin_lock_bh(&ctx->pk_lock);
1350 memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
1351 memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1352 spin_unlock_bh(&ctx->pk_lock);
1353 cc = cpacf_pcc(ctx->fc, pcc_param.key + offset);
1354 if (cc) {
1355 if (!maysleep) {
1356 rc = -EKEYEXPIRED;
1357 break;
1358 }
1359 rc = pxts_convert_key(ctx, tested);
1360 if (rc)
1361 break;
1362 continue;
1363 }
1364 memcpy(param->init, pcc_param.xts, 16);
1365 }
1366
1367 memzero_explicit(pcc_param.key, sizeof(pcc_param.key));
1368 return rc;
1369 }
1370
static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
1372 struct s390_pxts_req_ctx *req_ctx,
1373 bool tested, bool maysleep)
1374 {
1375 struct xts_km_param *param = &req_ctx->param.km_param;
1376 struct skcipher_walk *walk = &req_ctx->walk;
1377 unsigned int keylen, offset, nbytes, n, k;
1378 int rc = 0;
1379
1380 /*
1381 * The calling function xts_paes_do_crypt() ensures the
1382 * protected key state is always PK_STATE_VALID when this
1383 * function is invoked.
1384 */
1385
1386 keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
1387 offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
1388
1389 if (!req_ctx->param_init_done) {
1390 rc = __xts_2keys_prep_param(ctx, param, walk,
1391 keylen, offset, tested, maysleep);
1392 if (rc)
1393 goto out;
1394 req_ctx->param_init_done = true;
1395 }
1396
1397 /*
1398 * Note that in case of partial processing or failure the walk
1399 * is NOT unmapped here. So a follow up task may reuse the walk
1400 * or in case of unrecoverable failure needs to unmap it.
1401 */
1402 while ((nbytes = walk->nbytes) != 0) {
1403 /* only use complete blocks */
1404 n = nbytes & ~(AES_BLOCK_SIZE - 1);
1405 k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
1406 walk->dst.virt.addr, walk->src.virt.addr, n);
1407 if (k)
1408 rc = skcipher_walk_done(walk, nbytes - k);
1409 if (k < n) {
1410 if (!maysleep) {
1411 rc = -EKEYEXPIRED;
1412 goto out;
1413 }
1414 rc = pxts_convert_key(ctx, tested);
1415 if (rc)
1416 goto out;
1417 spin_lock_bh(&ctx->pk_lock);
1418 memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
1419 spin_unlock_bh(&ctx->pk_lock);
1420 }
1421 }
1422
1423 out:
1424 pr_debug("rc=%d\n", rc);
1425 return rc;
1426 }
1427
static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx,
1429 struct s390_pxts_req_ctx *req_ctx,
1430 bool tested, bool maysleep)
1431 {
1432 int pk_state, rc = 0;
1433
1434 /* fetch and check protected key state */
1435 spin_lock_bh(&ctx->pk_lock);
1436 pk_state = ctx->pk_state;
1437 switch (pk_state) {
1438 case PK_STATE_NO_KEY:
1439 rc = -ENOKEY;
1440 break;
1441 case PK_STATE_CONVERT_IN_PROGRESS:
1442 rc = -EKEYEXPIRED;
1443 break;
1444 case PK_STATE_VALID:
1445 break;
1446 default:
1447 rc = pk_state < 0 ? pk_state : -EIO;
1448 break;
1449 }
1450 spin_unlock_bh(&ctx->pk_lock);
1451 if (rc)
1452 goto out;
1453
1454 /* Call the 'real' crypt function based on the xts prot key type. */
1455 switch (ctx->fc) {
1456 case CPACF_KM_PXTS_128:
1457 case CPACF_KM_PXTS_256:
1458 rc = xts_paes_do_crypt_2keys(ctx, req_ctx, tested, maysleep);
1459 break;
1460 case CPACF_KM_PXTS_128_FULL:
1461 case CPACF_KM_PXTS_256_FULL:
1462 rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, tested, maysleep);
1463 break;
1464 default:
1465 rc = -EINVAL;
1466 }
1467
1468 out:
1469 pr_debug("rc=%d\n", rc);
1470 return rc;
1471 }
1472
static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
1474 {
1475 struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
1476 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1477 struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1478 struct skcipher_walk *walk = &req_ctx->walk;
1479 bool tested = crypto_skcipher_tested(tfm);
1480 int rc;
1481
1482 /*
1483 * Attempt synchronous encryption first. If it fails, schedule the request
1484 * asynchronously via the crypto engine. To preserve execution order,
1485 * once a request is queued to the engine, further requests using the same
1486 * tfm will also be routed through the engine.
1487 */
1488
1489 rc = skcipher_walk_virt(walk, req, false);
1490 if (rc)
1491 goto out;
1492
1493 req_ctx->modifier = modifier;
1494 req_ctx->param_init_done = false;
1495
1496 /* Try synchronous operation if no active engine usage */
1497 if (!atomic_read(&ctx->via_engine_ctr)) {
1498 rc = xts_paes_do_crypt(ctx, req_ctx, tested, false);
1499 if (rc == 0)
1500 goto out;
1501 }
1502
/*
 * If the sync operation failed with an expired key, or there are
 * already requests enqueued via the engine, fall back to async.
 * Mark the tfm as using the engine to serialize requests.
 */
1508 if (rc == 0 || rc == -EKEYEXPIRED) {
1509 atomic_inc(&ctx->via_engine_ctr);
1510 rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
1511 if (rc != -EINPROGRESS)
1512 atomic_dec(&ctx->via_engine_ctr);
1513 }
1514
1515 if (rc != -EINPROGRESS)
1516 skcipher_walk_done(walk, rc);
1517
1518 out:
1519 if (rc != -EINPROGRESS)
1520 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1521 pr_debug("rc=%d\n", rc);
1522 return rc;
1523 }
1524
static int xts_paes_encrypt(struct skcipher_request *req)
1526 {
1527 return xts_paes_crypt(req, 0);
1528 }
1529
static int xts_paes_decrypt(struct skcipher_request *req)
1531 {
1532 return xts_paes_crypt(req, CPACF_DECRYPT);
1533 }
1534
static int xts_paes_init(struct crypto_skcipher *tfm)
1536 {
1537 struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1538
1539 memset(ctx, 0, sizeof(*ctx));
1540 spin_lock_init(&ctx->pk_lock);
1541
1542 crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx));
1543
1544 return 0;
1545 }
1546
static void xts_paes_exit(struct crypto_skcipher *tfm)
1548 {
1549 struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1550
1551 memzero_explicit(ctx, sizeof(*ctx));
1552 }
1553
static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq)
1555 {
1556 struct skcipher_request *req = skcipher_request_cast(areq);
1557 struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
1558 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1559 struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
1560 struct skcipher_walk *walk = &req_ctx->walk;
1561 bool tested = crypto_skcipher_tested(tfm);
1562 int rc;
1563
1564 /* walk has already been prepared */
1565
1566 rc = xts_paes_do_crypt(ctx, req_ctx, tested, true);
1567 if (rc == -EKEYEXPIRED) {
1568 /*
1569 * Protected key expired, conversion is in process.
1570 * Trigger a re-schedule of this request by returning
1571 * -ENOSPC ("hardware queue is full") to the crypto engine.
 * To avoid immediate re-invocation of this callback,
1573 * tell the scheduler to voluntarily give up the CPU here.
1574 */
1575 cond_resched();
1576 pr_debug("rescheduling request\n");
1577 return -ENOSPC;
1578 } else if (rc) {
1579 skcipher_walk_done(walk, rc);
1580 }
1581
1582 memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
1583 pr_debug("request complete with rc=%d\n", rc);
1584 local_bh_disable();
1585 atomic_dec(&ctx->via_engine_ctr);
1586 crypto_finalize_skcipher_request(engine, req, rc);
1587 local_bh_enable();
1588 return rc;
1589 }
1590
1591 static struct skcipher_engine_alg xts_paes_alg = {
1592 .base = {
1593 .base.cra_name = "xts(paes)",
1594 .base.cra_driver_name = "xts-paes-s390",
1595 .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
1596 .base.cra_blocksize = AES_BLOCK_SIZE,
1597 .base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
1598 .base.cra_module = THIS_MODULE,
1599 .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list),
1600 .init = xts_paes_init,
1601 .exit = xts_paes_exit,
1602 .min_keysize = 2 * PAES_MIN_KEYSIZE,
1603 .max_keysize = 2 * PAES_MAX_KEYSIZE,
1604 .ivsize = AES_BLOCK_SIZE,
1605 .setkey = xts_paes_setkey,
1606 .encrypt = xts_paes_encrypt,
1607 .decrypt = xts_paes_decrypt,
1608 },
1609 .op = {
1610 .do_one_request = xts_paes_do_one_request,
1611 },
1612 };
1613
1614 /*
1615 * alg register, unregister, module init, exit
1616 */
1617
1618 static struct miscdevice paes_dev = {
1619 .name = "paes",
1620 .minor = MISC_DYNAMIC_MINOR,
1621 };
1622
static inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg)
1624 {
1625 if (!list_empty(&alg->base.base.cra_list))
1626 crypto_engine_unregister_skcipher(alg);
1627 }
1628
static void paes_s390_fini(void)
1630 {
1631 if (paes_crypto_engine) {
1632 crypto_engine_stop(paes_crypto_engine);
1633 crypto_engine_exit(paes_crypto_engine);
1634 }
1635 __crypto_unregister_skcipher(&ctr_paes_alg);
1636 __crypto_unregister_skcipher(&xts_paes_alg);
1637 __crypto_unregister_skcipher(&cbc_paes_alg);
1638 __crypto_unregister_skcipher(&ecb_paes_alg);
1639 if (ctrblk)
1640 free_page((unsigned long)ctrblk);
1641 misc_deregister(&paes_dev);
1642 }
1643
static int __init paes_s390_init(void)
1645 {
1646 int rc;
1647
1648 /* register a simple paes pseudo misc device */
1649 rc = misc_register(&paes_dev);
1650 if (rc)
1651 return rc;
1652
	/* with this pseudo device alloc and start a crypto engine */
1654 paes_crypto_engine =
1655 crypto_engine_alloc_init_and_set(paes_dev.this_device,
1656 true, false, MAX_QLEN);
1657 if (!paes_crypto_engine) {
1658 rc = -ENOMEM;
1659 goto out_err;
1660 }
1661 rc = crypto_engine_start(paes_crypto_engine);
1662 if (rc) {
1663 crypto_engine_exit(paes_crypto_engine);
1664 paes_crypto_engine = NULL;
1665 goto out_err;
1666 }
1667
1668 /* Query available functions for KM, KMC and KMCTR */
1669 cpacf_query(CPACF_KM, &km_functions);
1670 cpacf_query(CPACF_KMC, &kmc_functions);
1671 cpacf_query(CPACF_KMCTR, &kmctr_functions);
1672
1673 if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
1674 cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
1675 cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
1676 rc = crypto_engine_register_skcipher(&ecb_paes_alg);
1677 if (rc)
1678 goto out_err;
1679 pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name);
1680 }
1681
1682 if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
1683 cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
1684 cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
1685 rc = crypto_engine_register_skcipher(&cbc_paes_alg);
1686 if (rc)
1687 goto out_err;
1688 pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name);
1689 }
1690
1691 if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
1692 cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
1693 rc = crypto_engine_register_skcipher(&xts_paes_alg);
1694 if (rc)
1695 goto out_err;
1696 pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name);
1697 }
1698
1699 if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
1700 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
1701 cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
1702 ctrblk = (u8 *)__get_free_page(GFP_KERNEL);
1703 if (!ctrblk) {
1704 rc = -ENOMEM;
1705 goto out_err;
1706 }
1707 rc = crypto_engine_register_skcipher(&ctr_paes_alg);
1708 if (rc)
1709 goto out_err;
1710 pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name);
1711 }
1712
1713 return 0;
1714
1715 out_err:
1716 paes_s390_fini();
1717 return rc;
1718 }
1719
1720 module_init(paes_s390_init);
1721 module_exit(paes_s390_fini);
1722
1723 MODULE_ALIAS_CRYPTO("ecb(paes)");
1724 MODULE_ALIAS_CRYPTO("cbc(paes)");
1725 MODULE_ALIAS_CRYPTO("ctr(paes)");
1726 MODULE_ALIAS_CRYPTO("xts(paes)");
1727
1728 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
1729 MODULE_LICENSE("GPL");
1730