// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm64/crypto/aes-glue.c - wrapper code for ARMv8 AES
 *
 * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>
#include <crypto/xts.h>
#include <linux/cpufeature.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/hwcap.h>
#include <asm/simd.h>

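/*
 * This file is built twice: once with USE_V8_CRYPTO_EXTENSIONS defined to
 * produce the Crypto Extensions ("ce") module, and once without it for the
 * plain NEON fallback. The "ce" build registers its algorithms at a higher
 * priority (300 vs 200), so the crypto API prefers it when both are loaded.
 */
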
#ifdef USE_V8_CRYPTO_EXTENSIONS
#define MODE			"ce"
#define PRIO			300
#define aes_expandkey		ce_aes_expandkey
#define aes_ecb_encrypt		ce_aes_ecb_encrypt
#define aes_ecb_decrypt		ce_aes_ecb_decrypt
#define aes_cbc_encrypt		ce_aes_cbc_encrypt
#define aes_cbc_decrypt		ce_aes_cbc_decrypt
#define aes_cbc_cts_encrypt	ce_aes_cbc_cts_encrypt
#define aes_cbc_cts_decrypt	ce_aes_cbc_cts_decrypt
#define aes_essiv_cbc_encrypt	ce_aes_essiv_cbc_encrypt
#define aes_essiv_cbc_decrypt	ce_aes_essiv_cbc_decrypt
#define aes_ctr_encrypt		ce_aes_ctr_encrypt
#define aes_xctr_encrypt	ce_aes_xctr_encrypt
#define aes_xts_encrypt		ce_aes_xts_encrypt
#define aes_xts_decrypt		ce_aes_xts_decrypt
#define aes_mac_update		ce_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 Crypto Extensions");
#else
#define MODE			"neon"
#define PRIO			200
#define aes_ecb_encrypt		neon_aes_ecb_encrypt
#define aes_ecb_decrypt		neon_aes_ecb_decrypt
#define aes_cbc_encrypt		neon_aes_cbc_encrypt
#define aes_cbc_decrypt		neon_aes_cbc_decrypt
#define aes_cbc_cts_encrypt	neon_aes_cbc_cts_encrypt
#define aes_cbc_cts_decrypt	neon_aes_cbc_cts_decrypt
#define aes_essiv_cbc_encrypt	neon_aes_essiv_cbc_encrypt
#define aes_essiv_cbc_decrypt	neon_aes_essiv_cbc_decrypt
#define aes_ctr_encrypt		neon_aes_ctr_encrypt
#define aes_xctr_encrypt	neon_aes_xctr_encrypt
#define aes_xts_encrypt		neon_aes_xts_encrypt
#define aes_xts_decrypt		neon_aes_xts_decrypt
#define aes_mac_update		neon_aes_mac_update
MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS/XCTR using ARMv8 NEON");
#endif
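/*
 * When the bit-sliced NEON driver (CONFIG_CRYPTO_AES_ARM64_BS) is enabled,
 * the NEON build leaves these block modes to it and only registers the
 * aliases and algorithms guarded below in the "ce" build or when that
 * driver is absent.
 */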
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
MODULE_ALIAS_CRYPTO("xctr(aes)");
#endif
MODULE_ALIAS_CRYPTO("cts(cbc(aes))");
MODULE_ALIAS_CRYPTO("essiv(cbc(aes),sha256)");
MODULE_ALIAS_CRYPTO("cmac(aes)");
MODULE_ALIAS_CRYPTO("xcbc(aes)");
MODULE_ALIAS_CRYPTO("cbcmac(aes)");

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
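
/*
 * Usage sketch (illustrative only, not part of this driver): kernel users
 * reach these implementations through the generic crypto API, which picks
 * the highest-priority provider registered for a given algorithm name:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, 2 * AES_KEYSIZE_256);
 *	...
 *	crypto_free_skcipher(tfm);
 */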

/* defined in aes-modes.S */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks);

asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 iv[]);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int blocks, u8 iv[]);

asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
				    int rounds, int bytes, u8 const iv[]);
asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
				    int rounds, int bytes, u8 const iv[]);

asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				int rounds, int bytes, u8 ctr[]);

asmlinkage void aes_xctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
				 int rounds, int bytes, u8 ctr[], int byte_ctr);

asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				int rounds, int bytes, u32 const rk2[], u8 iv[],
				int first);
asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				int rounds, int bytes, u32 const rk2[], u8 iv[],
				int first);

asmlinkage void aes_essiv_cbc_encrypt(u8 out[], u8 const in[], u32 const rk1[],
				      int rounds, int blocks, u8 iv[],
				      u32 const rk2[]);
asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
				      int rounds, int blocks, u8 iv[],
				      u32 const rk2[]);

asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds,
			      int blocks, u8 dg[], int enc_before,
			      int enc_after);
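
/*
 * All helpers take the AES round count rather than the key length: with
 * rounds = 6 + key_length / 4, a 16/24/32 byte key maps to the standard
 * 10/12/14 rounds.
 */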

struct crypto_aes_xts_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

struct crypto_aes_essiv_cbc_ctx {
	struct crypto_aes_ctx key1;
	struct crypto_aes_ctx __aligned(8) key2;
};

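/*
 * consts[] is sized via cra_ctxsize at registration time: cmac(aes) and
 * xcbc(aes) reserve two extra AES blocks here for their derived subkeys,
 * while cbcmac(aes) needs none.
 */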
struct mac_tfm_ctx {
	struct crypto_aes_ctx key;
	u8 __aligned(8) consts[];
};

struct mac_desc_ctx {
	u8 dg[AES_BLOCK_SIZE];
};

static int skcipher_aes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	return aes_expandkey(ctx, in_key, key_len);
}

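/*
 * The XTS key is the concatenation of the two AES keys; xts_verify_key()
 * rejects invalid lengths (and, in FIPS mode, keys whose two halves are
 * identical) before the halves are expanded separately.
 */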
static int __maybe_unused xts_set_key(struct crypto_skcipher *tfm,
				      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, in_key, key_len);
	if (ret)
		return ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len / 2);
	if (!ret)
		ret = aes_expandkey(&ctx->key2, &in_key[key_len / 2],
				    key_len / 2);
	return ret;
}

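/*
 * ESSIV (encrypted salt-sector IV): the IV-encryption key (key2) is not
 * supplied by the caller but derived as the SHA-256 digest of the data
 * key, so a single setkey installs both.
 */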
static int __maybe_unused essiv_cbc_set_key(struct crypto_skcipher *tfm,
					    const u8 *in_key,
					    unsigned int key_len)
{
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 digest[SHA256_DIGEST_SIZE];
	int ret;

	ret = aes_expandkey(&ctx->key1, in_key, key_len);
	if (ret)
		return ret;

	sha256(in_key, key_len, digest);

	return aes_expandkey(&ctx->key2, digest, sizeof(digest));
}

static int __maybe_unused ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		scoped_ksimd()
			aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
					ctx->key_enc, rounds, blocks);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
		scoped_ksimd()
			aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
					ctx->key_dec, rounds, blocks);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int cbc_encrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err = 0, rounds = 6 + ctx->key_length / 4;
	unsigned int blocks;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		scoped_ksimd()
			aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
					ctx->key_enc, rounds, blocks, walk->iv);
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused cbc_encrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_encrypt_walk(req, &walk);
}

static int cbc_decrypt_walk(struct skcipher_request *req,
			    struct skcipher_walk *walk)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err = 0, rounds = 6 + ctx->key_length / 4;
	unsigned int blocks;

	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
		scoped_ksimd()
			aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
					ctx->key_dec, rounds, blocks, walk->iv);
		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
	}
	return err;
}

static int __maybe_unused cbc_decrypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;
	return cbc_decrypt_walk(req, &walk);
}

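/*
 * cts(cbc(aes)) splits each request in two: everything up to the last two
 * blocks is processed as regular CBC via a subrequest, and the final
 * (possibly partial) two blocks are handed to the ciphertext-stealing
 * assembly in one call.
 */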
static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_encrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	scoped_ksimd()
		aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				    ctx->key_enc, rounds, walk.nbytes, walk.iv);

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false) ?:
		      cbc_decrypt_walk(&subreq, &walk);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	/* handle ciphertext stealing */
	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	scoped_ksimd()
		aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				    ctx->key_dec, rounds, walk.nbytes, walk.iv);

	return skcipher_walk_done(&walk, 0);
}

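/*
 * Only the first chunk of the walk goes through the ESSIV entry point,
 * which encrypts req->iv with key2 before starting CBC; once the IV has
 * been set up, the remainder proceeds as plain CBC with key1.
 */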
static int __maybe_unused essiv_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	blocks = walk.nbytes / AES_BLOCK_SIZE;
	if (blocks) {
		scoped_ksimd()
			aes_essiv_cbc_encrypt(walk.dst.virt.addr,
					      walk.src.virt.addr,
					      ctx->key1.key_enc, rounds, blocks,
					      req->iv, ctx->key2.key_enc);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err ?: cbc_encrypt_walk(req, &walk);
}

static int __maybe_unused essiv_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_essiv_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key1.key_length / 4;
	struct skcipher_walk walk;
	unsigned int blocks;

	err = skcipher_walk_virt(&walk, req, false);

	blocks = walk.nbytes / AES_BLOCK_SIZE;
	if (blocks) {
		scoped_ksimd()
			aes_essiv_cbc_decrypt(walk.dst.virt.addr,
					      walk.src.virt.addr,
					      ctx->key1.key_dec, rounds, blocks,
					      req->iv, ctx->key2.key_enc);
		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
	}
	return err ?: cbc_decrypt_walk(req, &walk);
}

static int __maybe_unused xctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;
	unsigned int byte_ctr = 0;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		const u8 *src = walk.src.virt.addr;
		unsigned int nbytes = walk.nbytes;
		u8 *dst = walk.dst.virt.addr;
		u8 buf[AES_BLOCK_SIZE];

		/*
		 * If given less than 16 bytes, we must copy the partial block
		 * into a temporary buffer of 16 bytes to avoid out of bounds
		 * reads and writes. Furthermore, this code is somewhat unusual
		 * in that it expects the end of the data to be at the end of
		 * the temporary buffer, rather than the start of the data at
		 * the start of the temporary buffer.
		 */
		if (unlikely(nbytes < AES_BLOCK_SIZE))
			src = dst = memcpy(buf + sizeof(buf) - nbytes,
					   src, nbytes);
		else if (nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		scoped_ksimd()
			aes_xctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
					 walk.iv, byte_ctr);

		if (unlikely(nbytes < AES_BLOCK_SIZE))
			memcpy(walk.dst.virt.addr,
			       buf + sizeof(buf) - nbytes, nbytes);
		byte_ctr += nbytes;

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

static int __maybe_unused ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, rounds = 6 + ctx->key_length / 4;
	struct skcipher_walk walk;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		const u8 *src = walk.src.virt.addr;
		unsigned int nbytes = walk.nbytes;
		u8 *dst = walk.dst.virt.addr;
		u8 buf[AES_BLOCK_SIZE];

		/*
		 * If given less than 16 bytes, we must copy the partial block
		 * into a temporary buffer of 16 bytes to avoid out of bounds
		 * reads and writes. Furthermore, this code is somewhat unusual
		 * in that it expects the end of the data to be at the end of
		 * the temporary buffer, rather than the start of the data at
		 * the start of the temporary buffer.
		 */
		if (unlikely(nbytes < AES_BLOCK_SIZE))
			src = dst = memcpy(buf + sizeof(buf) - nbytes,
					   src, nbytes);
		else if (nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		scoped_ksimd()
			aes_ctr_encrypt(dst, src, ctx->key_enc, rounds, nbytes,
					walk.iv);

		if (unlikely(nbytes < AES_BLOCK_SIZE))
			memcpy(walk.dst.virt.addr,
			       buf + sizeof(buf) - nbytes, nbytes);

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

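/*
 * For XTS requests whose length is not a multiple of the block size,
 * everything up to the last two blocks is processed first via a
 * subrequest; the final full block plus the tail is then re-walked and
 * handled by the ciphertext-stealing path in the assembly.
 */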
static int __maybe_unused xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	scoped_ksimd() {
		for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
			int nbytes = walk.nbytes;

			if (walk.nbytes < walk.total)
				nbytes &= ~(AES_BLOCK_SIZE - 1);

			aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
					ctx->key1.key_enc, rounds, nbytes,
					ctx->key2.key_enc, walk.iv, first);
			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		}

		if (err || likely(!tail))
			return err;

		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;

		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key1.key_enc, rounds, walk.nbytes,
				ctx->key2.key_enc, walk.iv, first);
	}
	return skcipher_walk_done(&walk, 0);
}

static int __maybe_unused xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err, first, rounds = 6 + ctx->key1.key_length / 4;
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct scatterlist *src, *dst;
	struct skcipher_walk walk;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int xts_blocks = DIV_ROUND_UP(req->cryptlen,
					      AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   xts_blocks * AES_BLOCK_SIZE,
					   req->iv);
		req = &subreq;
		err = skcipher_walk_virt(&walk, req, false);
	} else {
		tail = 0;
	}

	scoped_ksimd() {
		for (first = 1; walk.nbytes >= AES_BLOCK_SIZE; first = 0) {
			int nbytes = walk.nbytes;

			if (walk.nbytes < walk.total)
				nbytes &= ~(AES_BLOCK_SIZE - 1);

			aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
					ctx->key1.key_dec, rounds, nbytes,
					ctx->key2.key_enc, walk.iv, first);
			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		}

		if (err || likely(!tail))
			return err;

		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;

		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				ctx->key1.key_dec, rounds, walk.nbytes,
				ctx->key2.key_enc, walk.iv, first);
	}
	return skcipher_walk_done(&walk, 0);
}

static struct skcipher_alg aes_algs[] = { {
#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
	.base = {
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ecb_encrypt,
	.decrypt	= ecb_decrypt,
}, {
	.base = {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= cbc_encrypt,
	.decrypt	= cbc_decrypt,
}, {
	.base = {
		.cra_name		= "ctr(aes)",
		.cra_driver_name	= "ctr-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= ctr_encrypt,
	.decrypt	= ctr_encrypt,
}, {
	.base = {
		.cra_name		= "xctr(aes)",
		.cra_driver_name	= "xctr-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.chunksize	= AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= xctr_encrypt,
	.decrypt	= xctr_encrypt,
}, {
	.base = {
		.cra_name		= "xts(aes)",
		.cra_driver_name	= "xts-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.walksize	= 2 * AES_BLOCK_SIZE,
	.setkey		= xts_set_key,
	.encrypt	= xts_encrypt,
	.decrypt	= xts_decrypt,
}, {
#endif
	.base = {
		.cra_name		= "cts(cbc(aes))",
		.cra_driver_name	= "cts-cbc-aes-" MODE,
		.cra_priority		= PRIO,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.walksize	= 2 * AES_BLOCK_SIZE,
	.setkey		= skcipher_aes_setkey,
	.encrypt	= cts_cbc_encrypt,
	.decrypt	= cts_cbc_decrypt,
}, {
	.base = {
		.cra_name		= "essiv(cbc(aes),sha256)",
		.cra_driver_name	= "essiv-cbc-aes-sha256-" MODE,
		.cra_priority		= PRIO + 1,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto_aes_essiv_cbc_ctx),
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= essiv_cbc_set_key,
	.encrypt	= essiv_cbc_encrypt,
	.decrypt	= essiv_cbc_decrypt,
} };

static int cbcmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
			 unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);

	return aes_expandkey(&ctx->key, in_key, key_len);
}

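/*
 * Multiply by x in GF(2^128) with the reduction polynomial
 * x^128 + x^7 + x^2 + x + 1: shift the 128-bit value left by one bit and
 * XOR in 0x87 when the top bit falls off. CMAC uses this doubling to
 * derive its two subkeys from the encrypted zero block.
 */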
static void cmac_gf128_mul_by_x(be128 *y, const be128 *x)
{
	u64 a = be64_to_cpu(x->a);
	u64 b = be64_to_cpu(x->b);

	y->a = cpu_to_be64((a << 1) | (b >> 63));
	y->b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
}

static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *consts = (be128 *)ctx->consts;
	int rounds = 6 + key_len / 4;
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* encrypt the zero vector */
	scoped_ksimd()
		aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){},
				ctx->key.key_enc, rounds, 1);

	cmac_gf128_mul_by_x(consts, consts);
	cmac_gf128_mul_by_x(consts + 1, consts);

	return 0;
}

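/*
 * XCBC (RFC 3566) derives three subkeys by encrypting the constant blocks
 * 0x01..01, 0x02..02 and 0x03..03: K1 replaces the CBC-MAC key, while K2
 * and K3 land in consts[] for masking the final block.
 */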
static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	static u8 const ks[3][AES_BLOCK_SIZE] = {
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x1 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x2 },
		{ [0 ... AES_BLOCK_SIZE - 1] = 0x3 },
	};

	struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	int rounds = 6 + key_len / 4;
	u8 key[AES_BLOCK_SIZE];
	int err;

	err = cbcmac_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	scoped_ksimd() {
		aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
		aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
	}

	return cbcmac_setkey(tfm, key, sizeof(key));
}

static int mac_init(struct shash_desc *desc)
{
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	memset(ctx->dg, 0, AES_BLOCK_SIZE);
	return 0;
}

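/*
 * aes_mac_update() may return before consuming all blocks so the SIMD
 * context can be dropped periodically; the loop simply resumes where the
 * assembly left off.
 */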
static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
			  u8 dg[], int enc_before)
{
	int rounds = 6 + ctx->key_length / 4;
	int rem;

	do {
		scoped_ksimd()
			rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
					     dg, enc_before, !enc_before);
		in += (blocks - rem) * AES_BLOCK_SIZE;
		blocks = rem;
	} while (blocks);
}

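/*
 * The MAC algorithms are registered with CRYPTO_AHASH_ALG_BLOCK_ONLY, so
 * ->update() consumes whole blocks and returns the number of leftover
 * bytes for the core to buffer until the next call or finup().
 */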
static int mac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
	int blocks = len / AES_BLOCK_SIZE;

	len %= AES_BLOCK_SIZE;
	mac_do_update(&tctx->key, p, blocks, ctx->dg, 0);
	return len;
}

static int cbcmac_finup(struct shash_desc *desc, const u8 *src,
			unsigned int len, u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);

	if (len) {
		crypto_xor(ctx->dg, src, len);
		mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1);
	}
	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
	return 0;
}

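/*
 * Per the CMAC construction, a complete final block is XORed with the
 * first derived constant (K1), while a partial one is padded with a
 * single 0x80 byte and XORed with the second (K2) before the last
 * cipher call.
 */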
static int cmac_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
		      u8 *out)
{
	struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
	u8 *consts = tctx->consts;

	crypto_xor(ctx->dg, src, len);
	if (len != AES_BLOCK_SIZE) {
		ctx->dg[len] ^= 0x80;
		consts += AES_BLOCK_SIZE;
	}
	mac_do_update(&tctx->key, consts, 1, ctx->dg, 0);
	memcpy(out, ctx->dg, AES_BLOCK_SIZE);
	return 0;
}

static struct shash_alg mac_algs[] = { {
	.base.cra_name		= "cmac(aes)",
	.base.cra_driver_name	= "cmac-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
				  CRYPTO_AHASH_ALG_FINAL_NONZERO,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
				  2 * AES_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.finup			= cmac_finup,
	.setkey			= cmac_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name		= "xcbc(aes)",
	.base.cra_driver_name	= "xcbc-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY |
				  CRYPTO_AHASH_ALG_FINAL_NONZERO,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx) +
				  2 * AES_BLOCK_SIZE,
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.finup			= cmac_finup,
	.setkey			= xcbc_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
}, {
	.base.cra_name		= "cbcmac(aes)",
	.base.cra_driver_name	= "cbcmac-aes-" MODE,
	.base.cra_priority	= PRIO,
	.base.cra_flags		= CRYPTO_AHASH_ALG_BLOCK_ONLY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct mac_tfm_ctx),
	.base.cra_module	= THIS_MODULE,

	.digestsize		= AES_BLOCK_SIZE,
	.init			= mac_init,
	.update			= mac_update,
	.finup			= cbcmac_finup,
	.setkey			= cbcmac_setkey,
	.descsize		= sizeof(struct mac_desc_ctx),
} };

static void aes_exit(void)
{
	crypto_unregister_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	int err;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	err = crypto_register_shashes(mac_algs, ARRAY_SIZE(mac_algs));
	if (err)
		goto unregister_ciphers;

	return 0;

unregister_ciphers:
	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	return err;
}

#ifdef USE_V8_CRYPTO_EXTENSIONS
module_cpu_feature_match(AES, aes_init);
EXPORT_SYMBOL_NS(ce_aes_mac_update, "CRYPTO_INTERNAL");
#else
module_init(aes_init);
EXPORT_SYMBOL(neon_aes_ecb_encrypt);
EXPORT_SYMBOL(neon_aes_cbc_encrypt);
EXPORT_SYMBOL(neon_aes_ctr_encrypt);
EXPORT_SYMBOL(neon_aes_xts_encrypt);
EXPORT_SYMBOL(neon_aes_xts_decrypt);
#endif
module_exit(aes_exit);