/*
 * linux/net/sunrpc/gss_krb5_crypto.c
 *
 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * Bruce Fields <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government. It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission. FundsXpress makes no representations about the suitability of
 * this software for any purpose. It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <kunit/visibility.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/**
 * krb5_make_confounder - Generate a confounder string
 * @p: memory location into which to write the string
 * @conflen: string length to write, in octets
 *
 * RFCs 1964 and 3961 mention only "a random confounder" without going
 * into detail about its function or cryptographic requirements. The
 * assumed purpose is to prevent repeated encryption of a plaintext with
 * the same key from generating the same ciphertext. It also pads the
 * plaintext out to at least a single cipher block.
 *
 * However, in situations like the GSS Kerberos 5 mechanism, where the
 * encryption IV is always all zeroes, the confounder also effectively
 * functions like an IV. Thus, not only must it be unique from message
 * to message, but it must also be difficult to predict. Otherwise an
 * attacker can correlate the confounder to previous or future values,
 * making the encryption easier to break.
 *
 * Given that the primary consumer of this encryption mechanism is a
 * network storage protocol, a type of traffic that often carries
 * predictable payloads (e.g., all zeroes when reading unallocated blocks
 * from a file), our confounder generation has to be cryptographically
 * strong.
 */
void krb5_make_confounder(u8 *p, int conflen)
{
	get_random_bytes(p, conflen);
}
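
/*
 * Illustrative sketch only, not used in this file: a hypothetical
 * caller lays out its plaintext so that the confounder occupies the
 * first cipher block and the real payload follows it. Identical
 * payloads then never encrypt to identical ciphertexts, even under
 * the same key and the mechanism's fixed all-zero IV.
 */
static inline void krb5_confounder_example(u8 *plaintext, int conflen)
{
	/* plaintext[0 .. conflen - 1]: random, unpredictable confounder */
	krb5_make_confounder(plaintext, conflen);
	/* plaintext[conflen ..]: message payload to be encrypted follows */
}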

/**
 * krb5_encrypt - simple encryption of an RPCSEC GSS payload
 * @tfm: initialized cipher transform
 * @iv: pointer to an IV
 * @in: plaintext to encrypt
 * @out: OUT: ciphertext
 * @length: length of input and output buffers, in bytes
 *
 * @iv may be NULL to force the use of an all-zero IV.
 * The buffer containing the IV must be as large as the
 * cipher's ivsize.
 *
 * Return values:
 *   %0: @in successfully encrypted into @out
 *   negative errno: @in not encrypted
 */
u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}
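
/*
 * Illustrative sketch only, not part of this file's API: encrypting a
 * block-aligned buffer with the all-zero IV that the GSS Kerberos 5
 * mechanism mandates. The function and parameter names here are
 * assumptions for the example.
 */
static inline u32 krb5_encrypt_example(struct crypto_sync_skcipher *tfm,
				       void *in, void *out, int len)
{
	/* A NULL @iv selects the all-zero IV; @len must be a multiple
	 * of the cipher block size or krb5_encrypt() returns -EINVAL. */
	return krb5_encrypt(tfm, NULL, in, out, len);
}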

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

/**
 * gss_krb5_checksum - Compute the MAC for a GSS Wrap or MIC token
 * @tfm: an initialized hash transform
 * @header: pointer to a buffer containing the token header, or NULL
 * @hdrlen: number of octets in @header
 * @body: xdr_buf containing an RPC message (body.len is the message length)
 * @body_offset: byte offset into @body to start checksumming
 * @cksumout: OUT: a buffer to be filled in with the computed HMAC
 *
 * Usually expressed as H = HMAC(K, message)[1..h].
 *
 * Caller provides the truncation length of the output token (h) in
 * cksumout.len.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Digest computed, @cksumout filled in
 *   %GSS_S_FAILURE: Call failed
 */
u32
gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
		  const struct xdr_buf *body, int body_offset,
		  struct xdr_netobj *cksumout)
{
	struct ahash_request *req;
	int err = -ENOMEM;
	u8 *checksumdata;

	checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_cksum;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	err = crypto_ahash_init(req);
	if (err)
		goto out_free_ahash;

	/*
	 * Per RFC 4121 Section 4.2.4, the checksum is performed over the
	 * data body first, then over the octets in "header".
	 */
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out_free_ahash;
	if (header) {
		struct scatterlist sg[1];

		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out_free_ahash;
	}

	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out_free_ahash;

	memcpy(cksumout->data, checksumdata,
	       min_t(int, cksumout->len, crypto_ahash_digestsize(tfm)));

out_free_ahash:
	ahash_request_free(req);
out_free_cksum:
	kfree_sensitive(checksumdata);
	return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
EXPORT_SYMBOL_IF_KUNIT(gss_krb5_checksum);
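
/*
 * Illustrative sketch only, not used in this file: computing a MIC
 * over an entire xdr_buf with a hypothetical truncation length @h.
 * The truncation length of the digest is passed in cksum.len, and
 * @out must have room for at least @h octets.
 */
static inline u32 gss_krb5_checksum_example(struct crypto_ahash *tfm,
					    const struct xdr_buf *body,
					    u8 *out, unsigned int h)
{
	struct xdr_netobj cksum = {
		.len	= h,
		.data	= out,
	};

	/* No token header, and checksumming starts at offset zero */
	return gss_krb5_checksum(tfm, NULL, 0, body, 0, &cksum);
}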

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail. Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail. Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap(). The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
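
/*
 * For illustration: with base = 8 and shiftlen = 16, the octets from
 * head[0].iov_base + 8 through the end of the head are moved 16 bytes
 * toward the tail, opening a 16-byte gap at offset 8 (room for a
 * confounder, say), and head[0].iov_len and buf->len both grow by 16.
 */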
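
/*
 * Handle the final region of a message with CBC-CTS. The region is
 * never more than two cipher blocks long; it is copied out of @buf
 * into a small linear scratch buffer, transformed there in a single
 * pass, and written back, so the CTS cipher never has to walk the
 * xdr_buf's scatterlist geometry.
 */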
static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(1);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

#if IS_ENABLED(CONFIG_KUNIT)
	/*
	 * CBC-CTS does not define an output IV but RFC 3962 defines it as the
	 * penultimate block of ciphertext, so copy that into the IV buffer
	 * before returning.
	 */
	if (encrypt)
		memcpy(iv, data, crypto_sync_skcipher_ivsize(cipher));
#endif

out:
	kfree(data);
	return ret;
}

/**
 * krb5_cbc_cts_encrypt - encrypt in CBC mode with CTS
 * @cts_tfm: CBC cipher with CTS
 * @cbc_tfm: base CBC cipher
 * @offset: starting byte offset for plaintext
 * @buf: OUT: output buffer
 * @pages: plaintext
 * @iv: output CBC initialization vector, or NULL
 * @ivsize: size of @iv, in octets
 *
 * To provide confidentiality, encrypt using cipher block chaining
 * with ciphertext stealing. Message integrity is handled separately.
 *
 * Return values:
 *   %0: encryption successful
 *   negative errno: encryption could not be completed
 */
VISIBLE_IF_KUNIT
int krb5_cbc_cts_encrypt(struct crypto_sync_skcipher *cts_tfm,
			 struct crypto_sync_skcipher *cbc_tfm,
			 u32 offset, struct xdr_buf *buf, struct page **pages,
			 u8 *iv, unsigned int ivsize)
{
	u32 blocksize, nbytes, nblocks, cbcbytes;
	struct encryptor_desc desc;
	int err;

	blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
	nbytes = buf->len - offset;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	/* Handle block-sized chunks of plaintext with CBC. */
	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);

		desc.pos = offset;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, cbc_tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset, cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return err;
	}

	/* Remaining plaintext is handled with CBC-CTS. */
	err = gss_krb5_cts_crypt(cts_tfm, buf, offset + cbcbytes,
				 desc.iv, pages, 1);
	if (err)
		return err;

	if (unlikely(iv))
		memcpy(iv, desc.iv, ivsize);
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(krb5_cbc_cts_encrypt);
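
/*
 * Worked example: with AES's 16-octet blocks and nbytes = 45, nblocks
 * is 3 and cbcbytes = (3 - 2) * 16 = 16. The first 16 octets are
 * encrypted with plain CBC, and the remaining 29 octets (one full
 * block plus a 13-octet partial block) go through CBC-CTS, which
 * steals ciphertext to cover the short final block.
 */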

/**
 * krb5_cbc_cts_decrypt - decrypt in CBC mode with CTS
 * @cts_tfm: CBC cipher with CTS
 * @cbc_tfm: base CBC cipher
 * @offset: starting byte offset of the ciphertext
 * @buf: IN: ciphertext; OUT: recovered plaintext
 *
 * Return values:
 *   %0: decryption successful
 *   negative errno: decryption could not be completed
 */
VISIBLE_IF_KUNIT
int krb5_cbc_cts_decrypt(struct crypto_sync_skcipher *cts_tfm,
			 struct crypto_sync_skcipher *cbc_tfm,
			 u32 offset, struct xdr_buf *buf)
{
	u32 blocksize, nblocks, cbcbytes;
	struct decryptor_desc desc;
	int err;

	blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
	nblocks = (buf->len + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	/* Handle block-sized chunks of ciphertext with CBC. */
	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, cbc_tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		err = xdr_process_buf(buf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return err;
	}

	/* Remaining ciphertext is handled with CBC-CTS. */
	return gss_krb5_cts_crypt(cts_tfm, buf, cbcbytes, desc.iv, NULL, 0);
}
EXPORT_SYMBOL_IF_KUNIT(krb5_cbc_cts_decrypt);

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct page **save_pages;
	unsigned int conflen;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	}
	conflen = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, conflen))
		return GSS_S_FAILURE;
	krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
			+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	hmac.len = kctx->gk5e->cksumlength;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt! buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server. Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = gss_krb5_checksum(ahash, NULL, 0, buf,
				offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
				   offset + GSS_KRB5_TOK_HDR_LEN,
				   buf, pages, NULL, 0);
	if (err)
		return GSS_S_FAILURE;

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

	return GSS_S_COMPLETE;
}

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_buf subbuf;
	u32 ret = 0;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	}

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
	if (ret)
		goto out_err;

	our_hmac_obj.len = kctx->gk5e->cksumlength;
	our_hmac_obj.data = our_hmac;
	ret = gss_krb5_checksum(ahash, NULL, 0, &subbuf, 0, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = crypto_sync_skcipher_blocksize(cipher);
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/**
 * krb5_etm_checksum - Compute a MAC for a GSS Wrap token
 * @cipher: an initialized cipher transform
 * @tfm: an initialized hash transform
 * @body: xdr_buf containing an RPC message (body.len is the message length)
 * @body_offset: byte offset into @body to start checksumming
 * @cksumout: OUT: a buffer to be filled in with the computed HMAC
 *
 * Usually expressed as H = HMAC(K, IV | ciphertext)[1..h].
 *
 * Caller provides the truncation length of the output token (h) in
 * cksumout.len.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Digest computed, @cksumout filled in
 *   %GSS_S_FAILURE: Call failed
 */
VISIBLE_IF_KUNIT
u32 krb5_etm_checksum(struct crypto_sync_skcipher *cipher,
		      struct crypto_ahash *tfm, const struct xdr_buf *body,
		      int body_offset, struct xdr_netobj *cksumout)
{
	unsigned int ivsize = crypto_sync_skcipher_ivsize(cipher);
	struct ahash_request *req;
	struct scatterlist sg[1];
	u8 *iv, *checksumdata;
	int err = -ENOMEM;

	checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;
	/* For RPCSEC, the "initial cipher state" is always all zeroes. */
	iv = kzalloc(ivsize, GFP_KERNEL);
	if (!iv)
		goto out_free_mem;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_mem;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	err = crypto_ahash_init(req);
	if (err)
		goto out_free_ahash;

	sg_init_one(sg, iv, ivsize);
	ahash_request_set_crypt(req, sg, NULL, ivsize);
	err = crypto_ahash_update(req);
	if (err)
		goto out_free_ahash;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out_free_ahash;

	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out_free_ahash;
	memcpy(cksumout->data, checksumdata, cksumout->len);

out_free_ahash:
	ahash_request_free(req);
out_free_mem:
	kfree(iv);
	kfree_sensitive(checksumdata);
	return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
EXPORT_SYMBOL_IF_KUNIT(krb5_etm_checksum);

/**
 * krb5_etm_encrypt - Encrypt using the RFC 8009 rules
 * @kctx: Kerberos context
 * @offset: starting offset of the payload, in bytes
 * @buf: OUT: send buffer to contain the encrypted payload
 * @pages: plaintext payload
 *
 * The main difference from aes_encrypt is that "The HMAC is
 * calculated over the cipher state concatenated with the AES
 * output, instead of being calculated over the confounder and
 * plaintext. This allows the message receiver to verify the
 * integrity of the message before decrypting the message."
 *
 * RFC 8009 Section 5:
 *
 * encryption function: as follows, where E() is AES encryption in
 * CBC-CS3 mode, and h is the size of truncated HMAC (128 bits or
 * 192 bits as described above).
 *
 * N = random value of length 128 bits (the AES block size)
 * IV = cipher state
 * C = E(Ke, N | plaintext, IV)
 * H = HMAC(Ki, IV | C)
 * ciphertext = C | H[1..h]
 *
 * This encryption formula provides AEAD EtM with key separation.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Encryption successful
 *   %GSS_S_FAILURE: Encryption failed
 */
u32
krb5_etm_encrypt(struct krb5_ctx *kctx, u32 offset,
		 struct xdr_buf *buf, struct page **pages)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct xdr_netobj hmac;
	unsigned int conflen;
	u8 *ecptr;
	u32 err;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	}
	conflen = crypto_sync_skcipher_blocksize(cipher);

	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, conflen))
		return GSS_S_FAILURE;
	krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
			+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
				   offset + GSS_KRB5_TOK_HDR_LEN,
				   buf, pages, NULL, 0);
	if (err)
		return GSS_S_FAILURE;

	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	hmac.len = kctx->gk5e->cksumlength;
	err = krb5_etm_checksum(cipher, ahash,
				buf, offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
	if (err)
		goto out_err;
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

	return GSS_S_COMPLETE;

out_err:
	return GSS_S_FAILURE;
}
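
/*
 * For reference, the Wrap token assembled above has this layout,
 * where C covers the confounder N, the payload, and the copy of the
 * token header placed in the tail:
 *
 *	| GSS token header | C = E(Ke, N | plaintext | header) | H[1..h] |
 */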

/**
 * krb5_etm_decrypt - Decrypt using the RFC 8009 rules
 * @kctx: Kerberos context
 * @offset: starting offset of the ciphertext, in bytes
 * @len: byte offset in @buf of the end of the Wrap token
 * @buf: xdr_buf containing the received Wrap token
 * @headskip: OUT: the enctype's confounder length, in octets
 * @tailskip: OUT: the enctype's HMAC length, in octets
 *
 * RFC 8009 Section 5:
 *
 * decryption function: as follows, where D() is AES decryption in
 * CBC-CS3 mode, and h is the size of truncated HMAC.
 *
 * (C, H) = ciphertext
 * (Note: H is the last h bits of the ciphertext.)
 * IV = cipher state
 * if H != HMAC(Ki, IV | C)[1..h]
 *	stop, report error
 * (N, P) = D(Ke, C, IV)
 *
 * Return values:
 *   %GSS_S_COMPLETE: Decryption successful
 *   %GSS_S_BAD_SIG: computed HMAC != received HMAC
 *   %GSS_S_FAILURE: Decryption failed
 */
u32
krb5_etm_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		 struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj our_hmac_obj;
	struct crypto_ahash *ahash;
	struct xdr_buf subbuf;
	u32 ret = 0;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	}

	/* Extract the ciphertext into @subbuf. */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	our_hmac_obj.data = our_hmac;
	our_hmac_obj.len = kctx->gk5e->cksumlength;
	ret = krb5_etm_checksum(cipher, ahash, &subbuf, 0, &our_hmac_obj);
	if (ret)
		goto out_err;
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;
	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}

	ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
	if (ret) {
		ret = GSS_S_FAILURE;
		goto out_err;
	}

	*headskip = crypto_sync_skcipher_blocksize(cipher);
	*tailskip = kctx->gk5e->cksumlength;
	return GSS_S_COMPLETE;

out_err:
	if (ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}