1 /*
2 * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 #include <openssl/evp.h>
11 #include <openssl/core_names.h>
12 #include <openssl/rand.h>
13 #include <openssl/ssl.h>
14 #include "internal/ssl3_cbc.h"
15 #include "../../ssl_local.h"
16 #include "../record_local.h"
17 #include "recmethod_local.h"
18
/*
 * Initialise the crypto state for this record layer: the cipher context,
 * the MAC context (unless the cipher is an AEAD or "composite" AEAD) and,
 * optionally, compression. This record layer method only protects
 * application data, so any other protection |level| is rejected.
 *
 * |key|/|keylen| and |iv|/|ivlen| are the cipher key and (fixed part of
 * the) IV; |mackey|/|mackeylen| is the MAC key; |taglen| is the AEAD tag
 * length; |mactype| is an EVP_PKEY type for the MAC; |md| is the MAC
 * digest; |comp| is the negotiated compression method or NULL.
 *
 * Returns OSSL_RECORD_RETURN_SUCCESS on success, or
 * OSSL_RECORD_RETURN_FATAL on error.
 */
static int tls1_set_crypto_state(OSSL_RECORD_LAYER *rl, int level,
                                 unsigned char *key, size_t keylen,
                                 unsigned char *iv, size_t ivlen,
                                 unsigned char *mackey, size_t mackeylen,
                                 const EVP_CIPHER *ciph,
                                 size_t taglen,
                                 int mactype,
                                 const EVP_MD *md,
                                 COMP_METHOD *comp)
{
    EVP_CIPHER_CTX *ciph_ctx;
    EVP_PKEY *mac_key;
    /* We encrypt in the write direction and decrypt in the read direction */
    int enc = (rl->direction == OSSL_RECORD_DIRECTION_WRITE) ? 1 : 0;

    /* Pre-TLSv1.3 record protection only applies to application data */
    if (level != OSSL_RECORD_PROTECTION_LEVEL_APPLICATION)
        return OSSL_RECORD_RETURN_FATAL;

    if ((rl->enc_ctx = EVP_CIPHER_CTX_new()) == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_EVP_LIB);
        return OSSL_RECORD_RETURN_FATAL;
    }

    ciph_ctx = rl->enc_ctx;

    rl->md_ctx = EVP_MD_CTX_new();
    if (rl->md_ctx == NULL) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }
#ifndef OPENSSL_NO_COMP
    if (comp != NULL) {
        rl->compctx = COMP_CTX_new(comp);
        if (rl->compctx == NULL) {
            ERR_raise(ERR_LIB_SSL, SSL_R_COMPRESSION_LIBRARY_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
#endif

    /*
     * If we have an AEAD Cipher, then there is no separate MAC, so we can skip
     * setting up the MAC key.
     */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) == 0) {
        if (mactype == EVP_PKEY_HMAC) {
            mac_key = EVP_PKEY_new_raw_private_key_ex(rl->libctx, "HMAC",
                                                      rl->propq, mackey,
                                                      mackeylen);
        } else {
            /*
             * If its not HMAC then the only other types of MAC we support are
             * the GOST MACs, so we need to use the old style way of creating
             * a MAC key.
             */
            mac_key = EVP_PKEY_new_mac_key(mactype, NULL, mackey,
                                           (int)mackeylen);
        }
        if (mac_key == NULL
            || EVP_DigestSignInit_ex(rl->md_ctx, NULL, EVP_MD_get0_name(md),
                                     rl->libctx, rl->propq, mac_key,
                                     NULL)
               <= 0) {
            EVP_PKEY_free(mac_key);
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
        /* The MD ctx keeps its own reference; drop ours */
        EVP_PKEY_free(mac_key);
    }

    if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_GCM_MODE) {
        /* GCM: set the key, then install the fixed (implicit) part of the IV */
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, NULL, enc)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_GCM_SET_IV_FIXED,
                                   (int)ivlen, iv)
               <= 0) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else if (EVP_CIPHER_get_mode(ciph) == EVP_CIPH_CCM_MODE) {
        /*
         * CCM: the IV length and tag length must be configured before the
         * key is set, hence the two-step CipherInit with a NULL key first.
         */
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc)
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, 12,
                                   NULL)
               <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG,
                                   (int)taglen, NULL)
               <= 0
            || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_CCM_SET_IV_FIXED,
                                   (int)ivlen, iv)
               <= 0
            || !EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    } else {
        /* CBC/stream ciphers: a single init with key and IV suffices */
        if (!EVP_CipherInit_ex(ciph_ctx, ciph, NULL, key, iv, enc)) {
            ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
            return OSSL_RECORD_RETURN_FATAL;
        }
    }
    /* Needed for "composite" AEADs, such as RC4-HMAC-MD5 */
    if ((EVP_CIPHER_get_flags(ciph) & EVP_CIPH_FLAG_AEAD_CIPHER) != 0
        && mackeylen != 0
        && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_MAC_KEY,
                               (int)mackeylen, mackey)
           <= 0) {
        ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
        return OSSL_RECORD_RETURN_FATAL;
    }

    /*
     * The cipher we actually ended up using in the EVP_CIPHER_CTX may be
     * different to that in ciph if we have an ENGINE in use
     */
    if (EVP_CIPHER_get0_provider(EVP_CIPHER_CTX_get0_cipher(ciph_ctx)) != NULL
        && !ossl_set_tls_provider_parameters(rl, ciph_ctx, ciph, md)) {
        /* ERR_raise already called */
        return OSSL_RECORD_RETURN_FATAL;
    }

    /* Calculate the explicit IV length */
    if (RLAYER_USE_EXPLICIT_IV(rl)) {
        int mode = EVP_CIPHER_CTX_get_mode(ciph_ctx);
        int eivlen = 0;

        if (mode == EVP_CIPH_CBC_MODE) {
            eivlen = EVP_CIPHER_CTX_get_iv_length(ciph_ctx);
            if (eivlen < 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_LIBRARY_BUG);
                return OSSL_RECORD_RETURN_FATAL;
            }
            /* An IV length of 0 or 1 means there is no explicit IV to send */
            if (eivlen <= 1)
                eivlen = 0;
        } else if (mode == EVP_CIPH_GCM_MODE) {
            /* Need explicit part of IV for GCM mode */
            eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN;
        } else if (mode == EVP_CIPH_CCM_MODE) {
            eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN;
        }
        rl->eivlen = (size_t)eivlen;
    }

    return OSSL_RECORD_RETURN_SUCCESS;
}
161
/* Maximum CBC padding permitted on the wire (padding length byte + 255) */
#define MAX_PADDING 256
/*-
 * tls1_cipher encrypts/decrypts |n_recs| in |recs|. Calls RLAYERfatal on
 * internal error, but not otherwise. It is the responsibility of the caller to
 * report a bad_record_mac - if appropriate (DTLS just drops the record).
 *
 * Returns:
 *    0: if the record is publicly invalid, or an internal error, or AEAD
 *       decryption failed, or Encrypt-then-mac decryption failed.
 *    1: Success or Mac-then-encrypt decryption failed (MAC will be randomised)
 */
static int tls1_cipher(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *recs,
                       size_t n_recs, int sending, SSL_MAC_BUF *macs,
                       size_t macsize)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    /* Per-record AAD: 8 byte seq || type || version(2) || length(2) */
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    unsigned char *data[SSL_MAX_PIPELINES];
    int pad = 0, tmpr, provided;
    size_t bs, ctr, padnum, loop;
    unsigned char padval;
    const EVP_CIPHER *enc;

    if (n_recs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* Sanity check that the digest (if any) has a sensible size */
    if (EVP_MD_CTX_get0_md(rl->md_ctx)) {
        int n = EVP_MD_CTX_get_size(rl->md_ctx);

        if (!ossl_assert(n >= 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }
    ds = rl->enc_ctx;
    if (!ossl_assert(rl->enc_ctx != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    enc = EVP_CIPHER_CTX_get0_cipher(rl->enc_ctx);

    if (sending) {
        int ivlen;

        /* For TLSv1.1 and later explicit IV */
        if (RLAYER_USE_EXPLICIT_IV(rl)
            && EVP_CIPHER_get_mode(enc) == EVP_CIPH_CBC_MODE)
            ivlen = EVP_CIPHER_get_iv_length(enc);
        else
            ivlen = 0;
        if (ivlen > 1) {
            /*
             * Fill the explicit IV at the front of each record with fresh
             * random bytes. |data| must alias |input| here because we
             * overwrite the record in place.
             */
            for (ctr = 0; ctr < n_recs; ctr++) {
                if (recs[ctr].data != recs[ctr].input) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                } else if (RAND_bytes_ex(rl->libctx, recs[ctr].input,
                                         ivlen, 0)
                           <= 0) {
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    }
    if (!ossl_assert(enc != NULL)) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /* Provider-based ciphers take a different code path than legacy ones */
    provided = (EVP_CIPHER_get0_provider(enc) != NULL);

    bs = EVP_CIPHER_get_block_size(EVP_CIPHER_CTX_get0_cipher(ds));

    if (bs == 0) {
        RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_BAD_CIPHER);
        return 0;
    }

    if (n_recs > 1) {
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_PIPELINE)
            == 0) {
            /*
             * We shouldn't have been called with pipeline data if the
             * cipher doesn't support pipelining
             */
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }
    /* Per-record preparation: AAD for AEAD ciphers, padding for legacy CBC */
    for (ctr = 0; ctr < n_recs; ctr++) {
        reclen[ctr] = recs[ctr].length;

        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_AEAD_CIPHER)
            != 0) {
            unsigned char *seq;

            seq = rl->sequence;

            if (rl->isdtls) {
                /* DTLS: sequence is 2 byte epoch || 6 byte counter */
                unsigned char dtlsseq[8], *p = dtlsseq;

                s2n(rl->epoch, p);
                memcpy(p, &seq[2], 6);
                memcpy(buf[ctr], dtlsseq, 8);
            } else {
                memcpy(buf[ctr], seq, 8);
                /* TLS bumps the sequence number here; DTLS does it elsewhere */
                if (!tls_increment_sequence_ctr(rl)) {
                    /* RLAYERfatal already called */
                    return 0;
                }
            }

            buf[ctr][8] = recs[ctr].type;
            buf[ctr][9] = (unsigned char)(rl->version >> 8);
            buf[ctr][10] = (unsigned char)(rl->version);
            buf[ctr][11] = (unsigned char)(recs[ctr].length >> 8);
            buf[ctr][12] = (unsigned char)(recs[ctr].length & 0xff);
            /*
             * Feed the AAD to the cipher. The ctrl returns the number of
             * bytes of overhead (e.g. tag/explicit IV) the cipher will add
             * when encrypting.
             */
            pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                      EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
            if (pad <= 0) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }

            if (sending) {
                reclen[ctr] += pad;
                recs[ctr].length += pad;
            }
        } else if ((bs != 1) && sending && !provided) {
            /*
             * We only do this for legacy ciphers. Provided ciphers add the
             * padding on the provider side.
             */
            padnum = bs - (reclen[ctr] % bs);

            /* Add weird padding of up to 256 bytes */

            if (padnum > MAX_PADDING) {
                RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
                return 0;
            }
            /* we need to add 'padnum' padding bytes of value padval */
            padval = (unsigned char)(padnum - 1);
            for (loop = reclen[ctr]; loop < reclen[ctr] + padnum; loop++)
                recs[ctr].input[loop] = padval;
            reclen[ctr] += padnum;
            recs[ctr].length += padnum;
        }

        if (!sending) {
            /* Ciphertext must be a non-empty multiple of the block size */
            if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) {
                /* Publicly invalid */
                return 0;
            }
        }
    }
    if (n_recs > 1) {
        /* Set the output buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].data;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                (int)n_recs, data)
            <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
        /* Set the input buffers */
        for (ctr = 0; ctr < n_recs; ctr++)
            data[ctr] = recs[ctr].input;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                (int)n_recs, data)
            <= 0
            || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                   (int)n_recs, reclen)
               <= 0) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, SSL_R_PIPELINE_FAILURE);
            return 0;
        }
    }

    /* TLSTREE is a GOST-specific key-meshing mechanism keyed off the seq no */
    if (!rl->isdtls && rl->tlstree) {
        int decrement_seq = 0;

        /*
         * When sending, seq is incremented after MAC calculation.
         * So if we are in ETM mode, we use seq 'as is' in the ctrl-function.
         * Otherwise we have to decrease it in the implementation
         */
        if (sending && !rl->use_etm)
            decrement_seq = 1;

        if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_TLSTREE, decrement_seq,
                                rl->sequence)
            <= 0) {

            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    if (provided) {
        int outlen;

        /* Provided cipher - we do not support pipelining on this path */
        if (n_recs > 1) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }

        if (!EVP_CipherUpdate(ds, recs[0].data, &outlen, recs[0].input,
                              (unsigned int)reclen[0]))
            return 0;
        recs[0].length = outlen;

        /*
         * The length returned from EVP_CipherUpdate above is the actual
         * payload length. We need to adjust the data/input ptr to skip over
         * any explicit IV
         */
        if (!sending) {
            if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                recs[0].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
            } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                recs[0].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                recs[0].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
            } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                /* CBC with explicit IV: IV occupies one cipher block */
                recs[0].data += bs;
                recs[0].input += bs;
                recs[0].orig_len -= bs;
            }

            /* Now get a pointer to the MAC (if applicable) */
            if (macs != NULL) {
                OSSL_PARAM params[2], *p = params;

                /* Get the MAC */
                macs[0].alloced = 0;

                /* The provider hands back a pointer into its own buffer */
                *p++ = OSSL_PARAM_construct_octet_ptr(OSSL_CIPHER_PARAM_TLS_MAC,
                                                      (void **)&macs[0].mac,
                                                      macsize);
                *p = OSSL_PARAM_construct_end();

                if (!EVP_CIPHER_CTX_get_params(ds, params)) {
                    /* Shouldn't normally happen */
                    RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR,
                                ERR_R_INTERNAL_ERROR);
                    return 0;
                }
            }
        }
    } else {
        /* Legacy cipher */

        tmpr = EVP_Cipher(ds, recs[0].data, recs[0].input,
                          (unsigned int)reclen[0]);
        /*
         * Custom ciphers signal failure with a negative return; others
         * return 0 on failure.
         */
        if ((EVP_CIPHER_get_flags(EVP_CIPHER_CTX_get0_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER)
            != 0
            ? (tmpr < 0)
            : (tmpr == 0)) {
            /* AEAD can fail to verify MAC */
            return 0;
        }

        if (!sending) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                /* Adjust the record to remove the explicit IV/MAC/Tag */
                if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_GCM_MODE) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                } else if (EVP_CIPHER_get_mode(enc) == EVP_CIPH_CCM_MODE) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                } else if (bs != 1 && RLAYER_USE_EXPLICIT_IV(rl)) {
                    if (recs[ctr].length < bs)
                        return 0;
                    recs[ctr].data += bs;
                    recs[ctr].input += bs;
                    recs[ctr].length -= bs;
                    recs[ctr].orig_len -= bs;
                }

                /*
                 * If using Mac-then-encrypt, then this will succeed but
                 * with a random MAC if padding is invalid
                 */
                if (!tls1_cbc_remove_padding_and_mac(&recs[ctr].length,
                                                     recs[ctr].orig_len,
                                                     recs[ctr].data,
                                                     (macs != NULL) ? &macs[ctr].mac : NULL,
                                                     (macs != NULL) ? &macs[ctr].alloced
                                                                    : NULL,
                                                     bs,
                                                     pad ? (size_t)pad : macsize,
                                                     (EVP_CIPHER_get_flags(enc)
                                                      & EVP_CIPH_FLAG_AEAD_CIPHER)
                                                     != 0,
                                                     rl->libctx))
                    return 0;
            }
        }
    }
    return 1;
}
478
/*
 * Compute the MAC over |rec| for a TLSv1.0-1.2/DTLS record and write it to
 * |md| (an output buffer, not a digest). The MAC covers a 13 byte pseudo
 * header (sequence number, type, version, length) followed by the record
 * payload. For TLS the sequence counter is incremented after the MAC is
 * computed; for DTLS the caller manages the sequence number.
 *
 * Returns 1 on success, 0 on error.
 */
static int tls1_mac(OSSL_RECORD_LAYER *rl, TLS_RL_RECORD *rec, unsigned char *md,
                    int sending)
{
    unsigned char *seq = rl->sequence;
    EVP_MD_CTX *hash;
    size_t md_size;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    /* 8 byte sequence || 1 byte type || 2 byte version || 2 byte length */
    unsigned char header[13];
    int t;
    int ret = 0;

    hash = rl->md_ctx;

    t = EVP_MD_CTX_get_size(hash);
    if (!ossl_assert(t >= 0))
        return 0;
    md_size = t;

    if (rl->stream_mac) {
        /* Stream MACs (e.g. GOST) keep running state: use the ctx directly */
        mac_ctx = hash;
    } else {
        /* Work on a copy so rl->md_ctx keeps only the keyed initial state */
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash)) {
            goto end;
        }
        mac_ctx = hmac;
    }

    /* TLSTREE (GOST) re-keys the MAC based on the sequence number */
    if (!rl->isdtls
        && rl->tlstree
        && EVP_MD_CTX_ctrl(mac_ctx, EVP_MD_CTRL_TLSTREE, 0, seq) <= 0)
        goto end;

    if (rl->isdtls) {
        /* DTLS: 2 byte epoch || low 6 bytes of the sequence counter */
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(rl->epoch, p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else {
        memcpy(header, seq, 8);
    }

    header[8] = rec->type;
    header[9] = (unsigned char)(rl->version >> 8);
    header[10] = (unsigned char)(rl->version);
    header[11] = (unsigned char)(rec->length >> 8);
    header[12] = (unsigned char)(rec->length & 0xff);

    /*
     * Mac-then-encrypt CBC verification: tell the HMAC implementation the
     * on-the-wire record length so it can digest in constant time and not
     * leak the padding length (Lucky 13 countermeasure).
     */
    if (!sending && !rl->use_etm
        && EVP_CIPHER_CTX_get_mode(rl->enc_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(mac_ctx)) {
        OSSL_PARAM tls_hmac_params[2], *p = tls_hmac_params;

        *p++ = OSSL_PARAM_construct_size_t(OSSL_MAC_PARAM_TLS_DATA_SIZE,
                                           &rec->orig_len);
        *p++ = OSSL_PARAM_construct_end();

        if (!EVP_PKEY_CTX_set_params(EVP_MD_CTX_get_pkey_ctx(mac_ctx),
                                     tls_hmac_params))
            goto end;
    }

    if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
        || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
        || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0)
        goto end;

    OSSL_TRACE_BEGIN(TLS)
    {
        BIO_printf(trc_out, "seq:\n");
        BIO_dump_indent(trc_out, seq, 8, 4);
        BIO_printf(trc_out, "rec:\n");
        BIO_dump_indent(trc_out, rec->data, rec->length, 4);
    }
    OSSL_TRACE_END(TLS);

    if (!rl->isdtls && !tls_increment_sequence_ctr(rl)) {
        /* RLAYERfatal already called */
        goto end;
    }

    OSSL_TRACE_BEGIN(TLS)
    {
        BIO_printf(trc_out, "md:\n");
        BIO_dump_indent(trc_out, md, md_size, 4);
    }
    OSSL_TRACE_END(TLS);
    ret = 1;
 end:
    /* EVP_MD_CTX_free(NULL) is a no-op in the stream_mac case */
    EVP_MD_CTX_free(hmac);
    return ret;
}
573
/*
 * MAX_PREFIX_LEN is the worst-case space the empty-fragment prefix record
 * can occupy at the front of the write buffer: optional payload alignment
 * slack, the record header, the maximum encryption overhead and (when
 * compression is compiled in) the worst-case compression expansion.
 */
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
#ifndef OPENSSL_NO_COMP
#define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                        + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                        + SSL3_RT_HEADER_LENGTH \
                        + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
#else
#define MAX_PREFIX_LEN ((SSL3_ALIGN_PAYLOAD - 1) \
                        + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                        + SSL3_RT_HEADER_LENGTH)
#endif /* OPENSSL_NO_COMP */
#else
#ifndef OPENSSL_NO_COMP
#define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                        + SSL3_RT_HEADER_LENGTH \
                        + SSL3_RT_MAX_COMPRESSED_OVERHEAD)
#else
#define MAX_PREFIX_LEN (SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD \
                        + SSL3_RT_HEADER_LENGTH)
#endif /* OPENSSL_NO_COMP */
#endif
595
596 /* This function is also used by the SSLv3 implementation */
/*
 * Allocate the write buffers for |numtempl| outgoing records, reserving an
 * extra, smaller buffer for an empty-fragment prefix record when the CBC
 * known-IV countermeasure is in effect. *|prefix| is set to 1 when that
 * extra record is needed and 0 otherwise.
 *
 * This function is also used by the SSLv3 implementation.
 *
 * Returns 1 on success, 0 on failure.
 */
int tls1_allocate_write_buffers(OSSL_RECORD_LAYER *rl,
                                OSSL_RECORD_TEMPLATE *templates,
                                size_t numtempl, size_t *prefix)
{
    size_t firstlen = 0;

    /* An empty record prefix is only needed for application data records */
    if (rl->need_empty_fragments
            && templates[0].type == SSL3_RT_APPLICATION_DATA)
        *prefix = 1;
    else
        *prefix = 0;

    /*
     * The prefix record is tiny, so its buffer can be much smaller than the
     * default allocation used for the remaining records.
     */
    if (*prefix)
        firstlen = MAX_PREFIX_LEN;

    if (tls_setup_write_buffer(rl, numtempl + *prefix, firstlen, 0))
        return 1;

    /* RLAYERfatal() already called */
    return 0;
}
617
618 /* This function is also used by the SSLv3 implementation */
/*
 * Initialise the WPACKETs used to assemble outgoing records. When the CBC
 * known-IV countermeasure requires it, an extra empty application data
 * record template is constructed in |*prefixtempl| and its WPACKET is set
 * up in pkt[0]; the remaining templates are then handled by the default
 * implementation starting one slot further along.
 *
 * This function is also used by the SSLv3 implementation.
 *
 * Returns 1 on success, 0 on failure.
 */
int tls1_initialise_write_packets(OSSL_RECORD_LAYER *rl,
                                  OSSL_RECORD_TEMPLATE *templates,
                                  size_t numtempl,
                                  OSSL_RECORD_TEMPLATE *prefixtempl,
                                  WPACKET *pkt,
                                  TLS_BUFFER *bufs,
                                  size_t *wpinited)
{
    size_t prefix = 0;

    /* Do we need to add an empty record prefix? */
    if (rl->need_empty_fragments
            && templates[0].type == SSL3_RT_APPLICATION_DATA)
        prefix = 1;

    if (prefix != 0) {
        TLS_BUFFER *firstbuf = &bufs[0];
        size_t align = 0;

        /*
         * countermeasure against known-IV weakness in CBC ciphersuites (see
         * http://www.openssl.org/~bodo/tls-cbc.txt)
         */
        prefixtempl->buf = NULL;
        prefixtempl->buflen = 0;
        prefixtempl->version = templates[0].version;
        prefixtempl->type = SSL3_RT_APPLICATION_DATA;

#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD != 0
        /* Offset so that the record payload lands on an aligned boundary */
        align = (size_t)TLS_BUFFER_get_buf(firstbuf) + SSL3_RT_HEADER_LENGTH;
        align = SSL3_ALIGN_PAYLOAD - 1
                - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif
        TLS_BUFFER_set_offset(firstbuf, align);

        if (!WPACKET_init_static_len(&pkt[0], TLS_BUFFER_get_buf(firstbuf),
                                     TLS_BUFFER_get_len(firstbuf), 0)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
        *wpinited = 1;
        /* Consume the alignment bytes so the record starts after them */
        if (!WPACKET_allocate_bytes(&pkt[0], align, NULL)) {
            RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
            return 0;
        }
    }

    return tls_initialise_write_packets_default(rl, templates, numtempl,
                                                NULL, pkt + prefix,
                                                bufs + prefix, wpinited);
}
671
/*
 * TLSv1.0, TLSv1.1 and TLSv1.2 all use the same funcs.
 *
 * Positional initializer: the slot order must match the declaration of
 * struct record_functions_st in recmethod_local.h. NULL entries are
 * optional hooks that these protocol versions do not need.
 */
const struct record_functions_st tls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    tls_get_more_records,
    tls_default_validate_record_header,
    tls_default_post_process_record,
    tls_get_max_records_multiblock,
    tls_write_records_multiblock, /* Defined in tls_multib.c */
    tls1_allocate_write_buffers,
    tls1_initialise_write_packets,
    NULL,
    tls_prepare_record_header_default,
    NULL,
    tls_prepare_for_encryption_default,
    tls_post_encryption_processing_default,
    NULL
};
693
/*
 * Record layer methods for DTLS.
 *
 * Positional initializer: the slot order must match the declaration of
 * struct record_functions_st in recmethod_local.h. NULL entries are
 * optional hooks that DTLS does not need.
 */
const struct record_functions_st dtls_1_funcs = {
    tls1_set_crypto_state,
    tls1_cipher,
    tls1_mac,
    tls_default_set_protocol_version,
    tls_default_read_n,
    dtls_get_more_records,
    NULL,
    NULL,
    NULL,
    tls_write_records_default,
    /*
     * Don't use tls1_allocate_write_buffers since that handles empty fragment
     * records which aren't needed in DTLS. We just use the default allocation
     * instead.
     */
    tls_allocate_write_buffers_default,
    /* Don't use tls1_initialise_write_packets for same reason as above */
    tls_initialise_write_packets_default,
    NULL,
    dtls_prepare_record_header,
    NULL,
    tls_prepare_for_encryption_default,
    dtls_post_encryption_processing,
    NULL
};
720