1 /*
2 * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
3 *
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
8 */
9
10 #include <openssl/ssl.h>
11 #include "internal/quic_record_rx.h"
12 #include "quic_record_shared.h"
13 #include "internal/common.h"
14 #include "internal/list.h"
15 #include "../ssl_local.h"
16
17 /*
18 * Mark a packet in a bitfield.
19 *
20 * pkt_idx: index of packet within datagram.
21 */
pkt_mark(uint64_t * bitf,size_t pkt_idx)22 static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
23 {
24 assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
25 *bitf |= ((uint64_t)1) << pkt_idx;
26 }
27
28 /* Returns 1 if a packet is in the bitfield. */
pkt_is_marked(const uint64_t * bitf,size_t pkt_idx)29 static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
30 {
31 assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
32 return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
33 }
34
/*
 * RXE
 * ===
 *
 * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
 * network. One RXE is used per received QUIC packet.
 */
typedef struct rxe_st RXE;

struct rxe_st {
    /* Public packet view handed out to QRX users; must be the first field. */
    OSSL_QRX_PKT pkt;
    /* Linkage for membership of the QRX rx_free/rx_pending lists. */
    OSSL_LIST_MEMBER(rxe, RXE);
    /*
     * data_len:  valid payload bytes following this structure;
     * alloc_len: allocated bytes following this structure;
     * refcount:  outstanding user references (0 while owned by the QRX).
     */
    size_t data_len, alloc_len, refcount;

    /* Extra fields for per-packet information. */
    QUIC_PKT_HDR hdr; /* data/len are decrypted payload */

    /* Decoded packet number. */
    QUIC_PN pn;

    /* Addresses copied from URXE. */
    BIO_ADDR peer, local;

    /* Time we received the packet (not when we processed it). */
    OSSL_TIME time;

    /* Total length of the datagram which contained this packet. */
    size_t datagram_len;

    /*
     * The key epoch the packet was received with. Always 0 for non-1-RTT
     * packets.
     */
    uint64_t key_epoch;

    /*
     * Monotonically increases with each datagram received.
     * For diagnostic use only.
     */
    uint64_t datagram_id;

    /*
     * alloc_len allocated bytes (of which data_len bytes are valid) follow this
     * structure.
     */
};

DEFINE_LIST_OF(rxe, RXE);
typedef OSSL_LIST(rxe) RXE_LIST;
84
/*
 * Returns a pointer to the payload buffer allocated immediately after the RXE
 * structure itself (see the alloc_len/data_len fields of struct rxe_st).
 */
static ossl_inline unsigned char *rxe_data(const RXE *e)
{
    return (unsigned char *)(e + 1);
}
89
/*
 * QRL
 * ===
 */
struct ossl_qrx_st {
    /* Library context and property query string used for crypto fetches. */
    OSSL_LIB_CTX *libctx;
    const char *propq;

    /* Demux to receive datagrams from. */
    QUIC_DEMUX *demux;

    /* Length of connection IDs used in short-header packets in bytes. */
    size_t short_conn_id_len;

    /* Maximum number of deferred datagrams buffered at any one time. */
    size_t max_deferred;

    /* Current count of deferred datagrams. */
    size_t num_deferred;

    /*
     * List of URXEs which are filled with received encrypted data.
     * These are returned to the DEMUX's free list as they are processed.
     */
    QUIC_URXE_LIST urx_pending;

    /*
     * List of URXEs which we could not decrypt immediately and which are being
     * kept in case they can be decrypted later.
     */
    QUIC_URXE_LIST urx_deferred;

    /*
     * List of RXEs which are not currently in use. These are moved
     * to the pending list as they are filled.
     */
    RXE_LIST rx_free;

    /*
     * List of RXEs which are filled with decrypted packets ready to be passed
     * to the user. A RXE is removed from all lists inside the QRL when passed
     * to the user, then returned to the free list when the user returns it.
     */
    RXE_LIST rx_pending;

    /* Largest PN we have received and processed in a given PN space. */
    QUIC_PN largest_pn[QUIC_PN_SPACE_NUM];

    /* Per encryption-level state. */
    OSSL_QRL_ENC_LEVEL_SET el_set;

    /* Bytes we have received since this counter was last cleared. */
    uint64_t bytes_received;

    /*
     * Number of forged packets we have received since the QRX was instantiated.
     * Note that as per RFC 9001, this is connection-level state; it is not per
     * EL and is not reset by a key update.
     */
    uint64_t forged_pkt_count;

    /*
     * The PN the current key epoch started at, inclusive.
     */
    uint64_t cur_epoch_start_pn;

    /* Validation callback. */
    ossl_qrx_late_validation_cb *validation_cb;
    void *validation_cb_arg;

    /* Key update callback. */
    ossl_qrx_key_update_cb *key_update_cb;
    void *key_update_cb_arg;

    /* Initial key phase. For debugging use only; always 0 in real use. */
    unsigned char init_key_phase_bit;

    /* Are we allowed to process 1-RTT packets yet? */
    unsigned char allow_1rtt;

    /* Message callback related arguments */
    ossl_msg_cb msg_callback;
    void *msg_callback_arg;
    SSL *msg_callback_ssl;
};
175
/*
 * Forward declarations of internal helpers; definitions appear later in this
 * file.
 */
static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len);
static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
                                  const QUIC_CONN_ID *first_dcid);
static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
                               const unsigned char **pptr, size_t buf_len);
static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe);
static RXE *qrx_reserve_rxe(RXE_LIST *rxl, RXE *rxe, size_t n);
static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
                                const unsigned char *src,
                                size_t src_len, size_t *dec_len,
                                const unsigned char *aad, size_t aad_len,
                                QUIC_PN pn, uint32_t enc_level,
                                unsigned char key_phase_bit,
                                uint64_t *rx_key_epoch);
static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe);
static uint32_t rxe_determine_pn_space(RXE *rxe);
static void ignore_res(int x);
193
ossl_qrx_new(const OSSL_QRX_ARGS * args)194 OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
195 {
196 OSSL_QRX *qrx;
197 size_t i;
198
199 if (args->demux == NULL || args->max_deferred == 0)
200 return NULL;
201
202 qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
203 if (qrx == NULL)
204 return NULL;
205
206 for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
207 qrx->largest_pn[i] = args->init_largest_pn[i];
208
209 qrx->libctx = args->libctx;
210 qrx->propq = args->propq;
211 qrx->demux = args->demux;
212 qrx->short_conn_id_len = args->short_conn_id_len;
213 qrx->init_key_phase_bit = args->init_key_phase_bit;
214 qrx->max_deferred = args->max_deferred;
215 return qrx;
216 }
217
/* Unlink and free every RXE on the given list. */
static void qrx_cleanup_rxl(RXE_LIST *l)
{
    RXE *e;

    while ((e = ossl_list_rxe_head(l)) != NULL) {
        ossl_list_rxe_remove(l, e);
        OPENSSL_free(e);
    }
}
228
/* Unlink every URXE on the given list and hand it back to the demuxer. */
static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
{
    QUIC_URXE *e;

    while ((e = ossl_list_urxe_head(l)) != NULL) {
        ossl_list_urxe_remove(l, e);
        ossl_quic_demux_release_urxe(qrx->demux, e);
    }
}
239
/* Copy the largest-processed-PN state for every PN space from src to dst. */
void ossl_qrx_update_pn_space(OSSL_QRX *src, OSSL_QRX *dst)
{
    size_t pn_space;

    for (pn_space = 0; pn_space < QUIC_PN_SPACE_NUM; ++pn_space)
        dst->largest_pn[pn_space] = src->largest_pn[pn_space];
}
249
/*
 * Free the QRX and everything it owns: all RXEs, any URXEs still queued
 * (returned to the demuxer), and all per-EL keying material.
 */
void ossl_qrx_free(OSSL_QRX *qrx)
{
    uint32_t lvl;

    if (qrx == NULL)
        return;

    /* Free RXE queue data. */
    qrx_cleanup_rxl(&qrx->rx_free);
    qrx_cleanup_rxl(&qrx->rx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_pending);
    qrx_cleanup_urxl(qrx, &qrx->urx_deferred);

    /* Drop keying material and crypto resources. */
    for (lvl = 0; lvl < QUIC_ENC_LEVEL_NUM; ++lvl)
        ossl_qrl_enc_level_set_discard(&qrx->el_set, lvl);

    OPENSSL_free(qrx);
}
269
/* Take ownership of an URXE from the demuxer and queue it for processing. */
void ossl_qrx_inject_urxe(OSSL_QRX *qrx, QUIC_URXE *urxe)
{
    /* Initialize our own fields inside the URXE and add to the pending list. */
    urxe->processed   = 0;
    urxe->hpr_removed = 0;
    urxe->deferred    = 0;
    ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);

    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM,
                          urxe + 1, urxe->data_len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);
}
283
/*
 * Re-queue an already-processed packet onto the pending list.
 *
 * port_default_packet_handler() uses ossl_qrx_read_pkt() to obtain pkt; such
 * a packet carries refcount 1, so drop that reference before requeueing.
 */
void ossl_qrx_inject_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT *pkt)
{
    RXE *rxe = (RXE *)pkt;

    ossl_qrx_pkt_orphan(pkt);
    if (ossl_assert(rxe->refcount == 0))
        ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
}
296
/*
 * qrx_validate_initial_pkt() is derived from qrx_process_pkt(). Unlike
 * qrx_process_pkt() the qrx_validate_initial_pkt() function can process
 * initial packet only. All other packets should be discarded. This allows
 * port_default_packet_handler() to validate incoming packet. If packet
 * is not valid, then port_default_packet_handler() must discard the
 * packet instead of creating a new channel for it.
 *
 * Returns 1 on success, in which case the decrypted packet has been moved to
 * qrx->rx_pending; returns 0 if the packet is malformed, not an Initial
 * packet, or fails decryption, in which case the caller should discard urxe.
 */
static int qrx_validate_initial_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
                                    const QUIC_CONN_ID *first_dcid,
                                    size_t datagram_len)
{
    PACKET pkt, orig_pkt;
    RXE *rxe;
    size_t i = 0, aad_len = 0, dec_len = 0;
    const unsigned char *sop;
    unsigned char *dst;
    QUIC_PKT_HDR_PTRS ptrs;
    uint32_t pn_space;
    OSSL_QRL_ENC_LEVEL *el = NULL;
    uint64_t rx_key_epoch = UINT64_MAX;

    if (!PACKET_buf_init(&pkt, ossl_quic_urxe_data(urxe), urxe->data_len))
        return 0;

    /* Keep a copy so the header can be decoded a second time below. */
    orig_pkt = pkt;
    sop = PACKET_data(&pkt);

    /*
     * Get a free RXE. If we need to allocate a new one, use the packet length
     * as a good ballpark figure.
     */
    rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(&pkt));
    if (rxe == NULL)
        return 0;

    /*
     * we expect INITIAL packet only, therefore it is OK to pass
     * short_conn_id_len as 0.
     */
    if (!ossl_quic_wire_decode_pkt_hdr(&pkt,
                                       0, /* short_conn_id_len */
                                       1, /* need second decode */
                                       0, /* nodata -> want to read data */
                                       &rxe->hdr, &ptrs,
                                       NULL))
        goto malformed;

    if (rxe->hdr.type != QUIC_PKT_TYPE_INITIAL)
        goto malformed;

    if (!qrx_validate_hdr_early(qrx, rxe, NULL))
        goto malformed;

    /* We must already have Initial keys to be able to decrypt this packet. */
    if (ossl_qrl_enc_level_set_have_el(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL) != 1)
        goto malformed;

    /* NOTE(review): always true here; the non-Initial case bailed out above. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
        const unsigned char *token = rxe->hdr.token;

        /*
         * This may change the value of rxe and change the value of the token
         * pointer as well. So we must make a temporary copy of the pointer to
         * the token, and then copy it back into the new location of the rxe
         */
        if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
            goto malformed;

        rxe->hdr.token = token;
    }

    /* Rewind to the start of the packet for the second (full) header decode. */
    pkt = orig_pkt;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL, 1);
    assert(el != NULL); /* Already checked above */

    if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
        goto malformed;

    /*
     * We have removed header protection, so don't attempt to do it again if
     * the packet gets deferred and processed again.
     */
    pkt_mark(&urxe->hpr_removed, 0);

    /* Decode the now unprotected header. */
    if (ossl_quic_wire_decode_pkt_hdr(&pkt, 0,
                                      0, 0, &rxe->hdr, NULL, NULL)
        != 1)
        goto malformed;

    /* Validate header and decode PN. */
    if (!qrx_validate_hdr(qrx, rxe))
        goto malformed;

    /*
     * The AAD data is the entire (unprotected) packet header including the PN.
     * The packet header has been unprotected in place, so we can just reuse the
     * PACKET buffer. The header ends where the payload begins.
     */
    aad_len = rxe->hdr.data - sop;

    /* Ensure the RXE buffer size is adequate for our payload. */
    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL)
        goto malformed;

    /*
     * We decrypt the packet body to immediately after the token at the start of
     * the RXE buffer (where present).
     *
     * Do the decryption from the PACKET (which points into URXE memory) to our
     * RXE payload (single-copy decryption), then fixup the pointers in the
     * header to point to our new buffer.
     *
     * If decryption fails this is considered a permanent error; we defer
     * packets we don't yet have decryption keys for above, so if this fails,
     * something has gone wrong with the handshake process or a packet has been
     * corrupted.
     */
    dst = (unsigned char *)rxe_data(rxe) + i;
    if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
                              &dec_len, sop, aad_len, rxe->pn, QUIC_ENC_LEVEL_INITIAL,
                              rxe->hdr.key_phase, &rx_key_epoch))
        goto malformed;

    /*
     * -----------------------------------------------------
     *   IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
     *              AND MUST BE TIMING-CHANNEL SAFE.
     * -----------------------------------------------------
     *
     * At this point, we have successfully authenticated the AEAD tag and no
     * longer need to worry about exposing the PN, PN length or Key Phase bit in
     * timing channels. Invoke any configured validation callback to allow for
     * rejection of duplicate PNs.
     */
    if (!qrx_validate_hdr_late(qrx, rxe))
        goto malformed;

    /* Packet 0 of this datagram has now been fully processed. */
    pkt_mark(&urxe->processed, 0);

    /*
     * Update header to point to the decrypted buffer, which may be shorter
     * due to AEAD tags, block padding, etc.
     */
    rxe->hdr.data = dst;
    rxe->hdr.len = dec_len;
    rxe->data_len = dec_len;
    rxe->datagram_len = datagram_len;
    rxe->key_epoch = rx_key_epoch;

    /* We processed the PN successfully, so update largest processed PN. */
    pn_space = rxe_determine_pn_space(rxe);
    if (rxe->pn > qrx->largest_pn[pn_space])
        qrx->largest_pn[pn_space] = rxe->pn;

    /* Copy across network addresses and RX time from URXE to RXE. */
    rxe->peer = urxe->peer;
    rxe->local = urxe->local;
    rxe->time = urxe->time;
    rxe->datagram_id = urxe->datagram_id;

    /*
     * The packet is decrypted; move it from the rx_free list to the
     * rx_pending queue, where it waits to be further processed by ch_rx().
     */
    ossl_list_rxe_remove(&qrx->rx_free, rxe);
    ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);

    return 1;

 malformed:
    /* caller (port_default_packet_handler()) should discard urxe */
    return 0;
}
473
/*
 * Validate a datagram as containing a decryptable QUIC Initial packet.
 * On success the decrypted packet is queued on the QRX pending list.
 */
int ossl_qrx_validate_initial_packet(OSSL_QRX *qrx, QUIC_URXE *urxe,
                                     const QUIC_CONN_ID *dcid)
{
    /* Reset the QRX-owned bookkeeping fields before processing. */
    urxe->processed   = 0;
    urxe->hpr_removed = 0;
    urxe->deferred    = 0;

    return qrx_validate_initial_pkt(qrx, urxe, dcid, urxe->data_len);
}
483
/* Move every deferred URXE back onto the tail of the pending queue. */
static void qrx_requeue_deferred(OSSL_QRX *qrx)
{
    QUIC_URXE *urxe;

    for (;;) {
        urxe = ossl_list_urxe_head(&qrx->urx_deferred);
        if (urxe == NULL)
            break;

        ossl_list_urxe_remove(&qrx->urx_deferred, urxe);
        ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);
    }
}
493
/*
 * Provide RX keying material for an encryption level. On success, datagrams
 * previously deferred for lack of keys are requeued for processing.
 */
int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
                            uint32_t suite_id, EVP_MD *md,
                            const unsigned char *secret, size_t secret_len)
{
    int ok;

    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    ok = ossl_qrl_enc_level_set_provide_secret(&qrx->el_set, qrx->libctx,
                                               qrx->propq, enc_level,
                                               suite_id, md,
                                               secret, secret_len,
                                               qrx->init_key_phase_bit,
                                               /*is_tx=*/0);
    if (!ok)
        return 0;

    /*
     * Any packets we previously could not decrypt, we may now be able to
     * decrypt, so move any datagrams containing deferred packets from the
     * deferred to the pending queue.
     */
    qrx_requeue_deferred(qrx);
    return 1;
}
521
/* Discard keying material for an EL; fails only on an out-of-range level. */
int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
{
    if (enc_level >= QUIC_ENC_LEVEL_NUM)
        return 0;

    ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level);
    return 1;
}
530
531 /* Returns 1 if there are one or more pending RXEs. */
ossl_qrx_processed_read_pending(OSSL_QRX * qrx)532 int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
533 {
534 return !ossl_list_rxe_is_empty(&qrx->rx_pending);
535 }
536
537 /* Returns 1 if there are yet-unprocessed packets. */
ossl_qrx_unprocessed_read_pending(OSSL_QRX * qrx)538 int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
539 {
540 return !ossl_list_urxe_is_empty(&qrx->urx_pending)
541 || !ossl_list_urxe_is_empty(&qrx->urx_deferred);
542 }
543
544 /* Pop the next pending RXE. Returns NULL if no RXE is pending. */
qrx_pop_pending_rxe(OSSL_QRX * qrx)545 static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
546 {
547 RXE *rxe = ossl_list_rxe_head(&qrx->rx_pending);
548
549 if (rxe == NULL)
550 return NULL;
551
552 ossl_list_rxe_remove(&qrx->rx_pending, rxe);
553 return rxe;
554 }
555
556 /* Allocate a new RXE. */
qrx_alloc_rxe(size_t alloc_len)557 static RXE *qrx_alloc_rxe(size_t alloc_len)
558 {
559 RXE *rxe;
560
561 if (alloc_len >= SIZE_MAX - sizeof(RXE))
562 return NULL;
563
564 rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
565 if (rxe == NULL)
566 return NULL;
567
568 ossl_list_rxe_init_elem(rxe);
569 rxe->alloc_len = alloc_len;
570 rxe->data_len = 0;
571 rxe->refcount = 0;
572 return rxe;
573 }
574
575 /*
576 * Ensures there is at least one RXE in the RX free list, allocating a new entry
577 * if necessary. The returned RXE is in the RX free list; it is not popped.
578 *
579 * alloc_len is a hint which may be used to determine the RXE size if allocation
580 * is necessary. Returns NULL on allocation failure.
581 */
qrx_ensure_free_rxe(OSSL_QRX * qrx,size_t alloc_len)582 static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
583 {
584 RXE *rxe;
585
586 if (ossl_list_rxe_head(&qrx->rx_free) != NULL)
587 return ossl_list_rxe_head(&qrx->rx_free);
588
589 rxe = qrx_alloc_rxe(alloc_len);
590 if (rxe == NULL)
591 return NULL;
592
593 ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
594 return rxe;
595 }
596
597 /*
598 * Resize the data buffer attached to an RXE to be n bytes in size. The address
599 * of the RXE might change; the new address is returned, or NULL on failure, in
600 * which case the original RXE remains valid.
601 */
qrx_resize_rxe(RXE_LIST * rxl,RXE * rxe,size_t n)602 static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
603 {
604 RXE *rxe2, *p;
605
606 /* Should never happen. */
607 if (rxe == NULL)
608 return NULL;
609
610 if (n >= SIZE_MAX - sizeof(RXE))
611 return NULL;
612
613 /* Remove the item from the list to avoid accessing freed memory */
614 p = ossl_list_rxe_prev(rxe);
615 ossl_list_rxe_remove(rxl, rxe);
616
617 /* Should never resize an RXE which has been handed out. */
618 if (!ossl_assert(rxe->refcount == 0))
619 return NULL;
620
621 /*
622 * NOTE: We do not clear old memory, although it does contain decrypted
623 * data.
624 */
625 rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
626 if (rxe2 == NULL) {
627 /* Resize failed, restore old allocation. */
628 if (p == NULL)
629 ossl_list_rxe_insert_head(rxl, rxe);
630 else
631 ossl_list_rxe_insert_after(rxl, p, rxe);
632 return NULL;
633 }
634
635 if (p == NULL)
636 ossl_list_rxe_insert_head(rxl, rxe2);
637 else
638 ossl_list_rxe_insert_after(rxl, p, rxe2);
639
640 rxe2->alloc_len = n;
641 return rxe2;
642 }
643
644 /*
645 * Ensure the data buffer attached to an RXE is at least n bytes in size.
646 * Returns NULL on failure.
647 */
qrx_reserve_rxe(RXE_LIST * rxl,RXE * rxe,size_t n)648 static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
649 RXE *rxe, size_t n)
650 {
651 if (rxe->alloc_len >= n)
652 return rxe;
653
654 return qrx_resize_rxe(rxl, rxe, n);
655 }
656
657 /* Return a RXE handed out to the user back to our freelist. */
qrx_recycle_rxe(OSSL_QRX * qrx,RXE * rxe)658 static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
659 {
660 /* RXE should not be in any list */
661 assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
662 rxe->pkt.hdr = NULL;
663 rxe->pkt.peer = NULL;
664 rxe->pkt.local = NULL;
665 ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
666 }
667
668 /*
669 * Given a pointer to a pointer pointing to a buffer and the size of that
670 * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
671 * pointer may change due to realloc). *pi is the offset in bytes to copy the
672 * buffer to, and on success is updated to be the offset pointing after the
673 * copied buffer. *pptr is updated to point to the new location of the buffer.
674 */
qrx_relocate_buffer(OSSL_QRX * qrx,RXE ** prxe,size_t * pi,const unsigned char ** pptr,size_t buf_len)675 static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
676 const unsigned char **pptr, size_t buf_len)
677 {
678 RXE *rxe;
679 unsigned char *dst;
680
681 if (!buf_len)
682 return 1;
683
684 if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
685 return 0;
686
687 *prxe = rxe;
688 dst = (unsigned char *)rxe_data(rxe) + *pi;
689
690 memcpy(dst, *pptr, buf_len);
691 *pi += buf_len;
692 *pptr = dst;
693 return 1;
694 }
695
qrx_determine_enc_level(const QUIC_PKT_HDR * hdr)696 static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
697 {
698 switch (hdr->type) {
699 case QUIC_PKT_TYPE_INITIAL:
700 return QUIC_ENC_LEVEL_INITIAL;
701 case QUIC_PKT_TYPE_HANDSHAKE:
702 return QUIC_ENC_LEVEL_HANDSHAKE;
703 case QUIC_PKT_TYPE_0RTT:
704 return QUIC_ENC_LEVEL_0RTT;
705 case QUIC_PKT_TYPE_1RTT:
706 return QUIC_ENC_LEVEL_1RTT;
707
708 default:
709 assert(0);
710 case QUIC_PKT_TYPE_RETRY:
711 case QUIC_PKT_TYPE_VERSION_NEG:
712 return QUIC_ENC_LEVEL_INITIAL; /* not used */
713 }
714 }
715
/* Map an RXE's packet type to the PN space it belongs to. */
static uint32_t rxe_determine_pn_space(RXE *rxe)
{
    return ossl_quic_enc_level_to_pn_space(qrx_determine_enc_level(&rxe->hdr));
}
723
/*
 * Pre-decryption header sanity checks. first_dcid is non-NULL only for
 * packets after the first in a datagram, in which case their DCID must
 * match the first packet's.
 */
static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
                                  const QUIC_CONN_ID *first_dcid)
{
    /* Ensure version is what we want. */
    if (rxe->hdr.version != QUIC_VERSION_1
        && rxe->hdr.version != QUIC_VERSION_NONE)
        return 0;

    /* Clients should never receive 0-RTT packets. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
        return 0;

    if (first_dcid != NULL) {
        /* Version negotiation and retry packets must be the first packet. */
        if (!ossl_quic_pkt_type_can_share_dgram(rxe->hdr.type))
            return 0;

        /*
         * If this is not the first packet in a datagram, the destination
         * connection ID must match the one in that packet.
         */
        if (!ossl_assert(first_dcid->id_len < QUIC_MAX_CONN_ID_LEN))
            return 0;

        if (!ossl_quic_conn_id_eq(first_dcid, &rxe->hdr.dst_conn_id))
            return 0;
    }

    return 1;
}
753
754 /* Validate header and decode PN. */
qrx_validate_hdr(OSSL_QRX * qrx,RXE * rxe)755 static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
756 {
757 int pn_space = rxe_determine_pn_space(rxe);
758
759 if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
760 qrx->largest_pn[pn_space],
761 &rxe->pn))
762 return 0;
763
764 return 1;
765 }
766
767 /* Late packet header validation. */
qrx_validate_hdr_late(OSSL_QRX * qrx,RXE * rxe)768 static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe)
769 {
770 int pn_space = rxe_determine_pn_space(rxe);
771
772 /*
773 * Allow our user to decide whether to discard the packet before we try and
774 * decrypt it.
775 */
776 if (qrx->validation_cb != NULL
777 && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
778 return 0;
779
780 return 1;
781 }
782
/*
 * Retrieves the correct cipher context for an EL and key phase. Writes the key
 * epoch number actually used for packet decryption to *rx_key_epoch.
 *
 * Returns the index to use into el->cctx/el->iv, or SIZE_MAX on error.
 * *is_old_key is set to 1 only when the previous key epoch is selected
 * (possible only in the UPDATING state); otherwise 0.
 */
static size_t qrx_get_cipher_ctx_idx(OSSL_QRX *qrx, OSSL_QRL_ENC_LEVEL *el,
                                     uint32_t enc_level,
                                     unsigned char key_phase_bit,
                                     uint64_t *rx_key_epoch,
                                     int *is_old_key)
{
    size_t idx;

    *is_old_key = 0;

    /* Only 1-RTT has key phases; all other ELs always use slot 0, epoch 0. */
    if (enc_level != QUIC_ENC_LEVEL_1RTT) {
        *rx_key_epoch = 0;
        return 0;
    }

    if (!ossl_assert(key_phase_bit <= 1))
        return SIZE_MAX;

    /*
     * RFC 9001 requires that we not create timing channels which could reveal
     * the decrypted value of the Key Phase bit. We usually handle this by
     * keeping the cipher contexts for both the current and next key epochs
     * around, so that we just select a cipher context blindly using the key
     * phase bit, which is time-invariant.
     *
     * In the COOLDOWN state, we only have one keyslot/cipher context. RFC 9001
     * suggests an implementation strategy to avoid creating a timing channel in
     * this case:
     *
     *   Endpoints can use randomized packet protection keys in place of
     *   discarded keys when key updates are not yet permitted.
     *
     * Rather than use a randomised key, we simply use our existing key as it
     * will fail AEAD verification anyway. This avoids the need to keep around a
     * dedicated garbage key.
     *
     * Note: Accessing different cipher contexts is technically not
     * timing-channel safe due to microarchitectural side channels, but this is
     * the best we can reasonably do and appears to be directly suggested by the
     * RFC.
     */
    idx = (el->state == QRL_EL_STATE_PROV_COOLDOWN ? el->key_epoch & 1
                                                   : key_phase_bit);

    /*
     * We also need to determine the key epoch number which this index
     * corresponds to. This is so we can report the key epoch number in the
     * OSSL_QRX_PKT structure, which callers need to validate whether it was OK
     * for a packet to be sent using a given key epoch's keys.
     */
    switch (el->state) {
    case QRL_EL_STATE_PROV_NORMAL:
        /*
         * If we are in the NORMAL state, usually the KP bit will match the LSB
         * of our key epoch, meaning no new key update is being signalled. If it
         * does not match, this means the packet (purports to) belong to
         * the next key epoch.
         *
         * IMPORTANT: The AEAD tag has not been verified yet when this function
         * is called, so this code must be timing-channel safe, hence use of
         * XOR. Moreover, the value output below is not yet authenticated.
         */
        *rx_key_epoch
            = el->key_epoch + ((el->key_epoch & 1) ^ (uint64_t)key_phase_bit);
        break;

    case QRL_EL_STATE_PROV_UPDATING:
        /*
         * If we are in the UPDATING state, usually the KP bit will match the
         * LSB of our key epoch. If it does not match, this means that the
         * packet (purports to) belong to the previous key epoch.
         *
         * As above, must be timing-channel safe.
         */
        *is_old_key = (el->key_epoch & 1) ^ (uint64_t)key_phase_bit;
        *rx_key_epoch = el->key_epoch - (uint64_t)*is_old_key;
        break;

    case QRL_EL_STATE_PROV_COOLDOWN:
        /*
         * If we are in COOLDOWN, there is only one key epoch we can possibly
         * decrypt with, so just try that. If AEAD decryption fails, the
         * value we output here isn't used anyway.
         */
        *rx_key_epoch = el->key_epoch;
        break;
    }

    return idx;
}
877
/*
 * Tries to decrypt a packet payload.
 *
 * Returns 1 on success or 0 on failure (which is permanent). The payload is
 * decrypted from src and written to dst. The buffer dst must be of at least
 * src_len bytes in length. The actual length of the output in bytes is written
 * to *dec_len on success, which will always be equal to or less than (usually
 * less than) src_len.
 *
 * aad/aad_len cover the unprotected packet header, pn is the decoded packet
 * number used to derive the nonce, and *rx_key_epoch receives the key epoch
 * selected for decryption (see qrx_get_cipher_ctx_idx()).
 */
static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
                                const unsigned char *src,
                                size_t src_len, size_t *dec_len,
                                const unsigned char *aad, size_t aad_len,
                                QUIC_PN pn, uint32_t enc_level,
                                unsigned char key_phase_bit,
                                uint64_t *rx_key_epoch)
{
    int l = 0, l2 = 0, is_old_key, nonce_len;
    unsigned char nonce[EVP_MAX_IV_LENGTH];
    size_t i, cctx_idx;
    OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
                                                        enc_level, 1);
    EVP_CIPHER_CTX *cctx;

    /* Lengths are passed to EVP APIs which take int; reject anything wider. */
    if (src_len > INT_MAX || aad_len > INT_MAX)
        return 0;

    /* We should not have been called if we do not have key material. */
    if (!ossl_assert(el != NULL))
        return 0;

    /* The ciphertext must be strictly longer than the AEAD tag. */
    if (el->tag_len >= src_len)
        return 0;

    /*
     * If we have failed to authenticate a certain number of ciphertexts, refuse
     * to decrypt any more ciphertexts.
     */
    if (qrx->forged_pkt_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
        return 0;

    cctx_idx = qrx_get_cipher_ctx_idx(qrx, el, enc_level, key_phase_bit,
                                      rx_key_epoch, &is_old_key);
    if (!ossl_assert(cctx_idx < OSSL_NELEM(el->cctx)))
        return 0;

    if (is_old_key && pn >= qrx->cur_epoch_start_pn)
        /*
         * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet with
         * a given PN, it MUST discard all packets in the same PN space with
         * higher PNs if they cannot be successfully unprotected with the same
         * key, or -- if there is a key update -- a subsequent packet protection
         * key.
         *
         * In other words, once a PN x triggers a KU, it is invalid for us to
         * receive a packet with a newer PN y (y > x) using the old keys.
         */
        return 0;

    cctx = el->cctx[cctx_idx];

    /* Construct nonce (nonce=IV ^ PN). */
    nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
    if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN)))
        return 0;

    /* XOR the big-endian PN into the low-order bytes of the IV. */
    memcpy(nonce, el->iv[cctx_idx], nonce_len);
    for (i = 0; i < sizeof(QUIC_PN); ++i)
        nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));

    /* type and key will already have been setup; feed the IV. */
    if (EVP_CipherInit_ex(cctx, NULL,
                          NULL, NULL, nonce, /*enc=*/0)
        != 1)
        return 0;

    /* Feed the AEAD tag we got so the cipher can validate it. */
    if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG,
                            el->tag_len,
                            (unsigned char *)src + src_len - el->tag_len)
        != 1)
        return 0;

    /* Feed AAD data. (l is overwritten by the payload update below.) */
    if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
        return 0;

    /* Feed encrypted packet body. */
    if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
        return 0;

#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    /*
     * Throw away what we just decrypted and just use the ciphertext instead
     * (which should be unencrypted)
     */
    memcpy(dst, src, l);

    /* Pretend to authenticate the tag but ignore it */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* We don't care */
    }
#else
    /* Ensure authentication succeeded. */
    if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
        /* Authentication failed, increment failed auth counter. */
        ++qrx->forged_pkt_count;
        return 0;
    }
#endif

    *dec_len = l;
    return 1;
}
992
/*
 * Explicitly swallow an int result; used to document that a return value is
 * deliberately ignored.
 */
static ossl_inline void ignore_res(int x)
{
    /* No-op. */
}
997
/*
 * Handle a peer-initiated key update detected at PN pn: advance the 1-RTT EL
 * key state, record the epoch start PN and notify the registered callback.
 */
static void qrx_key_update_initiated(OSSL_QRX *qrx, QUIC_PN pn)
{
    /* If we are already in RXKU, don't fire the callback again. */
    if (!ossl_qrl_enc_level_set_key_update(&qrx->el_set, QUIC_ENC_LEVEL_1RTT))
        return;

    qrx->cur_epoch_start_pn = pn;

    if (qrx->key_update_cb != NULL)
        qrx->key_update_cb(pn, qrx->key_update_cb_arg);
}
1009
/*
 * Process a single packet in a datagram.
 *
 * pkt points into the URXE buffer at the start of the packet and is advanced
 * past the packet on return whenever its length could be determined (even on
 * failure, so any further packets in the datagram can still be examined).
 * pkt_idx is the index of this packet within the datagram; first_dcid is
 * written for pkt_idx == 0 and read for subsequent packets to enforce that
 * all packets in a datagram carry the same DCID. datagram_len is the size of
 * the whole datagram containing this packet.
 *
 * Returns 1 if processing of the packet was deferred (e.g. keys not yet
 * available) and 0 otherwise; a 0 return deliberately does not distinguish
 * success from permanent failure (see the comments at the return sites).
 */
static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
                           PACKET *pkt, size_t pkt_idx,
                           QUIC_CONN_ID *first_dcid,
                           size_t datagram_len)
{
    RXE *rxe;
    const unsigned char *eop = NULL;
    size_t i, aad_len = 0, dec_len = 0;
    PACKET orig_pkt = *pkt;
    const unsigned char *sop = PACKET_data(pkt);
    unsigned char *dst;
    char need_second_decode = 0, already_processed = 0;
    QUIC_PKT_HDR_PTRS ptrs;
    uint32_t pn_space, enc_level;
    OSSL_QRL_ENC_LEVEL *el = NULL;
    uint64_t rx_key_epoch = UINT64_MAX;

    /*
     * Get a free RXE. If we need to allocate a new one, use the packet length
     * as a good ballpark figure.
     */
    rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
    if (rxe == NULL)
        return 0;

    /* Have we already processed this packet? */
    if (pkt_is_marked(&urxe->processed, pkt_idx))
        already_processed = 1;

    /*
     * Decode the header into the RXE structure. We first decrypt and read the
     * unprotected part of the packet header (unless we already removed header
     * protection, in which case we decode all of it).
     */
    need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
    if (!ossl_quic_wire_decode_pkt_hdr(pkt,
                                       qrx->short_conn_id_len,
                                       need_second_decode, 0, &rxe->hdr, &ptrs,
                                       NULL))
        goto malformed;

    /*
     * Our successful decode above included an intelligible length and the
     * PACKET is now pointing to the end of the QUIC packet.
     */
    eop = PACKET_data(pkt);

    /*
     * Make a note of the first packet's DCID so we can later ensure the
     * destination connection IDs of all packets in a datagram match.
     */
    if (pkt_idx == 0)
        *first_dcid = rxe->hdr.dst_conn_id;

    /*
     * Early header validation. Since we now know the packet length, we can also
     * now skip over it if we already processed it.
     */
    if (already_processed
        || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : first_dcid))
        /*
         * Already processed packets are handled identically to malformed
         * packets; i.e., they are ignored.
         */
        goto malformed;

    if (!ossl_quic_pkt_type_is_encrypted(rxe->hdr.type)) {
        /*
         * Version negotiation and retry packets are a special case. They do not
         * contain a payload which needs decrypting and have no header
         * protection.
         */

        /* Just copy the payload from the URXE to the RXE. */
        if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
            /*
             * Allocation failure. EOP will be pointing to the end of the
             * datagram so processing of this datagram will end here.
             */
            goto malformed;

        /* We are now committed to returning the packet. */
        memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
        pkt_mark(&urxe->processed, pkt_idx);

        rxe->hdr.data   = rxe_data(rxe);
        rxe->pn         = QUIC_PN_INVALID;

        rxe->data_len       = rxe->hdr.len;
        rxe->datagram_len   = datagram_len;
        rxe->key_epoch      = 0;
        rxe->peer           = urxe->peer;
        rxe->local          = urxe->local;
        rxe->time           = urxe->time;
        rxe->datagram_id    = urxe->datagram_id;

        /* Move RXE to pending. */
        ossl_list_rxe_remove(&qrx->rx_free, rxe);
        ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
        return 0; /* success, did not defer */
    }

    /* Determine encryption level of packet. */
    enc_level = qrx_determine_enc_level(&rxe->hdr);

    /* If we do not have keying material for this encryption level yet, defer. */
    switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
    case 1:
        /* We have keys. */
        if (enc_level == QUIC_ENC_LEVEL_1RTT && !qrx->allow_1rtt)
            /*
             * But we cannot process 1-RTT packets until the handshake is
             * completed (RFC 9000 s. 5.7).
             */
            goto cannot_decrypt;

        break;
    case 0:
        /* No keys yet. */
        goto cannot_decrypt;
    default:
        /* We already discarded keys for this EL, we will never process this.*/
        goto malformed;
    }

    /*
     * We will copy any token included in the packet to the start of our RXE
     * data buffer (so that we don't reference the URXE buffer any more and can
     * recycle it). Track our position in the RXE buffer by index instead of
     * pointer as the pointer may change as reallocs occur.
     */
    i = 0;

    /*
     * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
     * also has fields pointing into the PACKET buffer which will be going away
     * soon (the URXE will be reused for another incoming packet).
     *
     * Firstly, relocate some of these fields into the RXE as needed.
     *
     * Relocate token buffer and fix pointer.
     */
    if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
        const unsigned char *token = rxe->hdr.token;

        /*
         * This may change the value of rxe and change the value of the token
         * pointer as well. So we must make a temporary copy of the pointer to
         * the token, and then copy it back into the new location of the rxe
         */
        if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
            goto malformed;

        rxe->hdr.token = token;
    }

    /* Now remove header protection. */
    *pkt = orig_pkt;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
    assert(el != NULL); /* Already checked above */

    if (need_second_decode) {
        if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
            goto malformed;

        /*
         * We have removed header protection, so don't attempt to do it again if
         * the packet gets deferred and processed again.
         */
        pkt_mark(&urxe->hpr_removed, pkt_idx);

        /* Decode the now unprotected header. */
        if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
                                          0, 0, &rxe->hdr, NULL, NULL)
            != 1)
            goto malformed;
    }

    /* Validate header and decode PN. */
    if (!qrx_validate_hdr(qrx, rxe))
        goto malformed;

    /* Report the (header portion of the) packet to any message callback. */
    if (qrx->msg_callback != NULL)
        qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, sop,
                          eop - sop - rxe->hdr.len, qrx->msg_callback_ssl,
                          qrx->msg_callback_arg);

    /*
     * The AAD data is the entire (unprotected) packet header including the PN.
     * The packet header has been unprotected in place, so we can just reuse the
     * PACKET buffer. The header ends where the payload begins.
     */
    aad_len = rxe->hdr.data - sop;

    /* Ensure the RXE buffer size is adequate for our payload. */
    if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
        /*
         * Allocation failure, treat as malformed and do not bother processing
         * any further packets in the datagram as they are likely to also
         * encounter allocation failures.
         */
        eop = NULL;
        goto malformed;
    }

    /*
     * We decrypt the packet body to immediately after the token at the start of
     * the RXE buffer (where present).
     *
     * Do the decryption from the PACKET (which points into URXE memory) to our
     * RXE payload (single-copy decryption), then fixup the pointers in the
     * header to point to our new buffer.
     *
     * If decryption fails this is considered a permanent error; we defer
     * packets we don't yet have decryption keys for above, so if this fails,
     * something has gone wrong with the handshake process or a packet has been
     * corrupted.
     */
    dst = (unsigned char *)rxe_data(rxe) + i;
    if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
                              &dec_len, sop, aad_len, rxe->pn, enc_level,
                              rxe->hdr.key_phase, &rx_key_epoch))
        goto malformed;

    /*
     * -----------------------------------------------------
     *   IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
     *              AND MUST BE TIMING-CHANNEL SAFE.
     * -----------------------------------------------------
     *
     * At this point, we have successfully authenticated the AEAD tag and no
     * longer need to worry about exposing the PN, PN length or Key Phase bit in
     * timing channels. Invoke any configured validation callback to allow for
     * rejection of duplicate PNs.
     */
    if (!qrx_validate_hdr_late(qrx, rxe))
        goto malformed;

    /* Check for a Key Phase bit differing from our expectation. */
    if (rxe->hdr.type == QUIC_PKT_TYPE_1RTT
        && rxe->hdr.key_phase != (el->key_epoch & 1))
        qrx_key_update_initiated(qrx, rxe->pn);

    /*
     * We have now successfully decrypted the packet payload. If there are
     * additional packets in the datagram, it is possible we will fail to
     * decrypt them and need to defer them until we have some key material we
     * don't currently possess. If this happens, the URXE will be moved to the
     * deferred queue. Since a URXE corresponds to one datagram, which may
     * contain multiple packets, we must ensure any packets we have already
     * processed in the URXE are not processed again (this is an RFC
     * requirement). We do this by marking the nth packet in the datagram as
     * processed.
     *
     * We are now committed to returning this decrypted packet to the user,
     * meaning we now consider the packet processed and must mark it
     * accordingly.
     */
    pkt_mark(&urxe->processed, pkt_idx);

    /*
     * Update header to point to the decrypted buffer, which may be shorter
     * due to AEAD tags, block padding, etc.
     */
    rxe->hdr.data       = dst;
    rxe->hdr.len        = dec_len;
    rxe->data_len       = dec_len;
    rxe->datagram_len   = datagram_len;
    rxe->key_epoch      = rx_key_epoch;

    /* We processed the PN successfully, so update largest processed PN. */
    pn_space = rxe_determine_pn_space(rxe);
    if (rxe->pn > qrx->largest_pn[pn_space])
        qrx->largest_pn[pn_space] = rxe->pn;

    /* Copy across network addresses and RX time from URXE to RXE. */
    rxe->peer           = urxe->peer;
    rxe->local          = urxe->local;
    rxe->time           = urxe->time;
    rxe->datagram_id    = urxe->datagram_id;

    /* Move RXE to pending. */
    ossl_list_rxe_remove(&qrx->rx_free, rxe);
    ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
    return 0; /* success, did not defer; not distinguished from failure */

cannot_decrypt:
    /*
     * We cannot process this packet right now (but might be able to later). We
     * MUST attempt to process any other packets in the datagram, so defer it
     * and skip over it.
     */
    assert(eop != NULL && eop >= PACKET_data(pkt));
    /*
     * We don't care if this fails as it will just result in the packet being at
     * the end of the datagram buffer.
     */
    ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    return 1; /* deferred */

malformed:
    if (eop != NULL) {
        /*
         * This packet cannot be processed and will never be processable. We
         * were at least able to decode its header and determine its length, so
         * we can skip over it and try to process any subsequent packets in the
         * datagram.
         *
         * Mark as processed as an optimization.
         */
        assert(eop >= PACKET_data(pkt));
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
    } else {
        /*
         * This packet cannot be processed and will never be processable.
         * Because even its header is not intelligible, we cannot examine any
         * further packets in the datagram because its length cannot be
         * discerned.
         *
         * Advance over the entire remainder of the datagram, and mark it as
         * processed as an optimization.
         */
        pkt_mark(&urxe->processed, pkt_idx);
        /* We don't care if this fails (see above) */
        ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
    }
    return 0; /* failure, did not defer; not distinguished from success */
}
1342
1343 /* Process a datagram which was received. */
qrx_process_datagram(OSSL_QRX * qrx,QUIC_URXE * e,const unsigned char * data,size_t data_len)1344 static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
1345 const unsigned char *data,
1346 size_t data_len)
1347 {
1348 int have_deferred = 0;
1349 PACKET pkt;
1350 size_t pkt_idx = 0;
1351 QUIC_CONN_ID first_dcid = { 255 };
1352
1353 qrx->bytes_received += data_len;
1354
1355 if (!PACKET_buf_init(&pkt, data, data_len))
1356 return 0;
1357
1358 for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
1359 /*
1360 * A packet smaller than the minimum possible QUIC packet size is not
1361 * considered valid. We also ignore more than a certain number of
1362 * packets within the same datagram.
1363 */
1364 if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
1365 || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
1366 break;
1367
1368 /*
1369 * We note whether packet processing resulted in a deferral since
1370 * this means we need to move the URXE to the deferred list rather
1371 * than the free list after we're finished dealing with it for now.
1372 *
1373 * However, we don't otherwise care here whether processing succeeded or
1374 * failed, as the RFC says even if a packet in a datagram is malformed,
1375 * we should still try to process any packets following it.
1376 *
1377 * In the case where the packet is so malformed we can't determine its
1378 * length, qrx_process_pkt will take care of advancing to the end of
1379 * the packet, so we will exit the loop automatically in this case.
1380 */
1381 if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_dcid, data_len))
1382 have_deferred = 1;
1383 }
1384
1385 /* Only report whether there were any deferrals. */
1386 return have_deferred;
1387 }
1388
1389 /* Process a single pending URXE. */
qrx_process_one_urxe(OSSL_QRX * qrx,QUIC_URXE * e)1390 static int qrx_process_one_urxe(OSSL_QRX *qrx, QUIC_URXE *e)
1391 {
1392 int was_deferred;
1393
1394 /* The next URXE we process should be at the head of the pending list. */
1395 if (!ossl_assert(e == ossl_list_urxe_head(&qrx->urx_pending)))
1396 return 0;
1397
1398 /*
1399 * Attempt to process the datagram. The return value indicates only if
1400 * processing of the datagram was deferred. If we failed to process the
1401 * datagram, we do not attempt to process it again and silently eat the
1402 * error.
1403 */
1404 was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
1405 e->data_len);
1406
1407 /*
1408 * Remove the URXE from the pending list and return it to
1409 * either the free or deferred list.
1410 */
1411 ossl_list_urxe_remove(&qrx->urx_pending, e);
1412 if (was_deferred > 0 && (e->deferred || qrx->num_deferred < qrx->max_deferred)) {
1413 ossl_list_urxe_insert_tail(&qrx->urx_deferred, e);
1414 if (!e->deferred) {
1415 e->deferred = 1;
1416 ++qrx->num_deferred;
1417 }
1418 } else {
1419 if (e->deferred) {
1420 e->deferred = 0;
1421 --qrx->num_deferred;
1422 }
1423 ossl_quic_demux_release_urxe(qrx->demux, e);
1424 }
1425
1426 return 1;
1427 }
1428
1429 /* Process any pending URXEs to generate pending RXEs. */
qrx_process_pending_urxl(OSSL_QRX * qrx)1430 static int qrx_process_pending_urxl(OSSL_QRX *qrx)
1431 {
1432 QUIC_URXE *e;
1433
1434 while ((e = ossl_list_urxe_head(&qrx->urx_pending)) != NULL)
1435 if (!qrx_process_one_urxe(qrx, e))
1436 return 0;
1437
1438 return 1;
1439 }
1440
/*
 * Pop the next processed packet, giving the caller a reference to it via
 * *ppkt. Returns 1 on success, 0 if no packet is available.
 */
int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT **ppkt)
{
    RXE *rxe;

    /* If nothing is ready yet, try converting pending datagrams into RXEs. */
    if (!ossl_qrx_processed_read_pending(qrx)) {
        if (!qrx_process_pending_urxl(qrx)
                || !ossl_qrx_processed_read_pending(qrx))
            return 0;
    }

    rxe = qrx_pop_pending_rxe(qrx);
    if (!ossl_assert(rxe != NULL))
        return 0;

    assert(rxe->refcount == 0);
    rxe->refcount = 1;

    /* Populate the public packet view from the RXE. */
    rxe->pkt.hdr            = &rxe->hdr;
    rxe->pkt.pn             = rxe->pn;
    rxe->pkt.time           = rxe->time;
    rxe->pkt.datagram_len   = rxe->datagram_len;

    /* Only expose addresses which were actually recorded. */
    if (BIO_ADDR_family(&rxe->peer) != AF_UNSPEC)
        rxe->pkt.peer = &rxe->peer;
    else
        rxe->pkt.peer = NULL;

    if (BIO_ADDR_family(&rxe->local) != AF_UNSPEC)
        rxe->pkt.local = &rxe->local;
    else
        rxe->pkt.local = NULL;

    rxe->pkt.key_epoch      = rxe->key_epoch;
    rxe->pkt.datagram_id    = rxe->datagram_id;
    rxe->pkt.qrx            = qrx;

    *ppkt = &rxe->pkt;
    return 1;
}
1475
/* Drop a reference to a packet, recycling its RXE when the count hits zero. */
void ossl_qrx_pkt_release(OSSL_QRX_PKT *pkt)
{
    RXE *rxe = (RXE *)pkt;

    if (pkt == NULL)
        return;

    assert(rxe->refcount > 0);
    --rxe->refcount;
    if (rxe->refcount == 0)
        qrx_recycle_rxe(pkt->qrx, rxe);
}
1488
/*
 * Drop a reference to a packet WITHOUT recycling its RXE back to the QRX
 * free list (contrast ossl_qrx_pkt_release). The RXE must already be unlinked
 * from the QRX lists, which the trailing assertion checks; presumably the
 * caller then owns the entry — confirm against callers before relying on this.
 */
void ossl_qrx_pkt_orphan(OSSL_QRX_PKT *pkt)
{
    RXE *rxe;

    if (pkt == NULL)
        return;

    rxe = (RXE *)pkt;
    assert(rxe->refcount > 0);
    rxe->refcount--;
    /* An orphaned RXE must not sit on the free or pending lists. */
    assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
}
1501
/* Take an additional reference to a packet; caller must already hold one. */
void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT *pkt)
{
    RXE *entry = (RXE *)pkt;

    assert(entry->refcount > 0);
    entry->refcount += 1;
}
1509
/*
 * Return the running total of datagram bytes received, optionally resetting
 * the counter to zero when clear is nonzero.
 */
uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
{
    uint64_t total = qrx->bytes_received;

    if (clear != 0)
        qrx->bytes_received = 0;

    return total;
}
1519
/* Install the post-authentication (late) header validation callback. */
int ossl_qrx_set_late_validation_cb(OSSL_QRX *qrx,
                                    ossl_qrx_late_validation_cb *cb,
                                    void *cb_arg)
{
    qrx->validation_cb_arg  = cb_arg;
    qrx->validation_cb      = cb;
    return 1;
}
1528
/* Install the callback invoked when a peer-initiated key update begins. */
int ossl_qrx_set_key_update_cb(OSSL_QRX *qrx,
                               ossl_qrx_key_update_cb *cb,
                               void *cb_arg)
{
    qrx->key_update_cb_arg  = cb_arg;
    qrx->key_update_cb      = cb;
    return 1;
}
1537
/* Return the current 1-RTT key epoch, or UINT64_MAX if the EL is unavailable. */
uint64_t ossl_qrx_get_key_epoch(OSSL_QRX *qrx)
{
    OSSL_QRL_ENC_LEVEL *el;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, QUIC_ENC_LEVEL_1RTT, 1);
    if (el == NULL)
        return UINT64_MAX;

    return el->key_epoch;
}
1545
/*
 * Advance 1-RTT key update state on timeout: finish an in-progress update,
 * and (when normal is nonzero) also finish the cooldown period. Returns 1 on
 * success or if no transition was needed, 0 on failure.
 */
int ossl_qrx_key_update_timeout(OSSL_QRX *qrx, int normal)
{
    OSSL_QRL_ENC_LEVEL *el;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, QUIC_ENC_LEVEL_1RTT, 1);
    if (el == NULL)
        return 0;

    /* Note: the *_done calls below transition state, so order matters. */
    if (el->state == QRL_EL_STATE_PROV_UPDATING) {
        if (!ossl_qrl_enc_level_set_key_update_done(&qrx->el_set,
                                                    QUIC_ENC_LEVEL_1RTT))
            return 0;
    }

    if (normal && el->state == QRL_EL_STATE_PROV_COOLDOWN) {
        if (!ossl_qrl_enc_level_set_key_cooldown_done(&qrx->el_set,
                                                      QUIC_ENC_LEVEL_1RTT))
            return 0;
    }

    return 1;
}
1566
/* Return the running count of packets which failed AEAD authentication. */
uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX *qrx)
{
    return qrx->forged_pkt_count;
}
1571
/*
 * Return the maximum tolerable number of forged packets for the suite in use
 * at the given EL, or UINT64_MAX if the EL is unavailable.
 */
uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX *qrx,
                                           uint32_t enc_level)
{
    OSSL_QRL_ENC_LEVEL *el;

    el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
    if (el == NULL)
        return UINT64_MAX;

    return ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
}
1581
/*
 * Permit processing of 1-RTT packets and requeue any datagrams previously
 * deferred for lack of this permission. Idempotent.
 */
void ossl_qrx_allow_1rtt_processing(OSSL_QRX *qrx)
{
    if (!qrx->allow_1rtt) {
        qrx->allow_1rtt = 1;
        qrx_requeue_deferred(qrx);
    }
}
1590
/* Install the message callback and the SSL object passed through to it. */
void ossl_qrx_set_msg_callback(OSSL_QRX *qrx, ossl_msg_cb msg_callback,
                               SSL *msg_callback_ssl)
{
    qrx->msg_callback_ssl   = msg_callback_ssl;
    qrx->msg_callback       = msg_callback;
}
1597
/* Set the opaque argument passed to the message callback. */
void ossl_qrx_set_msg_callback_arg(OSSL_QRX *qrx, void *msg_callback_arg)
{
    qrx->msg_callback_arg = msg_callback_arg;
}
1602
/* Return the DCID length assumed when decoding short-header packets. */
size_t ossl_qrx_get_short_hdr_conn_id_len(OSSL_QRX *qrx)
{
    return qrx->short_conn_id_len;
}
1607