xref: /linux/net/tls/tls_sw.c (revision a55f7f5f29b32c2c53cc291899cf9b0c25a07f7c)
1 /*
2  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4  * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5  * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6  * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7  * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
8  *
9  * This software is available to you under a choice of one of two
10  * licenses.  You may choose to be licensed under the terms of the GNU
11  * General Public License (GPL) Version 2, available from the file
12  * COPYING in the main directory of this source tree, or the
13  * OpenIB.org BSD license below:
14  *
15  *     Redistribution and use in source and binary forms, with or
16  *     without modification, are permitted provided that the following
17  *     conditions are met:
18  *
19  *      - Redistributions of source code must retain the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer.
22  *
23  *      - Redistributions in binary form must reproduce the above
24  *        copyright notice, this list of conditions and the following
25  *        disclaimer in the documentation and/or other materials
26  *        provided with the distribution.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35  * SOFTWARE.
36  */
37 
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/splice.h>
43 #include <crypto/aead.h>
44 
45 #include <net/strparser.h>
46 #include <net/tls.h>
47 #include <trace/events/sock.h>
48 
49 #include "tls.h"
50 
51 struct tls_decrypt_arg {
52 	struct_group(inargs,
53 	bool zc;
54 	bool async;
55 	bool async_done;
56 	u8 tail;
57 	);
58 
59 	struct sk_buff *skb;
60 };
61 
62 struct tls_decrypt_ctx {
63 	struct sock *sk;
64 	u8 iv[TLS_MAX_IV_SIZE];
65 	u8 aad[TLS_MAX_AAD_SIZE];
66 	u8 tail;
67 	bool free_sgout;
68 	struct scatterlist sg[];
69 };
70 
71 noinline void tls_err_abort(struct sock *sk, int err)
72 {
73 	WARN_ON_ONCE(err >= 0);
74 	/* sk->sk_err should contain a positive error code. */
75 	WRITE_ONCE(sk->sk_err, -err);
76 	/* Paired with smp_rmb() in tcp_poll() */
77 	smp_wmb();
78 	sk_error_report(sk);
79 }
80 
81 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
82                      unsigned int recursion_level)
83 {
84         int start = skb_headlen(skb);
85         int i, chunk = start - offset;
86         struct sk_buff *frag_iter;
87         int elt = 0;
88 
89         if (unlikely(recursion_level >= 24))
90                 return -EMSGSIZE;
91 
92         if (chunk > 0) {
93                 if (chunk > len)
94                         chunk = len;
95                 elt++;
96                 len -= chunk;
97                 if (len == 0)
98                         return elt;
99                 offset += chunk;
100         }
101 
102         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
103                 int end;
104 
105                 WARN_ON(start > offset + len);
106 
107                 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
108                 chunk = end - offset;
109                 if (chunk > 0) {
110                         if (chunk > len)
111                                 chunk = len;
112                         elt++;
113                         len -= chunk;
114                         if (len == 0)
115                                 return elt;
116                         offset += chunk;
117                 }
118                 start = end;
119         }
120 
121         if (unlikely(skb_has_frag_list(skb))) {
122                 skb_walk_frags(skb, frag_iter) {
123                         int end, ret;
124 
125                         WARN_ON(start > offset + len);
126 
127                         end = start + frag_iter->len;
128                         chunk = end - offset;
129                         if (chunk > 0) {
130                                 if (chunk > len)
131                                         chunk = len;
132                                 ret = __skb_nsg(frag_iter, offset - start, chunk,
133                                                 recursion_level + 1);
134                                 if (unlikely(ret < 0))
135                                         return ret;
136                                 elt += ret;
137                                 len -= chunk;
138                                 if (len == 0)
139                                         return elt;
140                                 offset += chunk;
141                         }
142                         start = end;
143                 }
144         }
145         BUG_ON(len);
146         return elt;
147 }
148 
149 /* Return the number of scatterlist elements required to completely map the
150  * skb, or -EMSGSIZE if the recursion depth is exceeded.
151  */
152 static int skb_nsg(struct sk_buff *skb, int offset, int len)
153 {
154         return __skb_nsg(skb, offset, len, 0);
155 }
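/* Worked example (a sketch): for an skb with 100 bytes in the linear
 * area and two page frags, skb_nsg(skb, 0, skb->len) counts one element
 * for the head and one per frag, returning 3.  With offset 100 the head
 * is skipped and it returns 2.
 */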
156 
157 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
158 			      struct tls_decrypt_arg *darg)
159 {
160 	struct strp_msg *rxm = strp_msg(skb);
161 	struct tls_msg *tlm = tls_msg(skb);
162 	int sub = 0;
163 
164 	/* Determine zero-padding length */
165 	if (prot->version == TLS_1_3_VERSION) {
166 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
167 		char content_type = darg->zc ? darg->tail : 0;
168 		int err;
169 
170 		while (content_type == 0) {
171 			if (offset < prot->prepend_size)
172 				return -EBADMSG;
173 			err = skb_copy_bits(skb, rxm->offset + offset,
174 					    &content_type, 1);
175 			if (err)
176 				return err;
177 			if (content_type)
178 				break;
179 			sub++;
180 			offset--;
181 		}
182 		tlm->control = content_type;
183 	}
184 	return sub;
185 }
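/* For reference, the TLS 1.3 inner plaintext that the loop above walks
 * backwards over is laid out per RFC 8446, section 5.2:
 *
 *	struct {
 *		opaque content[TLSPlaintext.length];
 *		ContentType type;
 *		uint8 zeros[length_of_padding];
 *	} TLSInnerPlaintext;
 *
 * i.e. the real content type is the last non-zero byte before the tag.
 */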
186 
187 static void tls_decrypt_done(void *data, int err)
188 {
189 	struct aead_request *aead_req = data;
190 	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
191 	struct scatterlist *sgout = aead_req->dst;
192 	struct tls_sw_context_rx *ctx;
193 	struct tls_decrypt_ctx *dctx;
194 	struct tls_context *tls_ctx;
195 	struct scatterlist *sg;
196 	unsigned int pages;
197 	struct sock *sk;
198 	int aead_size;
199 
200 	/* If requests get too backlogged, the crypto API returns -EBUSY and
201 	 * calls ->complete(-EINPROGRESS) immediately, followed by ->complete(0),
202 	 * to make waiting for the backlog to flush with crypto_wait_req() easier.
203 	 * The first wait converts -EBUSY -> -EINPROGRESS, and the second one
204 	 * -EINPROGRESS -> 0.
205 	 * We have a single struct crypto_async_request per direction, so this
206 	 * scheme doesn't help us; just ignore the first ->complete().
207 	 */
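	/* Sketch of the resulting sequence for one backlogged request:
	 *
	 *   crypto_aead_decrypt()      returns -EBUSY to the submitter
	 *   ->complete(-EINPROGRESS)   ignored by the check below
	 *   ... backlog drains, the operation actually runs ...
	 *   ->complete(0 or -errno)    the real completion, handled below
	 */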
208 	if (err == -EINPROGRESS)
209 		return;
210 
211 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
212 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
213 	dctx = (void *)((u8 *)aead_req + aead_size);
214 
215 	sk = dctx->sk;
216 	tls_ctx = tls_get_ctx(sk);
217 	ctx = tls_sw_ctx_rx(tls_ctx);
218 
219 	/* Propagate if there was an err */
220 	if (err) {
221 		if (err == -EBADMSG)
222 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
223 		ctx->async_wait.err = err;
224 		tls_err_abort(sk, err);
225 	}
226 
227 	/* Free the destination pages if the skb was not decrypted in place */
228 	if (dctx->free_sgout) {
229 		/* Skip the first S/G entry as it points to AAD */
230 		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
231 			if (!sg)
232 				break;
233 			put_page(sg_page(sg));
234 		}
235 	}
236 
237 	kfree(aead_req);
238 
239 	if (atomic_dec_and_test(&ctx->decrypt_pending))
240 		complete(&ctx->async_wait.completion);
241 }
242 
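/* decrypt_pending is kept with a +1 bias while the context is live (see
 * the DEBUG_NET_WARN_ON_ONCE() check in tls_do_decryption()).  A sketch
 * of the scheme: dropping the bias below lets the final in-flight
 * tls_decrypt_done() bring the count to zero and signal the completion;
 * the bias is then restored for the next batch of async requests.
 */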
243 static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
244 {
245 	if (!atomic_dec_and_test(&ctx->decrypt_pending))
246 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
247 	atomic_inc(&ctx->decrypt_pending);
248 
249 	__skb_queue_purge(&ctx->async_hold);
250 	return ctx->async_wait.err;
251 }
252 
253 static int tls_do_decryption(struct sock *sk,
254 			     struct scatterlist *sgin,
255 			     struct scatterlist *sgout,
256 			     char *iv_recv,
257 			     size_t data_len,
258 			     struct aead_request *aead_req,
259 			     struct tls_decrypt_arg *darg)
260 {
261 	struct tls_context *tls_ctx = tls_get_ctx(sk);
262 	struct tls_prot_info *prot = &tls_ctx->prot_info;
263 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
264 	int ret;
265 
266 	aead_request_set_tfm(aead_req, ctx->aead_recv);
267 	aead_request_set_ad(aead_req, prot->aad_size);
268 	aead_request_set_crypt(aead_req, sgin, sgout,
269 			       data_len + prot->tag_size,
270 			       (u8 *)iv_recv);
271 
272 	if (darg->async) {
273 		aead_request_set_callback(aead_req,
274 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
275 					  tls_decrypt_done, aead_req);
276 		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
277 		atomic_inc(&ctx->decrypt_pending);
278 	} else {
279 		DECLARE_CRYPTO_WAIT(wait);
280 
281 		aead_request_set_callback(aead_req,
282 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
283 					  crypto_req_done, &wait);
284 		ret = crypto_aead_decrypt(aead_req);
285 		if (ret == -EINPROGRESS || ret == -EBUSY)
286 			ret = crypto_wait_req(ret, &wait);
287 		return ret;
288 	}
289 
290 	ret = crypto_aead_decrypt(aead_req);
291 	if (ret == -EINPROGRESS)
292 		return 0;
293 
294 	if (ret == -EBUSY) {
295 		ret = tls_decrypt_async_wait(ctx);
296 		darg->async_done = true;
297 		/* all completions have run, we're not doing async anymore */
298 		darg->async = false;
299 		return ret;
300 	}
301 
302 	atomic_dec(&ctx->decrypt_pending);
303 	darg->async = false;
304 
305 	return ret;
306 }
307 
308 static void tls_trim_both_msgs(struct sock *sk, int target_size)
309 {
310 	struct tls_context *tls_ctx = tls_get_ctx(sk);
311 	struct tls_prot_info *prot = &tls_ctx->prot_info;
312 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
313 	struct tls_rec *rec = ctx->open_rec;
314 
315 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
316 	if (target_size > 0)
317 		target_size += prot->overhead_size;
318 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
319 }
320 
321 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
322 {
323 	struct tls_context *tls_ctx = tls_get_ctx(sk);
324 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
325 	struct tls_rec *rec = ctx->open_rec;
326 	struct sk_msg *msg_en = &rec->msg_encrypted;
327 
328 	return sk_msg_alloc(sk, msg_en, len, 0);
329 }
330 
331 static int tls_clone_plaintext_msg(struct sock *sk, int required)
332 {
333 	struct tls_context *tls_ctx = tls_get_ctx(sk);
334 	struct tls_prot_info *prot = &tls_ctx->prot_info;
335 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
336 	struct tls_rec *rec = ctx->open_rec;
337 	struct sk_msg *msg_pl = &rec->msg_plaintext;
338 	struct sk_msg *msg_en = &rec->msg_encrypted;
339 	int skip, len;
340 
341 	/* We add page references worth len bytes from encrypted sg
342 	 * at the end of plaintext sg. It is guaranteed that msg_en
343 	 * has the required room (ensured by the caller).
344 	 */
345 	len = required - msg_pl->sg.size;
346 
347 	/* Skip initial bytes in msg_en's data to be able to use
348 	 * same offset of both plain and encrypted data.
349 	 */
350 	skip = prot->prepend_size + msg_pl->sg.size;
351 
352 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
353 }
354 
355 static struct tls_rec *tls_get_rec(struct sock *sk)
356 {
357 	struct tls_context *tls_ctx = tls_get_ctx(sk);
358 	struct tls_prot_info *prot = &tls_ctx->prot_info;
359 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
360 	struct sk_msg *msg_pl, *msg_en;
361 	struct tls_rec *rec;
362 	int mem_size;
363 
364 	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
365 
366 	rec = kzalloc(mem_size, sk->sk_allocation);
367 	if (!rec)
368 		return NULL;
369 
370 	msg_pl = &rec->msg_plaintext;
371 	msg_en = &rec->msg_encrypted;
372 
373 	sk_msg_init(msg_pl);
374 	sk_msg_init(msg_en);
375 
376 	sg_init_table(rec->sg_aead_in, 2);
377 	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
378 	sg_unmark_end(&rec->sg_aead_in[1]);
379 
380 	sg_init_table(rec->sg_aead_out, 2);
381 	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
382 	sg_unmark_end(&rec->sg_aead_out[1]);
383 
384 	rec->sk = sk;
385 
386 	return rec;
387 }
388 
389 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
390 {
391 	sk_msg_free(sk, &rec->msg_encrypted);
392 	sk_msg_free(sk, &rec->msg_plaintext);
393 	kfree(rec);
394 }
395 
396 static void tls_free_open_rec(struct sock *sk)
397 {
398 	struct tls_context *tls_ctx = tls_get_ctx(sk);
399 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
400 	struct tls_rec *rec = ctx->open_rec;
401 
402 	if (rec) {
403 		tls_free_rec(sk, rec);
404 		ctx->open_rec = NULL;
405 	}
406 }
407 
408 int tls_tx_records(struct sock *sk, int flags)
409 {
410 	struct tls_context *tls_ctx = tls_get_ctx(sk);
411 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
412 	struct tls_rec *rec, *tmp;
413 	struct sk_msg *msg_en;
414 	int tx_flags, rc = 0;
415 
416 	if (tls_is_partially_sent_record(tls_ctx)) {
417 		rec = list_first_entry(&ctx->tx_list,
418 				       struct tls_rec, list);
419 
420 		if (flags == -1)
421 			tx_flags = rec->tx_flags;
422 		else
423 			tx_flags = flags;
424 
425 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
426 		if (rc)
427 			goto tx_err;
428 
429 		/* Full record has been transmitted.
430 		 * Remove the head of tx_list
431 		 */
432 		list_del(&rec->list);
433 		sk_msg_free(sk, &rec->msg_plaintext);
434 		kfree(rec);
435 	}
436 
437 	/* Tx all ready records */
438 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
439 		if (READ_ONCE(rec->tx_ready)) {
440 			if (flags == -1)
441 				tx_flags = rec->tx_flags;
442 			else
443 				tx_flags = flags;
444 
445 			msg_en = &rec->msg_encrypted;
446 			rc = tls_push_sg(sk, tls_ctx,
447 					 &msg_en->sg.data[msg_en->sg.curr],
448 					 0, tx_flags);
449 			if (rc)
450 				goto tx_err;
451 
452 			list_del(&rec->list);
453 			sk_msg_free(sk, &rec->msg_plaintext);
454 			kfree(rec);
455 		} else {
456 			break;
457 		}
458 	}
459 
460 tx_err:
461 	if (rc < 0 && rc != -EAGAIN)
462 		tls_err_abort(sk, rc);
463 
464 	return rc;
465 }
466 
467 static void tls_encrypt_done(void *data, int err)
468 {
469 	struct tls_sw_context_tx *ctx;
470 	struct tls_context *tls_ctx;
471 	struct tls_prot_info *prot;
472 	struct tls_rec *rec = data;
473 	struct scatterlist *sge;
474 	struct sk_msg *msg_en;
475 	struct sock *sk;
476 
477 	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
478 		return;
479 
480 	msg_en = &rec->msg_encrypted;
481 
482 	sk = rec->sk;
483 	tls_ctx = tls_get_ctx(sk);
484 	prot = &tls_ctx->prot_info;
485 	ctx = tls_sw_ctx_tx(tls_ctx);
486 
487 	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
488 	sge->offset -= prot->prepend_size;
489 	sge->length += prot->prepend_size;
490 
491 	/* Check if error is previously set on socket */
492 	if (err || sk->sk_err) {
493 		rec = NULL;
494 
495 		/* If err is already set on socket, return the same code */
496 		if (sk->sk_err) {
497 			ctx->async_wait.err = -sk->sk_err;
498 		} else {
499 			ctx->async_wait.err = err;
500 			tls_err_abort(sk, err);
501 		}
502 	}
503 
504 	if (rec) {
505 		struct tls_rec *first_rec;
506 
507 		/* Mark the record as ready for transmission */
508 		smp_store_mb(rec->tx_ready, true);
509 
510 		/* If received record is at head of tx_list, schedule tx */
511 		first_rec = list_first_entry(&ctx->tx_list,
512 					     struct tls_rec, list);
513 		if (rec == first_rec) {
514 			/* Schedule the transmission */
515 			if (!test_and_set_bit(BIT_TX_SCHEDULED,
516 					      &ctx->tx_bitmask))
517 				schedule_delayed_work(&ctx->tx_work.work, 1);
518 		}
519 	}
520 
521 	if (atomic_dec_and_test(&ctx->encrypt_pending))
522 		complete(&ctx->async_wait.completion);
523 }
524 
525 static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
526 {
527 	if (!atomic_dec_and_test(&ctx->encrypt_pending))
528 		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
529 	atomic_inc(&ctx->encrypt_pending);
530 
531 	return ctx->async_wait.err;
532 }
533 
534 static int tls_do_encryption(struct sock *sk,
535 			     struct tls_context *tls_ctx,
536 			     struct tls_sw_context_tx *ctx,
537 			     struct aead_request *aead_req,
538 			     size_t data_len, u32 start)
539 {
540 	struct tls_prot_info *prot = &tls_ctx->prot_info;
541 	struct tls_rec *rec = ctx->open_rec;
542 	struct sk_msg *msg_en = &rec->msg_encrypted;
543 	struct scatterlist *sge = sk_msg_elem(msg_en, start);
544 	int rc, iv_offset = 0;
545 
546 	/* For CCM based ciphers, first byte of IV is a constant */
547 	switch (prot->cipher_type) {
548 	case TLS_CIPHER_AES_CCM_128:
549 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
550 		iv_offset = 1;
551 		break;
552 	case TLS_CIPHER_SM4_CCM:
553 		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
554 		iv_offset = 1;
555 		break;
556 	}
557 
558 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
559 	       prot->iv_size + prot->salt_size);
560 
561 	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
562 			    tls_ctx->tx.rec_seq);
563 
564 	sge->offset += prot->prepend_size;
565 	sge->length -= prot->prepend_size;
566 
567 	msg_en->sg.curr = start;
568 
569 	aead_request_set_tfm(aead_req, ctx->aead_send);
570 	aead_request_set_ad(aead_req, prot->aad_size);
571 	aead_request_set_crypt(aead_req, rec->sg_aead_in,
572 			       rec->sg_aead_out,
573 			       data_len, rec->iv_data);
574 
575 	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
576 				  tls_encrypt_done, rec);
577 
578 	/* Add the record in tx_list */
579 	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
580 	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
581 	atomic_inc(&ctx->encrypt_pending);
582 
583 	rc = crypto_aead_encrypt(aead_req);
584 	if (rc == -EBUSY) {
585 		rc = tls_encrypt_async_wait(ctx);
586 		rc = rc ?: -EINPROGRESS;
587 		/*
588 		 * The async callback tls_encrypt_done() has already
589 		 * decremented encrypt_pending and restored the sge on
590 		 * both success and error. Skip the synchronous cleanup
591 		 * below on error, just remove the record and return.
592 		 */
593 		if (rc != -EINPROGRESS) {
594 			list_del(&rec->list);
595 			return rc;
596 		}
597 	}
598 	if (!rc || rc != -EINPROGRESS) {
599 		atomic_dec(&ctx->encrypt_pending);
600 		sge->offset -= prot->prepend_size;
601 		sge->length += prot->prepend_size;
602 	}
603 
604 	if (!rc) {
605 		WRITE_ONCE(rec->tx_ready, true);
606 	} else if (rc != -EINPROGRESS) {
607 		list_del(&rec->list);
608 		return rc;
609 	}
610 
611 	/* Unhook the record from the context if encryption did not fail */
612 	ctx->open_rec = NULL;
613 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
614 	return rc;
615 }
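/* For TLS 1.3, the per-record nonce produced by tls_xor_iv_with_seq()
 * above follows RFC 8446, section 5.3 (a sketch, big-endian values):
 *
 *	nonce = write_iv XOR left_pad(record_seq, iv_length)
 *
 * so consecutive records share the static IV but never repeat a nonce.
 */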
616 
617 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
618 				 struct tls_rec **to, struct sk_msg *msg_opl,
619 				 struct sk_msg *msg_oen, u32 split_point,
620 				 u32 tx_overhead_size, u32 *orig_end)
621 {
622 	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
623 	struct scatterlist *sge, *osge, *nsge;
624 	u32 orig_size = msg_opl->sg.size;
625 	struct scatterlist tmp = { };
626 	struct sk_msg *msg_npl;
627 	struct tls_rec *new;
628 	int ret;
629 
630 	new = tls_get_rec(sk);
631 	if (!new)
632 		return -ENOMEM;
633 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
634 			   tx_overhead_size, 0);
635 	if (ret < 0) {
636 		tls_free_rec(sk, new);
637 		return ret;
638 	}
639 
640 	*orig_end = msg_opl->sg.end;
641 	i = msg_opl->sg.start;
642 	sge = sk_msg_elem(msg_opl, i);
643 	while (apply && sge->length) {
644 		if (sge->length > apply) {
645 			u32 len = sge->length - apply;
646 
647 			get_page(sg_page(sge));
648 			sg_set_page(&tmp, sg_page(sge), len,
649 				    sge->offset + apply);
650 			sge->length = apply;
651 			bytes += apply;
652 			apply = 0;
653 		} else {
654 			apply -= sge->length;
655 			bytes += sge->length;
656 		}
657 
658 		sk_msg_iter_var_next(i);
659 		if (i == msg_opl->sg.end)
660 			break;
661 		sge = sk_msg_elem(msg_opl, i);
662 	}
663 
664 	msg_opl->sg.end = i;
665 	msg_opl->sg.curr = i;
666 	msg_opl->sg.copybreak = 0;
667 	msg_opl->apply_bytes = 0;
668 	msg_opl->sg.size = bytes;
669 
670 	msg_npl = &new->msg_plaintext;
671 	msg_npl->apply_bytes = apply;
672 	msg_npl->sg.size = orig_size - bytes;
673 
674 	j = msg_npl->sg.start;
675 	nsge = sk_msg_elem(msg_npl, j);
676 	if (tmp.length) {
677 		memcpy(nsge, &tmp, sizeof(*nsge));
678 		sk_msg_iter_var_next(j);
679 		nsge = sk_msg_elem(msg_npl, j);
680 	}
681 
682 	osge = sk_msg_elem(msg_opl, i);
683 	while (osge->length) {
684 		memcpy(nsge, osge, sizeof(*nsge));
685 		sg_unmark_end(nsge);
686 		sk_msg_iter_var_next(i);
687 		sk_msg_iter_var_next(j);
688 		if (i == *orig_end)
689 			break;
690 		osge = sk_msg_elem(msg_opl, i);
691 		nsge = sk_msg_elem(msg_npl, j);
692 	}
693 
694 	msg_npl->sg.end = j;
695 	msg_npl->sg.curr = j;
696 	msg_npl->sg.copybreak = 0;
697 
698 	*to = new;
699 	return 0;
700 }
701 
702 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
703 				  struct tls_rec *from, u32 orig_end)
704 {
705 	struct sk_msg *msg_npl = &from->msg_plaintext;
706 	struct sk_msg *msg_opl = &to->msg_plaintext;
707 	struct scatterlist *osge, *nsge;
708 	u32 i, j;
709 
710 	i = msg_opl->sg.end;
711 	sk_msg_iter_var_prev(i);
712 	j = msg_npl->sg.start;
713 
714 	osge = sk_msg_elem(msg_opl, i);
715 	nsge = sk_msg_elem(msg_npl, j);
716 
717 	if (sg_page(osge) == sg_page(nsge) &&
718 	    osge->offset + osge->length == nsge->offset) {
719 		osge->length += nsge->length;
720 		put_page(sg_page(nsge));
721 	}
722 
723 	msg_opl->sg.end = orig_end;
724 	msg_opl->sg.curr = orig_end;
725 	msg_opl->sg.copybreak = 0;
726 	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
727 	msg_opl->sg.size += msg_npl->sg.size;
728 
729 	sk_msg_free(sk, &to->msg_encrypted);
730 	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
731 
732 	kfree(from);
733 }
734 
735 static int tls_push_record(struct sock *sk, int flags,
736 			   unsigned char record_type)
737 {
738 	struct tls_context *tls_ctx = tls_get_ctx(sk);
739 	struct tls_prot_info *prot = &tls_ctx->prot_info;
740 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
741 	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
742 	u32 i, split_point, orig_end;
743 	struct sk_msg *msg_pl, *msg_en;
744 	struct aead_request *req;
745 	bool split;
746 	int rc;
747 
748 	if (!rec)
749 		return 0;
750 
751 	msg_pl = &rec->msg_plaintext;
752 	msg_en = &rec->msg_encrypted;
753 
754 	split_point = msg_pl->apply_bytes;
755 	split = split_point && split_point < msg_pl->sg.size;
756 	if (unlikely((!split &&
757 		      msg_pl->sg.size +
758 		      prot->overhead_size > msg_en->sg.size) ||
759 		     (split &&
760 		      split_point +
761 		      prot->overhead_size > msg_en->sg.size))) {
762 		split = true;
763 		split_point = msg_en->sg.size;
764 	}
765 	if (split) {
766 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
767 					   split_point, prot->overhead_size,
768 					   &orig_end);
769 		if (rc < 0)
770 			return rc;
771 		/* This can happen if the tls_split_open_record() call above allocates
772 		 * a single large encryption buffer instead of two smaller
773 		 * ones. In this case adjust pointers and continue without
774 		 * split.
775 		 */
776 		if (!msg_pl->sg.size) {
777 			tls_merge_open_record(sk, rec, tmp, orig_end);
778 			msg_pl = &rec->msg_plaintext;
779 			msg_en = &rec->msg_encrypted;
780 			split = false;
781 		}
782 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
783 			    prot->overhead_size);
784 	}
785 
786 	rec->tx_flags = flags;
787 	req = &rec->aead_req;
788 
789 	i = msg_pl->sg.end;
790 	sk_msg_iter_var_prev(i);
791 
792 	rec->content_type = record_type;
793 	if (prot->version == TLS_1_3_VERSION) {
794 		/* Add the content type to the end of the message.  No padding is added. */
795 		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
796 		sg_mark_end(&rec->sg_content_type);
797 		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
798 			 &rec->sg_content_type);
799 	} else {
800 		sg_mark_end(sk_msg_elem(msg_pl, i));
801 	}
802 
803 	if (msg_pl->sg.end < msg_pl->sg.start) {
804 		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
805 			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
806 			 msg_pl->sg.data);
807 	}
808 
809 	i = msg_pl->sg.start;
810 	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
811 
812 	i = msg_en->sg.end;
813 	sk_msg_iter_var_prev(i);
814 	sg_mark_end(sk_msg_elem(msg_en, i));
815 
816 	i = msg_en->sg.start;
817 	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
818 
819 	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
820 		     tls_ctx->tx.rec_seq, record_type, prot);
821 
822 	tls_fill_prepend(tls_ctx,
823 			 page_address(sg_page(&msg_en->sg.data[i])) +
824 			 msg_en->sg.data[i].offset,
825 			 msg_pl->sg.size + prot->tail_size,
826 			 record_type);
827 
828 	tls_ctx->pending_open_record_frags = false;
829 
830 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
831 			       msg_pl->sg.size + prot->tail_size, i);
832 	if (rc < 0) {
833 		if (rc != -EINPROGRESS) {
834 			tls_err_abort(sk, -EBADMSG);
835 			if (split) {
836 				tls_ctx->pending_open_record_frags = true;
837 				tls_merge_open_record(sk, rec, tmp, orig_end);
838 			}
839 		}
840 		ctx->async_capable = 1;
841 		return rc;
842 	} else if (split) {
843 		msg_pl = &tmp->msg_plaintext;
844 		msg_en = &tmp->msg_encrypted;
845 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
846 		tls_ctx->pending_open_record_frags = true;
847 		ctx->open_rec = tmp;
848 	}
849 
850 	return tls_tx_records(sk, flags);
851 }
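/* Resulting AEAD scatterlist geometry built by tls_push_record()
 * (a sketch):
 *
 *   sg_aead_in:  [ aad_space ] -> [ plaintext sg ... (+ content_type
 *                                   byte for TLS 1.3) ]
 *   sg_aead_out: [ aad_space ] -> [ record: header | ciphertext | tag ]
 *
 * tls_fill_prepend() writes the TLS record header into the start of the
 * first output fragment before encryption is kicked off.
 */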
852 
853 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
854 			       bool full_record, u8 record_type,
855 			       ssize_t *copied, int flags)
856 {
857 	struct tls_context *tls_ctx = tls_get_ctx(sk);
858 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
859 	struct sk_msg msg_redir = { };
860 	struct sk_psock *psock;
861 	struct sock *sk_redir;
862 	struct tls_rec *rec;
863 	bool enospc, policy, redir_ingress;
864 	int err = 0, send;
865 	u32 delta = 0;
866 
867 	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
868 	psock = sk_psock_get(sk);
869 	if (!psock || !policy) {
870 		err = tls_push_record(sk, flags, record_type);
871 		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
872 			*copied -= sk_msg_free(sk, msg);
873 			tls_free_open_rec(sk);
874 			err = -sk->sk_err;
875 		}
876 		if (psock)
877 			sk_psock_put(sk, psock);
878 		return err;
879 	}
880 more_data:
881 	enospc = sk_msg_full(msg);
882 	if (psock->eval == __SK_NONE) {
883 		delta = msg->sg.size;
884 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
885 		delta -= msg->sg.size;
886 
887 		if ((s32)delta > 0) {
888 			/* It indicates that we executed bpf_msg_pop_data(),
889 			 * causing the plaintext data size to decrease.
890 			 * Therefore the encrypted data size also needs to
891 			 * correspondingly decrease. We only need to subtract
892 			 * delta to calculate the new ciphertext length since
893 			 * ktls does not support block encryption.
894 			 */
895 			struct sk_msg *enc = &ctx->open_rec->msg_encrypted;
896 
897 			sk_msg_trim(sk, enc, enc->sg.size - delta);
898 		}
899 	}
900 	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
901 	    !enospc && !full_record) {
902 		err = -ENOSPC;
903 		goto out_err;
904 	}
905 	msg->cork_bytes = 0;
906 	send = msg->sg.size;
907 	if (msg->apply_bytes && msg->apply_bytes < send)
908 		send = msg->apply_bytes;
909 
910 	switch (psock->eval) {
911 	case __SK_PASS:
912 		err = tls_push_record(sk, flags, record_type);
913 		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
914 			*copied -= sk_msg_free(sk, msg);
915 			tls_free_open_rec(sk);
916 			err = -sk->sk_err;
917 			goto out_err;
918 		}
919 		break;
920 	case __SK_REDIRECT:
921 		redir_ingress = psock->redir_ingress;
922 		sk_redir = psock->sk_redir;
923 		memcpy(&msg_redir, msg, sizeof(*msg));
924 		if (msg->apply_bytes < send)
925 			msg->apply_bytes = 0;
926 		else
927 			msg->apply_bytes -= send;
928 		sk_msg_return_zero(sk, msg, send);
929 		msg->sg.size -= send;
930 		release_sock(sk);
931 		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
932 					    &msg_redir, send, flags);
933 		lock_sock(sk);
934 		if (err < 0) {
935 			/* Regardless of whether the data represented by
936 			 * msg_redir is sent successfully, we have already
937 			 * uncharged it via sk_msg_return_zero(). The
938 			 * msg->sg.size represents the remaining unprocessed
939 			 * data, which needs to be uncharged here.
940 			 */
941 			sk_mem_uncharge(sk, msg->sg.size);
942 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
943 			msg->sg.size = 0;
944 		}
945 		if (msg->sg.size == 0)
946 			tls_free_open_rec(sk);
947 		break;
948 	case __SK_DROP:
949 	default:
950 		sk_msg_free_partial(sk, msg, send);
951 		if (msg->apply_bytes < send)
952 			msg->apply_bytes = 0;
953 		else
954 			msg->apply_bytes -= send;
955 		if (msg->sg.size == 0)
956 			tls_free_open_rec(sk);
957 		*copied -= (send + delta);
958 		err = -EACCES;
959 	}
960 
961 	if (likely(!err)) {
962 		bool reset_eval = !ctx->open_rec;
963 
964 		rec = ctx->open_rec;
965 		if (rec) {
966 			msg = &rec->msg_plaintext;
967 			if (!msg->apply_bytes)
968 				reset_eval = true;
969 		}
970 		if (reset_eval) {
971 			psock->eval = __SK_NONE;
972 			if (psock->sk_redir) {
973 				sock_put(psock->sk_redir);
974 				psock->sk_redir = NULL;
975 			}
976 		}
977 		if (rec)
978 			goto more_data;
979 	}
980  out_err:
981 	sk_psock_put(sk, psock);
982 	return err;
983 }
984 
985 static int tls_sw_push_pending_record(struct sock *sk, int flags)
986 {
987 	struct tls_context *tls_ctx = tls_get_ctx(sk);
988 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
989 	struct tls_rec *rec = ctx->open_rec;
990 	struct sk_msg *msg_pl;
991 	size_t copied;
992 
993 	if (!rec)
994 		return 0;
995 
996 	msg_pl = &rec->msg_plaintext;
997 	copied = msg_pl->sg.size;
998 	if (!copied)
999 		return 0;
1000 
1001 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
1002 				   &copied, flags);
1003 }
1004 
1005 static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
1006 				 struct sk_msg *msg_pl, size_t try_to_copy,
1007 				 ssize_t *copied)
1008 {
1009 	struct page *page = NULL, **pages = &page;
1010 
1011 	do {
1012 		ssize_t part;
1013 		size_t off;
1014 
1015 		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
1016 					      try_to_copy, 1, 0, &off);
1017 		if (part <= 0)
1018 			return part ?: -EIO;
1019 
1020 		if (WARN_ON_ONCE(!sendpage_ok(page))) {
1021 			iov_iter_revert(&msg->msg_iter, part);
1022 			return -EIO;
1023 		}
1024 
1025 		sk_msg_page_add(msg_pl, page, part, off);
1026 		msg_pl->sg.copybreak = 0;
1027 		msg_pl->sg.curr = msg_pl->sg.end;
1028 		sk_mem_charge(sk, part);
1029 		*copied += part;
1030 		try_to_copy -= part;
1031 	} while (try_to_copy && !sk_msg_full(msg_pl));
1032 
1033 	return 0;
1034 }
1035 
1036 static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
1037 				 size_t size)
1038 {
1039 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1040 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1041 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1042 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1043 	bool async_capable = ctx->async_capable;
1044 	unsigned char record_type = TLS_RECORD_TYPE_DATA;
1045 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
1046 	bool eor = !(msg->msg_flags & MSG_MORE);
1047 	size_t try_to_copy;
1048 	ssize_t copied = 0;
1049 	struct sk_msg *msg_pl, *msg_en;
1050 	struct tls_rec *rec;
1051 	int required_size;
1052 	int num_async = 0;
1053 	bool full_record;
1054 	int record_room;
1055 	int num_zc = 0;
1056 	int orig_size;
1057 	int ret = 0;
1058 
1059 	if (!eor && (msg->msg_flags & MSG_EOR))
1060 		return -EINVAL;
1061 
1062 	if (unlikely(msg->msg_controllen)) {
1063 		ret = tls_process_cmsg(sk, msg, &record_type);
1064 		if (ret) {
1065 			if (ret == -EINPROGRESS)
1066 				num_async++;
1067 			else if (ret != -EAGAIN)
1068 				goto end;
1069 		}
1070 	}
1071 
1072 	while (msg_data_left(msg)) {
1073 		if (sk->sk_err) {
1074 			ret = -sk->sk_err;
1075 			goto send_end;
1076 		}
1077 
1078 		if (ctx->open_rec)
1079 			rec = ctx->open_rec;
1080 		else
1081 			rec = ctx->open_rec = tls_get_rec(sk);
1082 		if (!rec) {
1083 			ret = -ENOMEM;
1084 			goto send_end;
1085 		}
1086 
1087 		msg_pl = &rec->msg_plaintext;
1088 		msg_en = &rec->msg_encrypted;
1089 
1090 		orig_size = msg_pl->sg.size;
1091 		full_record = false;
1092 		try_to_copy = msg_data_left(msg);
1093 		record_room = tls_ctx->tx_max_payload_len - msg_pl->sg.size;
1094 		if (try_to_copy >= record_room) {
1095 			try_to_copy = record_room;
1096 			full_record = true;
1097 		}
1098 
1099 		required_size = msg_pl->sg.size + try_to_copy +
1100 				prot->overhead_size;
1101 
1102 		if (!sk_stream_memory_free(sk))
1103 			goto wait_for_sndbuf;
1104 
1105 alloc_encrypted:
1106 		ret = tls_alloc_encrypted_msg(sk, required_size);
1107 		if (ret) {
1108 			if (ret != -ENOSPC)
1109 				goto wait_for_memory;
1110 
1111 			/* Adjust try_to_copy according to the amount that was
1112 			 * actually allocated. The difference is due
1113 			 * to the max sg elements limit.
1114 			 */
1115 			try_to_copy -= required_size - msg_en->sg.size;
1116 			full_record = true;
1117 		}
1118 
1119 		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
1120 			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
1121 						    try_to_copy, &copied);
1122 			if (ret < 0)
1123 				goto send_end;
1124 			tls_ctx->pending_open_record_frags = true;
1125 
1126 			if (sk_msg_full(msg_pl)) {
1127 				full_record = true;
1128 				sk_msg_trim(sk, msg_en,
1129 					    msg_pl->sg.size + prot->overhead_size);
1130 			}
1131 
1132 			if (full_record || eor)
1133 				goto copied;
1134 			continue;
1135 		}
1136 
1137 		if (!is_kvec && (full_record || eor) && !async_capable) {
1138 			u32 first = msg_pl->sg.end;
1139 
1140 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1141 							msg_pl, try_to_copy);
1142 			if (ret)
1143 				goto fallback_to_reg_send;
1144 
1145 			num_zc++;
1146 			copied += try_to_copy;
1147 
1148 			sk_msg_sg_copy_set(msg_pl, first);
1149 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1150 						  record_type, &copied,
1151 						  msg->msg_flags);
1152 			if (ret) {
1153 				if (ret == -EINPROGRESS)
1154 					num_async++;
1155 				else if (ret == -ENOMEM)
1156 					goto wait_for_memory;
1157 				else if (ctx->open_rec && ret == -ENOSPC) {
1158 					if (msg_pl->cork_bytes) {
1159 						ret = 0;
1160 						goto send_end;
1161 					}
1162 					goto rollback_iter;
1163 				} else if (ret != -EAGAIN)
1164 					goto send_end;
1165 			}
1166 
1167 			/* Transmit if any encryptions have completed */
1168 			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1169 				cancel_delayed_work(&ctx->tx_work.work);
1170 				tls_tx_records(sk, msg->msg_flags);
1171 			}
1172 
1173 			continue;
1174 rollback_iter:
1175 			copied -= try_to_copy;
1176 			sk_msg_sg_copy_clear(msg_pl, first);
1177 			iov_iter_revert(&msg->msg_iter,
1178 					msg_pl->sg.size - orig_size);
1179 fallback_to_reg_send:
1180 			sk_msg_trim(sk, msg_pl, orig_size);
1181 		}
1182 
1183 		required_size = msg_pl->sg.size + try_to_copy;
1184 
1185 		ret = tls_clone_plaintext_msg(sk, required_size);
1186 		if (ret) {
1187 			if (ret != -ENOSPC)
1188 				goto send_end;
1189 
1190 			/* Adjust try_to_copy according to the amount that was
1191 			 * actually allocated. The difference is due
1192 			 * to the max sg elements limit.
1193 			 */
1194 			try_to_copy -= required_size - msg_pl->sg.size;
1195 			full_record = true;
1196 			sk_msg_trim(sk, msg_en,
1197 				    msg_pl->sg.size + prot->overhead_size);
1198 		}
1199 
1200 		if (try_to_copy) {
1201 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1202 						       msg_pl, try_to_copy);
1203 			if (ret < 0)
1204 				goto trim_sgl;
1205 		}
1206 
1207 		/* The open record is defined only if the copy succeeded; otherwise
1208 		 * we would trim the sg but not reset the open record frags.
1209 		 */
1210 		tls_ctx->pending_open_record_frags = true;
1211 		copied += try_to_copy;
1212 copied:
1213 		if (full_record || eor) {
1214 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1215 						  record_type, &copied,
1216 						  msg->msg_flags);
1217 			if (ret) {
1218 				if (ret == -EINPROGRESS)
1219 					num_async++;
1220 				else if (ret == -ENOMEM)
1221 					goto wait_for_memory;
1222 				else if (ret != -EAGAIN) {
1223 					if (ret == -ENOSPC)
1224 						ret = 0;
1225 					goto send_end;
1226 				}
1227 			}
1228 
1229 			/* Transmit if any encryptions have completed */
1230 			if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1231 				cancel_delayed_work(&ctx->tx_work.work);
1232 				tls_tx_records(sk, msg->msg_flags);
1233 			}
1234 		}
1235 
1236 		continue;
1237 
1238 wait_for_sndbuf:
1239 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1240 wait_for_memory:
1241 		ret = sk_stream_wait_memory(sk, &timeo);
1242 		if (ret) {
1243 trim_sgl:
1244 			if (ctx->open_rec)
1245 				tls_trim_both_msgs(sk, orig_size);
1246 			goto send_end;
1247 		}
1248 
1249 		if (ctx->open_rec && msg_en->sg.size < required_size)
1250 			goto alloc_encrypted;
1251 	}
1252 
1253 send_end:
1254 	if (!num_async) {
1255 		goto end;
1256 	} else if (num_zc || eor) {
1257 		int err;
1258 
1259 		/* Wait for pending encryptions to get completed */
1260 		err = tls_encrypt_async_wait(ctx);
1261 		if (err) {
1262 			ret = err;
1263 			copied = 0;
1264 		}
1265 	}
1266 
1267 	/* Transmit if any encryptions have completed */
1268 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1269 		cancel_delayed_work(&ctx->tx_work.work);
1270 		tls_tx_records(sk, msg->msg_flags);
1271 	}
1272 
1273 end:
1274 	ret = sk_stream_error(sk, msg->msg_flags, ret);
1275 	return copied > 0 ? copied : ret;
1276 }
1277 
1278 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1279 {
1280 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1281 	int ret;
1282 
1283 	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1284 			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
1285 			       MSG_SENDPAGE_NOPOLICY))
1286 		return -EOPNOTSUPP;
1287 
1288 	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
1289 	if (ret)
1290 		return ret;
1291 	lock_sock(sk);
1292 	ret = tls_sw_sendmsg_locked(sk, msg, size);
1293 	release_sock(sk);
1294 	mutex_unlock(&tls_ctx->tx_lock);
1295 	return ret;
1296 }
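/* The cmsg path above is driven from userspace as described in
 * Documentation/networking/tls.rst.  A minimal sketch sending an alert
 * record over an established kTLS socket 'fd' ('alert_data'/'alert_len'
 * are illustrative names, userspace code):
 *
 *	char buf[CMSG_SPACE(sizeof(unsigned char))] = { 0 };
 *	struct msghdr msg = { 0 };
 *	struct iovec iov = { alert_data, alert_len };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = buf;
 *	msg.msg_controllen = sizeof(buf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = 21;		(TLS alert content type)
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	sendmsg(fd, &msg, 0);
 */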
1297 
1298 /*
1299  * Handle unexpected EOF during splice without SPLICE_F_MORE set.
1300  */
1301 void tls_sw_splice_eof(struct socket *sock)
1302 {
1303 	struct sock *sk = sock->sk;
1304 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1305 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1306 	struct tls_rec *rec;
1307 	struct sk_msg *msg_pl;
1308 	ssize_t copied = 0;
1309 	bool retrying = false;
1310 	int ret = 0;
1311 
1312 	if (!ctx->open_rec)
1313 		return;
1314 
1315 	mutex_lock(&tls_ctx->tx_lock);
1316 	lock_sock(sk);
1317 
1318 retry:
1319 	/* same checks as in tls_sw_push_pending_record() */
1320 	rec = ctx->open_rec;
1321 	if (!rec)
1322 		goto unlock;
1323 
1324 	msg_pl = &rec->msg_plaintext;
1325 	if (msg_pl->sg.size == 0)
1326 		goto unlock;
1327 
1328 	/* Check the BPF advisor and perform transmission. */
1329 	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
1330 				  &copied, 0);
1331 	switch (ret) {
1332 	case 0:
1333 	case -EAGAIN:
1334 		if (retrying)
1335 			goto unlock;
1336 		retrying = true;
1337 		goto retry;
1338 	case -EINPROGRESS:
1339 		break;
1340 	default:
1341 		goto unlock;
1342 	}
1343 
1344 	/* Wait for pending encryptions to get completed */
1345 	if (tls_encrypt_async_wait(ctx))
1346 		goto unlock;
1347 
1348 	/* Transmit if any encryptions have completed */
1349 	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1350 		cancel_delayed_work(&ctx->tx_work.work);
1351 		tls_tx_records(sk, 0);
1352 	}
1353 
1354 unlock:
1355 	release_sock(sk);
1356 	mutex_unlock(&tls_ctx->tx_lock);
1357 }
1358 
1359 static int
1360 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1361 		bool released)
1362 {
1363 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1364 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1365 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1366 	int ret = 0;
1367 	long timeo;
1368 
1369 	/* a rekey is pending, let userspace deal with it */
1370 	if (unlikely(ctx->key_update_pending))
1371 		return -EKEYEXPIRED;
1372 
1373 	timeo = sock_rcvtimeo(sk, nonblock);
1374 
1375 	while (!tls_strp_msg_ready(ctx)) {
1376 		if (!sk_psock_queue_empty(psock))
1377 			return 0;
1378 
1379 		if (sk->sk_err)
1380 			return sock_error(sk);
1381 
1382 		if (ret < 0)
1383 			return ret;
1384 
1385 		if (!skb_queue_empty(&sk->sk_receive_queue)) {
1386 			tls_strp_check_rcv(&ctx->strp);
1387 			if (tls_strp_msg_ready(ctx))
1388 				break;
1389 		}
1390 
1391 		if (sk->sk_shutdown & RCV_SHUTDOWN)
1392 			return 0;
1393 
1394 		if (sock_flag(sk, SOCK_DONE))
1395 			return 0;
1396 
1397 		if (!timeo)
1398 			return -EAGAIN;
1399 
1400 		released = true;
1401 		add_wait_queue(sk_sleep(sk), &wait);
1402 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1403 		ret = sk_wait_event(sk, &timeo,
1404 				    tls_strp_msg_ready(ctx) ||
1405 				    !sk_psock_queue_empty(psock),
1406 				    &wait);
1407 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1408 		remove_wait_queue(sk_sleep(sk), &wait);
1409 
1410 		/* Handle signals */
1411 		if (signal_pending(current))
1412 			return sock_intr_errno(timeo);
1413 	}
1414 
1415 	if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
1416 		return tls_rx_rec_wait(sk, psock, nonblock, false);
1417 
1418 	return 1;
1419 }
1420 
1421 static int tls_setup_from_iter(struct iov_iter *from,
1422 			       int length, int *pages_used,
1423 			       struct scatterlist *to,
1424 			       int to_max_pages)
1425 {
1426 	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1427 	struct page *pages[MAX_SKB_FRAGS];
1428 	unsigned int size = 0;
1429 	ssize_t copied, use;
1430 	size_t offset;
1431 
1432 	while (length > 0) {
1433 		i = 0;
1434 		maxpages = to_max_pages - num_elem;
1435 		if (maxpages == 0) {
1436 			rc = -EFAULT;
1437 			goto out;
1438 		}
1439 		copied = iov_iter_get_pages2(from, pages,
1440 					    length,
1441 					    maxpages, &offset);
1442 		if (copied <= 0) {
1443 			rc = -EFAULT;
1444 			goto out;
1445 		}
1446 
1447 		length -= copied;
1448 		size += copied;
1449 		while (copied) {
1450 			use = min_t(int, copied, PAGE_SIZE - offset);
1451 
1452 			sg_set_page(&to[num_elem],
1453 				    pages[i], use, offset);
1454 			sg_unmark_end(&to[num_elem]);
1455 			/* We do not uncharge memory from this API */
1456 
1457 			offset = 0;
1458 			copied -= use;
1459 
1460 			i++;
1461 			num_elem++;
1462 		}
1463 	}
1464 	/* Mark the end in the last sg entry if newly added */
1465 	if (num_elem > *pages_used)
1466 		sg_mark_end(&to[num_elem - 1]);
1467 out:
1468 	if (rc)
1469 		iov_iter_revert(from, size);
1470 	*pages_used = num_elem;
1471 
1472 	return rc;
1473 }
1474 
1475 static struct sk_buff *
1476 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1477 		     unsigned int full_len)
1478 {
1479 	struct strp_msg *clr_rxm;
1480 	struct sk_buff *clr_skb;
1481 	int err;
1482 
1483 	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1484 				       &err, sk->sk_allocation);
1485 	if (!clr_skb)
1486 		return NULL;
1487 
1488 	skb_copy_header(clr_skb, skb);
1489 	clr_skb->len = full_len;
1490 	clr_skb->data_len = full_len;
1491 
1492 	clr_rxm = strp_msg(clr_skb);
1493 	clr_rxm->offset = 0;
1494 
1495 	return clr_skb;
1496 }
1497 
1498 /* Decrypt handlers
1499  *
1500  * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
1501  * They must transform the darg in/out argument as follows:
1502  *       |          Input            |         Output
1503  * -------------------------------------------------------------------
1504  *    zc | Zero-copy decrypt allowed | Zero-copy performed
1505  * async | Async decrypt allowed     | Async crypto used / in progress
1506  *   skb |            *              | Output skb
1507  *
1508  * If ZC decryption was performed darg.skb will point to the input skb.
1509  */
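/* A sketch of how a caller (e.g. tls_sw_recvmsg(), later in this file)
 * drives the handlers through this contract (illustrative only):
 *
 *	struct tls_decrypt_arg darg = { .zc = true, .async = async_capable };
 *
 *	err = tls_rx_one_record(sk, msg, &darg);
 *	(on return: darg.zc    - whether zero-copy actually happened,
 *	            darg.async - decrypt still in flight,
 *	            darg.skb   - the record to consume)
 */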
1510 
1511 /* This function decrypts the input skb into either out_iov, out_sg, or
1512  * newly allocated skb buffers. The input parameter 'darg->zc' indicates
1513  * whether zero-copy mode should be tried. With zero-copy mode, either
1514  * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
1515  * NULL, the decryption happens inside newly allocated skb buffers, i.e.
1516  * zero-copy gets disabled and 'darg->zc' is updated.
1517  */
1518 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
1519 			  struct scatterlist *out_sg,
1520 			  struct tls_decrypt_arg *darg)
1521 {
1522 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1523 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1524 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1525 	int n_sgin, n_sgout, aead_size, err, pages = 0;
1526 	struct sk_buff *skb = tls_strp_msg(ctx);
1527 	const struct strp_msg *rxm = strp_msg(skb);
1528 	const struct tls_msg *tlm = tls_msg(skb);
1529 	struct aead_request *aead_req;
1530 	struct scatterlist *sgin = NULL;
1531 	struct scatterlist *sgout = NULL;
1532 	const int data_len = rxm->full_len - prot->overhead_size;
1533 	int tail_pages = !!prot->tail_size;
1534 	struct tls_decrypt_ctx *dctx;
1535 	struct sk_buff *clear_skb;
1536 	int iv_offset = 0;
1537 	u8 *mem;
1538 
1539 	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1540 			 rxm->full_len - prot->prepend_size);
1541 	if (n_sgin < 1)
1542 		return n_sgin ?: -EBADMSG;
1543 
1544 	if (darg->zc && (out_iov || out_sg)) {
1545 		clear_skb = NULL;
1546 
1547 		if (out_iov)
1548 			n_sgout = 1 + tail_pages +
1549 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1550 		else
1551 			n_sgout = sg_nents(out_sg);
1552 	} else {
1553 		darg->zc = false;
1554 
1555 		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1556 		if (!clear_skb)
1557 			return -ENOMEM;
1558 
1559 		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
1560 	}
1561 
1562 	/* Increment to accommodate AAD */
1563 	n_sgin = n_sgin + 1;
1564 
1565 	/* Allocate a single block of memory which contains
1566 	 *   aead_req || tls_decrypt_ctx.
1567 	 * Both structs are variable length.
1568 	 */
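	/* Resulting layout of 'mem' (a sketch; sizes depend on the cipher):
	 *
	 *   +-----------------------------+------------------------------+
	 *   | aead_request + crypto       | tls_decrypt_ctx              |
	 *   | reqsize (aead_size,         |   ... sg[n_sgin + n_sgout]   |
	 *   | aligned to *dctx)           |                              |
	 *   +-----------------------------+------------------------------+
	 */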
1569 	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1570 	aead_size = ALIGN(aead_size, __alignof__(*dctx));
1571 	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
1572 		      sk->sk_allocation);
1573 	if (!mem) {
1574 		err = -ENOMEM;
1575 		goto exit_free_skb;
1576 	}
1577 
1578 	/* Segment the allocated memory */
1579 	aead_req = (struct aead_request *)mem;
1580 	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1581 	dctx->sk = sk;
1582 	sgin = &dctx->sg[0];
1583 	sgout = &dctx->sg[n_sgin];
1584 
1585 	/* For CCM based ciphers, first byte of nonce+iv is a constant */
1586 	switch (prot->cipher_type) {
1587 	case TLS_CIPHER_AES_CCM_128:
1588 		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1589 		iv_offset = 1;
1590 		break;
1591 	case TLS_CIPHER_SM4_CCM:
1592 		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1593 		iv_offset = 1;
1594 		break;
1595 	}
1596 
1597 	/* Prepare IV */
1598 	if (prot->version == TLS_1_3_VERSION ||
1599 	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1600 		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1601 		       prot->iv_size + prot->salt_size);
1602 	} else {
1603 		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1604 				    &dctx->iv[iv_offset] + prot->salt_size,
1605 				    prot->iv_size);
1606 		if (err < 0)
1607 			goto exit_free;
1608 		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1609 	}
1610 	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1611 
1612 	/* Prepare AAD */
1613 	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1614 		     prot->tail_size,
1615 		     tls_ctx->rx.rec_seq, tlm->control, prot);
1616 
1617 	/* Prepare sgin */
1618 	sg_init_table(sgin, n_sgin);
1619 	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1620 	err = skb_to_sgvec(skb, &sgin[1],
1621 			   rxm->offset + prot->prepend_size,
1622 			   rxm->full_len - prot->prepend_size);
1623 	if (err < 0)
1624 		goto exit_free;
1625 
1626 	if (clear_skb) {
1627 		sg_init_table(sgout, n_sgout);
1628 		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1629 
1630 		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
1631 				   data_len + prot->tail_size);
1632 		if (err < 0)
1633 			goto exit_free;
1634 	} else if (out_iov) {
1635 		sg_init_table(sgout, n_sgout);
1636 		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1637 
1638 		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
1639 					  (n_sgout - 1 - tail_pages));
1640 		if (err < 0)
1641 			goto exit_free_pages;
1642 
1643 		if (prot->tail_size) {
1644 			sg_unmark_end(&sgout[pages]);
1645 			sg_set_buf(&sgout[pages + 1], &dctx->tail,
1646 				   prot->tail_size);
1647 			sg_mark_end(&sgout[pages + 1]);
1648 		}
1649 	} else if (out_sg) {
1650 		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1651 	}
1652 	dctx->free_sgout = !!pages;
1653 
1654 	/* Prepare and submit AEAD request */
1655 	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
1656 				data_len + prot->tail_size, aead_req, darg);
1657 	if (err) {
1658 		if (darg->async_done)
1659 			goto exit_free_skb;
1660 		goto exit_free_pages;
1661 	}
1662 
1663 	darg->skb = clear_skb ?: tls_strp_msg(ctx);
1664 	clear_skb = NULL;
1665 
1666 	if (unlikely(darg->async)) {
1667 		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
1668 		if (err) {
1669 			err = tls_decrypt_async_wait(ctx);
1670 			darg->async = false;
1671 		}
1672 		return err;
1673 	}
1674 
1675 	if (unlikely(darg->async_done))
1676 		return 0;
1677 
1678 	if (prot->tail_size)
1679 		darg->tail = dctx->tail;
1680 
1681 exit_free_pages:
1682 	/* Release the pages in case iov was mapped to pages */
1683 	for (; pages > 0; pages--)
1684 		put_page(sg_page(&sgout[pages]));
1685 exit_free:
1686 	kfree(mem);
1687 exit_free_skb:
1688 	consume_skb(clear_skb);
1689 	return err;
1690 }
1691 
1692 static int
1693 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
1694 	       struct msghdr *msg, struct tls_decrypt_arg *darg)
1695 {
1696 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1697 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1698 	struct strp_msg *rxm;
1699 	int pad, err;
1700 
1701 	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
1702 	if (err < 0) {
1703 		if (err == -EBADMSG)
1704 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1705 		return err;
1706 	}
1707 	/* keep going even for ->async, the code below is TLS 1.3 */
1708 
1709 	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
1710 	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1711 		     darg->tail != TLS_RECORD_TYPE_DATA)) {
1712 		darg->zc = false;
1713 		if (!darg->tail)
1714 			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1715 		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1716 		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
1717 	}
1718 
1719 	pad = tls_padding_length(prot, darg->skb, darg);
1720 	if (pad < 0) {
1721 		if (darg->skb != tls_strp_msg(ctx))
1722 			consume_skb(darg->skb);
1723 		return pad;
1724 	}
1725 
1726 	rxm = strp_msg(darg->skb);
1727 	rxm->full_len -= pad;
1728 
1729 	return 0;
1730 }
1731 
1732 static int
1733 tls_decrypt_device(struct sock *sk, struct msghdr *msg,
1734 		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
1735 {
1736 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1737 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1738 	struct strp_msg *rxm;
1739 	int pad, err;
1740 
1741 	if (tls_ctx->rx_conf != TLS_HW)
1742 		return 0;
1743 
1744 	err = tls_device_decrypted(sk, tls_ctx);
1745 	if (err <= 0)
1746 		return err;
1747 
1748 	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
1749 	if (pad < 0)
1750 		return pad;
1751 
1752 	darg->async = false;
1753 	darg->skb = tls_strp_msg(ctx);
1754 	/* ->zc downgrade check, in case TLS 1.3 gets here */
1755 	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
1756 		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
1757 
1758 	rxm = strp_msg(darg->skb);
1759 	rxm->full_len -= pad;
1760 
1761 	if (!darg->zc) {
1762 		/* Non-ZC case needs a real skb */
1763 		darg->skb = tls_strp_msg_detach(ctx);
1764 		if (!darg->skb)
1765 			return -ENOMEM;
1766 	} else {
1767 		unsigned int off, len;
1768 
1769 		/* In ZC case nobody cares about the output skb.
1770 		 * Just copy the data here. Note the skb is not fully trimmed.
1771 		 */
1772 		off = rxm->offset + prot->prepend_size;
1773 		len = rxm->full_len - prot->overhead_size;
1774 
1775 		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
1776 		if (err)
1777 			return err;
1778 	}
1779 	return 1;
1780 }
1781 
1782 static int tls_check_pending_rekey(struct sock *sk, struct tls_context *ctx,
1783 				   struct sk_buff *skb)
1784 {
1785 	const struct strp_msg *rxm = strp_msg(skb);
1786 	const struct tls_msg *tlm = tls_msg(skb);
1787 	char hs_type;
1788 	int err;
1789 
1790 	if (likely(tlm->control != TLS_RECORD_TYPE_HANDSHAKE))
1791 		return 0;
1792 
1793 	if (rxm->full_len < 1)
1794 		return 0;
1795 
1796 	err = skb_copy_bits(skb, rxm->offset, &hs_type, 1);
1797 	if (err < 0) {
1798 		DEBUG_NET_WARN_ON_ONCE(1);
1799 		return err;
1800 	}
1801 
1802 	if (hs_type == TLS_HANDSHAKE_KEYUPDATE) {
1803 		struct tls_sw_context_rx *rx_ctx = ctx->priv_ctx_rx;
1804 
1805 		WRITE_ONCE(rx_ctx->key_update_pending, true);
1806 		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXREKEYRECEIVED);
1807 	}
1808 
1809 	return 0;
1810 }
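
/*
 * Once key_update_pending is set, records protected by the
 * next-generation key cannot be decrypted until userspace derives that
 * key and installs it. A hedged userspace sketch (the key schedule
 * comes from a TLS library and is omitted; fd, key, iv, salt and seq
 * are assumptions):
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_3_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	memcpy(ci.key, key, sizeof(ci.key));
 *	memcpy(ci.iv, iv, sizeof(ci.iv));
 *	memcpy(ci.salt, salt, sizeof(ci.salt));
 *	memcpy(ci.rec_seq, seq, sizeof(ci.rec_seq));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 */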
1811 
1812 static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
1813 			     struct tls_decrypt_arg *darg)
1814 {
1815 	struct tls_context *tls_ctx = tls_get_ctx(sk);
1816 	struct tls_prot_info *prot = &tls_ctx->prot_info;
1817 	struct strp_msg *rxm;
1818 	int err;
1819 
1820 	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
1821 	if (!err)
1822 		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
1823 	if (err < 0)
1824 		return err;
1825 
1826 	rxm = strp_msg(darg->skb);
1827 	rxm->offset += prot->prepend_size;
1828 	rxm->full_len -= prot->overhead_size;
1829 	tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1830 
1831 	return tls_check_pending_rekey(sk, tls_ctx, darg->skb);
1832 }
1833 
1834 int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1835 {
1836 	struct tls_decrypt_arg darg = { .zc = true, };
1837 
1838 	return tls_decrypt_sg(sk, NULL, sgout, &darg);
1839 }
1840 
1841 /* All records returned from a recvmsg() call must have the same type.
1842  * 0 is not a valid content type. Use it as "no type reported, yet".
1843  */
1844 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1845 				   u8 *control)
1846 {
1847 	int err;
1848 
1849 	if (!*control) {
1850 		*control = tlm->control;
1851 		if (!*control)
1852 			return -EBADMSG;
1853 
1854 		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1855 			       sizeof(*control), control);
1856 		if (*control != TLS_RECORD_TYPE_DATA) {
1857 			if (err || msg->msg_flags & MSG_CTRUNC)
1858 				return -EIO;
1859 		}
1860 	} else if (*control != tlm->control) {
1861 		return 0;
1862 	}
1863 
1864 	return 1;
1865 }
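
/*
 * On the receiving side, the record type queued above surfaces as a
 * cmsg. A hedged userspace sketch (handle_ctrl_record() is a
 * hypothetical handler; 23 is the application_data content type):
 *
 *	char data[16384], cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr m = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	ssize_t n = recvmsg(fd, &m, 0);
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *
 *	if (n > 0 && c && c->cmsg_level == SOL_TLS &&
 *	    c->cmsg_type == TLS_GET_RECORD_TYPE &&
 *	    *CMSG_DATA(c) != 23)
 *		handle_ctrl_record(*CMSG_DATA(c), data, n);
 */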
1866 
1867 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
1868 {
1869 	tls_strp_msg_done(&ctx->strp);
1870 }
1871 
1872 /* This function traverses the rx_list in the TLS receive context and copies
1873  * the decrypted records into the buffer provided by the caller when zero
1874  * copy is not in use. Further, records are removed from the rx_list if this
1875  * is not a peek case and the record has been consumed completely.
1876  */
1877 static int process_rx_list(struct tls_sw_context_rx *ctx,
1878 			   struct msghdr *msg,
1879 			   u8 *control,
1880 			   size_t skip,
1881 			   size_t len,
1882 			   bool is_peek,
1883 			   bool *more)
1884 {
1885 	struct sk_buff *skb = skb_peek(&ctx->rx_list);
1886 	struct tls_msg *tlm;
1887 	ssize_t copied = 0;
1888 	int err;
1889 
1890 	while (skip && skb) {
1891 		struct strp_msg *rxm = strp_msg(skb);
1892 		tlm = tls_msg(skb);
1893 
1894 		err = tls_record_content_type(msg, tlm, control);
1895 		if (err <= 0)
1896 			goto more;
1897 
1898 		if (skip < rxm->full_len)
1899 			break;
1900 
1901 		skip = skip - rxm->full_len;
1902 		skb = skb_peek_next(skb, &ctx->rx_list);
1903 	}
1904 
1905 	while (len && skb) {
1906 		struct sk_buff *next_skb;
1907 		struct strp_msg *rxm = strp_msg(skb);
1908 		int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1909 
1910 		tlm = tls_msg(skb);
1911 
1912 		err = tls_record_content_type(msg, tlm, control);
1913 		if (err <= 0)
1914 			goto more;
1915 
1916 		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1917 					    msg, chunk);
1918 		if (err < 0)
1919 			goto more;
1920 
1921 		len = len - chunk;
1922 		copied = copied + chunk;
1923 
1924 		/* Consume the data from the record in the non-peek case */
1925 		if (!is_peek) {
1926 			rxm->offset = rxm->offset + chunk;
1927 			rxm->full_len = rxm->full_len - chunk;
1928 
1929 			/* Return if there is unconsumed data in the record */
1930 			if (rxm->full_len - skip)
1931 				break;
1932 		}
1933 
1934 		/* The remaining skip-bytes must lie in 1st record in rx_list.
1935 		 * So from the 2nd record, 'skip' should be 0.
1936 		 */
1937 		skip = 0;
1938 
1939 		if (msg)
1940 			msg->msg_flags |= MSG_EOR;
1941 
1942 		next_skb = skb_peek_next(skb, &ctx->rx_list);
1943 
1944 		if (!is_peek) {
1945 			__skb_unlink(skb, &ctx->rx_list);
1946 			consume_skb(skb);
1947 		}
1948 
1949 		skb = next_skb;
1950 	}
1951 	err = 0;
1952 
1953 out:
1954 	return copied ? : err;
1955 more:
1956 	if (more)
1957 		*more = true;
1958 	goto out;
1959 }
1960 
1961 static bool
1962 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1963 		       size_t len_left, size_t decrypted, ssize_t done,
1964 		       size_t *flushed_at)
1965 {
1966 	size_t max_rec;
1967 
1968 	if (len_left <= decrypted)
1969 		return false;
1970 
1971 	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1972 	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1973 		return false;
1974 
1975 	*flushed_at = done;
1976 	return sk_flush_backlog(sk);
1977 }
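
/*
 * Rationale: while the reader owns the socket lock, softirq delivers
 * new segments to the socket backlog instead of the receive queue.
 * The helper above therefore flushes the backlog once ~128K has been
 * copied since the last flush, or sooner when tcp_inq() no longer
 * holds a full record and progress depends on backlog processing.
 */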
1978 
1979 static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
1980 				 bool nonblock)
1981 {
1982 	long timeo;
1983 	int ret;
1984 
1985 	timeo = sock_rcvtimeo(sk, nonblock);
1986 
1987 	while (unlikely(ctx->reader_present)) {
1988 		DEFINE_WAIT_FUNC(wait, woken_wake_function);
1989 
1990 		ctx->reader_contended = 1;
1991 
1992 		add_wait_queue(&ctx->wq, &wait);
1993 		ret = sk_wait_event(sk, &timeo,
1994 				    !READ_ONCE(ctx->reader_present), &wait);
1995 		remove_wait_queue(&ctx->wq, &wait);
1996 
1997 		if (timeo <= 0)
1998 			return -EAGAIN;
1999 		if (signal_pending(current))
2000 			return sock_intr_errno(timeo);
2001 		if (ret < 0)
2002 			return ret;
2003 	}
2004 
2005 	WRITE_ONCE(ctx->reader_present, 1);
2006 
2007 	return 0;
2008 }
2009 
2010 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
2011 			      bool nonblock)
2012 {
2013 	int err;
2014 
2015 	lock_sock(sk);
2016 	err = tls_rx_reader_acquire(sk, ctx, nonblock);
2017 	if (err)
2018 		release_sock(sk);
2019 	return err;
2020 }
2021 
2022 static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
2023 {
2024 	if (unlikely(ctx->reader_contended)) {
2025 		if (wq_has_sleeper(&ctx->wq))
2026 			wake_up(&ctx->wq);
2027 		else
2028 			ctx->reader_contended = 0;
2029 
2030 		WARN_ON_ONCE(!ctx->reader_present);
2031 	}
2032 
2033 	WRITE_ONCE(ctx->reader_present, 0);
2034 }
2035 
2036 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
2037 {
2038 	tls_rx_reader_release(sk, ctx);
2039 	release_sock(sk);
2040 }
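
/*
 * Reader exclusion: the rx path allows only one reader at a time, but
 * that reader may drop the socket lock while sleeping for data, so the
 * socket lock alone cannot serialize recvmsg(), splice_read() and
 * read_sock(). ctx->reader_present is the real gate; would-be readers
 * park on ctx->wq, and reader_contended lets the owner skip wake_up()
 * on the uncontended fast path.
 */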
2041 
2042 int tls_sw_recvmsg(struct sock *sk,
2043 		   struct msghdr *msg,
2044 		   size_t len,
2045 		   int flags,
2046 		   int *addr_len)
2047 {
2048 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2049 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2050 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2051 	ssize_t decrypted = 0, async_copy_bytes = 0;
2052 	struct sk_psock *psock;
2053 	unsigned char control = 0;
2054 	size_t flushed_at = 0;
2055 	struct strp_msg *rxm;
2056 	struct tls_msg *tlm;
2057 	ssize_t copied = 0;
2058 	ssize_t peeked = 0;
2059 	bool async = false;
2060 	int target, err;
2061 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
2062 	bool is_peek = flags & MSG_PEEK;
2063 	bool rx_more = false;
2064 	bool released = true;
2065 	bool bpf_strp_enabled;
2066 	bool zc_capable;
2067 
2068 	if (unlikely(flags & MSG_ERRQUEUE))
2069 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
2070 
2071 	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
2072 	if (err < 0)
2073 		return err;
2074 	psock = sk_psock_get(sk);
2075 	bpf_strp_enabled = sk_psock_strp_enabled(psock);
2076 
2077 	/* If crypto failed the connection is broken */
2078 	err = ctx->async_wait.err;
2079 	if (err)
2080 		goto end;
2081 
2082 	/* Process pending decrypted records. It must be non-zero-copy */
2083 	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
2084 	if (err < 0)
2085 		goto end;
2086 
2087 	/* process_rx_list() will set @control if it processed any records */
2088 	copied = err;
2089 	if (len <= copied || rx_more ||
2090 	    (control && control != TLS_RECORD_TYPE_DATA))
2091 		goto end;
2092 
2093 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2094 	len = len - copied;
2095 
2096 	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
2097 		ctx->zc_capable;
2098 	decrypted = 0;
2099 	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
2100 		struct tls_decrypt_arg darg;
2101 		int to_decrypt, chunk;
2102 
2103 		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
2104 				      released);
2105 		if (err <= 0) {
2106 			if (psock) {
2107 				chunk = sk_msg_recvmsg(sk, psock, msg, len,
2108 						       flags);
2109 				if (chunk > 0) {
2110 					decrypted += chunk;
2111 					len -= chunk;
2112 					continue;
2113 				}
2114 			}
2115 			goto recv_end;
2116 		}
2117 
2118 		memset(&darg.inargs, 0, sizeof(darg.inargs));
2119 
2120 		rxm = strp_msg(tls_strp_msg(ctx));
2121 		tlm = tls_msg(tls_strp_msg(ctx));
2122 
2123 		to_decrypt = rxm->full_len - prot->overhead_size;
2124 
2125 		if (zc_capable && to_decrypt <= len &&
2126 		    tlm->control == TLS_RECORD_TYPE_DATA)
2127 			darg.zc = true;
2128 
2129 		/* Do not use async mode if record is non-data */
2130 		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2131 			darg.async = ctx->async_capable;
2132 		else
2133 			darg.async = false;
2134 
2135 		err = tls_rx_one_record(sk, msg, &darg);
2136 		if (err < 0) {
2137 			tls_err_abort(sk, -EBADMSG);
2138 			goto recv_end;
2139 		}
2140 
2141 		async |= darg.async;
2142 
2143 		/* If the type of records being processed is not known yet,
2144 		 * set it to record type just dequeued. If it is already known,
2145 		 * but does not match the record type just dequeued, go to end.
2146 		 * We always get the record type here, since for TLS 1.2 the
2147 		 * record type is known as soon as the record is dequeued from
2148 		 * the stream parser. For TLS 1.3, we disable async.
2149 		 */
2150 		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2151 		if (err <= 0) {
2152 			DEBUG_NET_WARN_ON_ONCE(darg.zc);
2153 			tls_rx_rec_done(ctx);
2154 put_on_rx_list_err:
2155 			__skb_queue_tail(&ctx->rx_list, darg.skb);
2156 			goto recv_end;
2157 		}
2158 
2159 		/* periodically flush backlog, and feed strparser */
2160 		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2161 						  decrypted + copied,
2162 						  &flushed_at);
2163 
2164 		/* TLS 1.3 may have updated the length by more than overhead */
2165 		rxm = strp_msg(darg.skb);
2166 		chunk = rxm->full_len;
2167 		tls_rx_rec_done(ctx);
2168 
2169 		if (!darg.zc) {
2170 			bool partially_consumed = chunk > len;
2171 			struct sk_buff *skb = darg.skb;
2172 
2173 			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2174 
2175 			if (async) {
2176 				/* TLS 1.2-only, to_decrypt must be text len */
2177 				chunk = min_t(int, to_decrypt, len);
2178 				async_copy_bytes += chunk;
2179 put_on_rx_list:
2180 				decrypted += chunk;
2181 				len -= chunk;
2182 				__skb_queue_tail(&ctx->rx_list, skb);
2183 				if (unlikely(control != TLS_RECORD_TYPE_DATA))
2184 					break;
2185 				continue;
2186 			}
2187 
2188 			if (bpf_strp_enabled) {
2189 				released = true;
2190 				err = sk_psock_tls_strp_read(psock, skb);
2191 				if (err != __SK_PASS) {
2192 					rxm->offset = rxm->offset + rxm->full_len;
2193 					rxm->full_len = 0;
2194 					if (err == __SK_DROP)
2195 						consume_skb(skb);
2196 					continue;
2197 				}
2198 			}
2199 
2200 			if (partially_consumed)
2201 				chunk = len;
2202 
2203 			err = skb_copy_datagram_msg(skb, rxm->offset,
2204 						    msg, chunk);
2205 			if (err < 0)
2206 				goto put_on_rx_list_err;
2207 
2208 			if (is_peek) {
2209 				peeked += chunk;
2210 				goto put_on_rx_list;
2211 			}
2212 
2213 			if (partially_consumed) {
2214 				rxm->offset += chunk;
2215 				rxm->full_len -= chunk;
2216 				goto put_on_rx_list;
2217 			}
2218 
2219 			consume_skb(skb);
2220 		}
2221 
2222 		decrypted += chunk;
2223 		len -= chunk;
2224 
2225 		/* Return full control message to userspace before trying
2226 		 * to parse another message type
2227 		 */
2228 		msg->msg_flags |= MSG_EOR;
2229 		if (control != TLS_RECORD_TYPE_DATA)
2230 			break;
2231 	}
2232 
2233 recv_end:
2234 	if (async) {
2235 		int ret;
2236 
2237 		/* Wait for all previously submitted records to be decrypted */
2238 		ret = tls_decrypt_async_wait(ctx);
2239 
2240 		if (ret) {
2241 			if (err >= 0 || err == -EINPROGRESS)
2242 				err = ret;
2243 			goto end;
2244 		}
2245 
2246 		/* Drain records from the rx_list & copy if required */
2247 		if (is_peek)
2248 			err = process_rx_list(ctx, msg, &control, copied + peeked,
2249 					      decrypted - peeked, is_peek, NULL);
2250 		else
2251 			err = process_rx_list(ctx, msg, &control, 0,
2252 					      async_copy_bytes, is_peek, NULL);
2253 
2254 		/* we could have copied less than we wanted, and possibly nothing */
2255 		decrypted += max(err, 0) - async_copy_bytes;
2256 	}
2257 
2258 	copied += decrypted;
2259 
2260 end:
2261 	tls_rx_reader_unlock(sk, ctx);
2262 	if (psock)
2263 		sk_psock_put(sk, psock);
2264 	return copied ? : err;
2265 }
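
/*
 * Note on the async accounting at recv_end above: records decrypted by
 * an async AEAD in the non-zerocopy path are queued to rx_list before
 * their plaintext has been copied to the user iovec, and their sizes
 * are counted optimistically in both decrypted and async_copy_bytes.
 * After tls_decrypt_async_wait(), process_rx_list() performs the real
 * copy, and the final arithmetic swaps the optimistic count for the
 * bytes actually delivered.
 */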
2266 
2267 ssize_t tls_sw_splice_read(struct socket *sock,  loff_t *ppos,
2268 			   struct pipe_inode_info *pipe,
2269 			   size_t len, unsigned int flags)
2270 {
2271 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2272 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2273 	struct strp_msg *rxm = NULL;
2274 	struct sock *sk = sock->sk;
2275 	struct tls_msg *tlm;
2276 	struct sk_buff *skb;
2277 	ssize_t copied = 0;
2278 	int chunk;
2279 	int err;
2280 
2281 	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2282 	if (err < 0)
2283 		return err;
2284 
2285 	if (!skb_queue_empty(&ctx->rx_list)) {
2286 		skb = __skb_dequeue(&ctx->rx_list);
2287 	} else {
2288 		struct tls_decrypt_arg darg;
2289 
2290 		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2291 				      true);
2292 		if (err <= 0)
2293 			goto splice_read_end;
2294 
2295 		memset(&darg.inargs, 0, sizeof(darg.inargs));
2296 
2297 		err = tls_rx_one_record(sk, NULL, &darg);
2298 		if (err < 0) {
2299 			tls_err_abort(sk, -EBADMSG);
2300 			goto splice_read_end;
2301 		}
2302 
2303 		tls_rx_rec_done(ctx);
2304 		skb = darg.skb;
2305 	}
2306 
2307 	rxm = strp_msg(skb);
2308 	tlm = tls_msg(skb);
2309 
2310 	/* splice does not support reading control messages */
2311 	if (tlm->control != TLS_RECORD_TYPE_DATA) {
2312 		err = -EINVAL;
2313 		goto splice_requeue;
2314 	}
2315 
2316 	chunk = min_t(unsigned int, rxm->full_len, len);
2317 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2318 	if (copied < 0)
2319 		goto splice_requeue;
2320 
2321 	if (chunk < rxm->full_len) {
2322 		rxm->offset += len;
2323 		rxm->full_len -= len;
2324 		goto splice_requeue;
2325 	}
2326 
2327 	consume_skb(skb);
2328 
2329 splice_read_end:
2330 	tls_rx_reader_unlock(sk, ctx);
2331 	return copied ? : err;
2332 
2333 splice_requeue:
2334 	__skb_queue_head(&ctx->rx_list, skb);
2335 	goto splice_read_end;
2336 }
2337 
2338 int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
2339 		     sk_read_actor_t read_actor)
2340 {
2341 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2342 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2343 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2344 	struct strp_msg *rxm = NULL;
2345 	struct sk_buff *skb = NULL;
2346 	struct sk_psock *psock;
2347 	size_t flushed_at = 0;
2348 	bool released = true;
2349 	struct tls_msg *tlm;
2350 	ssize_t copied = 0;
2351 	ssize_t decrypted;
2352 	int err, used;
2353 
2354 	psock = sk_psock_get(sk);
2355 	if (psock) {
2356 		sk_psock_put(sk, psock);
2357 		return -EINVAL;
2358 	}
2359 	err = tls_rx_reader_acquire(sk, ctx, true);
2360 	if (err < 0)
2361 		return err;
2362 
2363 	/* If crypto failed the connection is broken */
2364 	err = ctx->async_wait.err;
2365 	if (err)
2366 		goto read_sock_end;
2367 
2368 	decrypted = 0;
2369 	do {
2370 		if (!skb_queue_empty(&ctx->rx_list)) {
2371 			skb = __skb_dequeue(&ctx->rx_list);
2372 			rxm = strp_msg(skb);
2373 			tlm = tls_msg(skb);
2374 		} else {
2375 			struct tls_decrypt_arg darg;
2376 
2377 			err = tls_rx_rec_wait(sk, NULL, true, released);
2378 			if (err <= 0)
2379 				goto read_sock_end;
2380 
2381 			memset(&darg.inargs, 0, sizeof(darg.inargs));
2382 
2383 			err = tls_rx_one_record(sk, NULL, &darg);
2384 			if (err < 0) {
2385 				tls_err_abort(sk, -EBADMSG);
2386 				goto read_sock_end;
2387 			}
2388 
2389 			released = tls_read_flush_backlog(sk, prot, INT_MAX,
2390 							  0, decrypted,
2391 							  &flushed_at);
2392 			skb = darg.skb;
2393 			rxm = strp_msg(skb);
2394 			tlm = tls_msg(skb);
2395 			decrypted += rxm->full_len;
2396 
2397 			tls_rx_rec_done(ctx);
2398 		}
2399 
2400 		/* read_sock does not support reading control messages */
2401 		if (tlm->control != TLS_RECORD_TYPE_DATA) {
2402 			err = -EINVAL;
2403 			goto read_sock_requeue;
2404 		}
2405 
2406 		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
2407 		if (used <= 0) {
2408 			if (!copied)
2409 				err = used;
2410 			goto read_sock_requeue;
2411 		}
2412 		copied += used;
2413 		if (used < rxm->full_len) {
2414 			rxm->offset += used;
2415 			rxm->full_len -= used;
2416 			if (!desc->count)
2417 				goto read_sock_requeue;
2418 		} else {
2419 			consume_skb(skb);
2420 			if (!desc->count)
2421 				skb = NULL;
2422 		}
2423 	} while (skb);
2424 
2425 read_sock_end:
2426 	tls_rx_reader_release(sk, ctx);
2427 	return copied ? : err;
2428 
2429 read_sock_requeue:
2430 	__skb_queue_head(&ctx->rx_list, skb);
2431 	goto read_sock_end;
2432 }
2433 
2434 bool tls_sw_sock_is_readable(struct sock *sk)
2435 {
2436 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2437 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2438 	bool ingress_empty = true;
2439 	struct sk_psock *psock;
2440 
2441 	rcu_read_lock();
2442 	psock = sk_psock(sk);
2443 	if (psock)
2444 		ingress_empty = list_empty(&psock->ingress_msg);
2445 	rcu_read_unlock();
2446 
2447 	return !ingress_empty || tls_strp_msg_ready(ctx) ||
2448 		!skb_queue_empty(&ctx->rx_list);
2449 }
2450 
2451 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2452 {
2453 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2454 	struct tls_prot_info *prot = &tls_ctx->prot_info;
2455 	char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
2456 	size_t cipher_overhead;
2457 	size_t data_len = 0;
2458 	int ret;
2459 
2460 	/* Verify that we have a full TLS header, or wait for more data */
2461 	if (strp->stm.offset + prot->prepend_size > skb->len)
2462 		return 0;
2463 
2464 	/* Sanity-check size of on-stack buffer. */
2465 	if (WARN_ON(prot->prepend_size > sizeof(header))) {
2466 		ret = -EINVAL;
2467 		goto read_failure;
2468 	}
2469 
2470 	/* Linearize header to local buffer */
2471 	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2472 	if (ret < 0)
2473 		goto read_failure;
2474 
2475 	strp->mark = header[0];
2476 
2477 	data_len = ((header[4] & 0xFF) | (header[3] << 8));
2478 
2479 	cipher_overhead = prot->tag_size;
2480 	if (prot->version != TLS_1_3_VERSION &&
2481 	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2482 		cipher_overhead += prot->iv_size;
2483 
2484 	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2485 	    prot->tail_size) {
2486 		ret = -EMSGSIZE;
2487 		goto read_failure;
2488 	}
2489 	if (data_len < cipher_overhead) {
2490 		ret = -EBADMSG;
2491 		goto read_failure;
2492 	}
2493 
2494 	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2495 	if (header[1] != TLS_1_2_VERSION_MINOR ||
2496 	    header[2] != TLS_1_2_VERSION_MAJOR) {
2497 		ret = -EINVAL;
2498 		goto read_failure;
2499 	}
2500 
2501 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2502 				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
2503 	return data_len + TLS_HEADER_SIZE;
2504 
2505 read_failure:
2506 	tls_strp_abort_strp(strp, ret);
2507 	return ret;
2508 }
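
/*
 * For reference, the 5-byte header parsed above, written as a struct
 * (a sketch; this file reads the raw bytes instead):
 *
 *	struct tls_wire_hdr {
 *		u8     type;		header[0], saved in strp->mark
 *		u8     legacy_major;	header[1], always 0x03
 *		u8     legacy_minor;	header[2], always 0x03
 *		__be16 length;		header[3..4], payload length
 *	} __packed;
 *
 * Both legacy version bytes are 3 on the wire for TLS 1.2 and 1.3,
 * which is why the major/minor comparison order above is harmless.
 */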
2509 
2510 void tls_rx_msg_ready(struct tls_strparser *strp)
2511 {
2512 	struct tls_sw_context_rx *ctx;
2513 
2514 	ctx = container_of(strp, struct tls_sw_context_rx, strp);
2515 	ctx->saved_data_ready(strp->sk);
2516 }
2517 
2518 static void tls_data_ready(struct sock *sk)
2519 {
2520 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2521 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2522 	struct sk_psock *psock;
2523 	gfp_t alloc_save;
2524 
2525 	trace_sk_data_ready(sk);
2526 
2527 	alloc_save = sk->sk_allocation;
2528 	sk->sk_allocation = GFP_ATOMIC;
2529 	tls_strp_data_ready(&ctx->strp);
2530 	sk->sk_allocation = alloc_save;
2531 
2532 	psock = sk_psock_get(sk);
2533 	if (psock) {
2534 		if (!list_empty(&psock->ingress_msg))
2535 			ctx->saved_data_ready(sk);
2536 		sk_psock_put(sk, psock);
2537 	}
2538 }
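
/* sk_data_ready runs in softirq context, so the strparser must not
 * sleep while pulling skbs; the temporary GFP_ATOMIC override above
 * keeps any allocation it performs non-blocking.
 */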
2539 
2540 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2541 {
2542 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2543 
2544 	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2545 	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2546 	disable_delayed_work_sync(&ctx->tx_work.work);
2547 }
2548 
2549 void tls_sw_release_resources_tx(struct sock *sk)
2550 {
2551 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2552 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2553 	struct tls_rec *rec, *tmp;
2554 
2555 	/* Wait for any pending async encryptions to complete */
2556 	tls_encrypt_async_wait(ctx);
2557 
2558 	tls_tx_records(sk, -1);
2559 
2560 	/* Free up un-sent records in tx_list. First, free
2561 	 * the partially sent record if any at head of tx_list.
2562 	 */
2563 	if (tls_ctx->partially_sent_record) {
2564 		tls_free_partial_record(sk, tls_ctx);
2565 		rec = list_first_entry(&ctx->tx_list,
2566 				       struct tls_rec, list);
2567 		list_del(&rec->list);
2568 		sk_msg_free(sk, &rec->msg_plaintext);
2569 		kfree(rec);
2570 	}
2571 
2572 	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2573 		list_del(&rec->list);
2574 		sk_msg_free(sk, &rec->msg_encrypted);
2575 		sk_msg_free(sk, &rec->msg_plaintext);
2576 		kfree(rec);
2577 	}
2578 
2579 	crypto_free_aead(ctx->aead_send);
2580 	tls_free_open_rec(sk);
2581 }
2582 
2583 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2584 {
2585 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2586 
2587 	kfree(ctx);
2588 }
2589 
2590 void tls_sw_release_resources_rx(struct sock *sk)
2591 {
2592 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2593 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2594 
2595 	if (ctx->aead_recv) {
2596 		__skb_queue_purge(&ctx->rx_list);
2597 		crypto_free_aead(ctx->aead_recv);
2598 		tls_strp_stop(&ctx->strp);
2599 		/* If tls_sw_strparser_arm() was not called (cleanup paths)
2600 		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2601 		 * never swapped.
2602 		 */
2603 		if (ctx->saved_data_ready) {
2604 			write_lock_bh(&sk->sk_callback_lock);
2605 			sk->sk_data_ready = ctx->saved_data_ready;
2606 			write_unlock_bh(&sk->sk_callback_lock);
2607 		}
2608 	}
2609 }
2610 
2611 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2612 {
2613 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2614 
2615 	tls_strp_done(&ctx->strp);
2616 }
2617 
2618 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2619 {
2620 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2621 
2622 	kfree(ctx);
2623 }
2624 
2625 void tls_sw_free_resources_rx(struct sock *sk)
2626 {
2627 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2628 
2629 	tls_sw_release_resources_rx(sk);
2630 	tls_sw_free_ctx_rx(tls_ctx);
2631 }
2632 
2633 /* The work handler to transmit the encrypted records in tx_list */
2634 static void tx_work_handler(struct work_struct *work)
2635 {
2636 	struct delayed_work *delayed_work = to_delayed_work(work);
2637 	struct tx_work *tx_work = container_of(delayed_work,
2638 					       struct tx_work, work);
2639 	struct sock *sk = tx_work->sk;
2640 	struct tls_context *tls_ctx = tls_get_ctx(sk);
2641 	struct tls_sw_context_tx *ctx;
2642 
2643 	if (unlikely(!tls_ctx))
2644 		return;
2645 
2646 	ctx = tls_sw_ctx_tx(tls_ctx);
2647 	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2648 		return;
2649 
2650 	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2651 		return;
2652 
2653 	if (mutex_trylock(&tls_ctx->tx_lock)) {
2654 		lock_sock(sk);
2655 		tls_tx_records(sk, -1);
2656 		release_sock(sk);
2657 		mutex_unlock(&tls_ctx->tx_lock);
2658 	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2659 		/* Someone is holding the tx_lock, they will likely run Tx
2660 		 * and cancel the work on their way out of the lock section.
2661 		 * Schedule a long delay just in case.
2662 		 */
2663 		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2664 	}
2665 }
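
/* Lock ordering here mirrors the TX path: tx_lock first, then the
 * socket lock. The worker uses mutex_trylock() because it may race
 * with a sender already inside the lock section; see the rescheduling
 * comment above.
 */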
2666 
2667 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2668 {
2669 	struct tls_rec *rec;
2670 
2671 	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2672 	if (!rec)
2673 		return false;
2674 
2675 	return READ_ONCE(rec->tx_ready);
2676 }
2677 
2678 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2679 {
2680 	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2681 
2682 	/* Schedule the transmission if tx list is ready */
2683 	if (tls_is_tx_ready(tx_ctx) &&
2684 	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2685 		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2686 }
2687 
2688 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2689 {
2690 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2691 
2692 	write_lock_bh(&sk->sk_callback_lock);
2693 	rx_ctx->saved_data_ready = sk->sk_data_ready;
2694 	sk->sk_data_ready = tls_data_ready;
2695 	write_unlock_bh(&sk->sk_callback_lock);
2696 }
2697 
2698 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2699 {
2700 	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2701 
2702 	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2703 		tls_ctx->prot_info.version != TLS_1_3_VERSION;
2704 }
2705 
2706 static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
2707 {
2708 	struct tls_sw_context_tx *sw_ctx_tx;
2709 
2710 	if (!ctx->priv_ctx_tx) {
2711 		sw_ctx_tx = kzalloc_obj(*sw_ctx_tx);
2712 		if (!sw_ctx_tx)
2713 			return NULL;
2714 	} else {
2715 		sw_ctx_tx = ctx->priv_ctx_tx;
2716 	}
2717 
2718 	crypto_init_wait(&sw_ctx_tx->async_wait);
2719 	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
2720 	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2721 	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2722 	sw_ctx_tx->tx_work.sk = sk;
2723 
2724 	return sw_ctx_tx;
2725 }
2726 
2727 static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
2728 {
2729 	struct tls_sw_context_rx *sw_ctx_rx;
2730 
2731 	if (!ctx->priv_ctx_rx) {
2732 		sw_ctx_rx = kzalloc_obj(*sw_ctx_rx);
2733 		if (!sw_ctx_rx)
2734 			return NULL;
2735 	} else {
2736 		sw_ctx_rx = ctx->priv_ctx_rx;
2737 	}
2738 
2739 	crypto_init_wait(&sw_ctx_rx->async_wait);
2740 	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
2741 	init_waitqueue_head(&sw_ctx_rx->wq);
2742 	skb_queue_head_init(&sw_ctx_rx->rx_list);
2743 	skb_queue_head_init(&sw_ctx_rx->async_hold);
2744 
2745 	return sw_ctx_rx;
2746 }
2747 
2748 int init_prot_info(struct tls_prot_info *prot,
2749 		   const struct tls_crypto_info *crypto_info,
2750 		   const struct tls_cipher_desc *cipher_desc)
2751 {
2752 	u16 nonce_size = cipher_desc->nonce;
2753 
2754 	if (crypto_info->version == TLS_1_3_VERSION) {
2755 		nonce_size = 0;
2756 		prot->aad_size = TLS_HEADER_SIZE;
2757 		prot->tail_size = 1;
2758 	} else {
2759 		prot->aad_size = TLS_AAD_SPACE_SIZE;
2760 		prot->tail_size = 0;
2761 	}
2762 
2763 	/* Sanity-check the sizes for stack allocations. */
2764 	if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
2765 		return -EINVAL;
2766 
2767 	prot->version = crypto_info->version;
2768 	prot->cipher_type = crypto_info->cipher_type;
2769 	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2770 	prot->tag_size = cipher_desc->tag;
2771 	prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
2772 	prot->iv_size = cipher_desc->iv;
2773 	prot->salt_size = cipher_desc->salt;
2774 	prot->rec_seq_size = cipher_desc->rec_seq;
2775 
2776 	return 0;
2777 }
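
/*
 * Worked example of the arithmetic above, using the AES-GCM-128
 * descriptor values (explicit nonce/iv 8, salt 4, tag 16):
 *
 *	TLS 1.2: prepend = 5 + 8 = 13, tail = 0
 *		 overhead = 13 + 16 + 0 = 29 bytes per record
 *	TLS 1.3: explicit nonce dropped, prepend = 5, tail = 1
 *		 overhead = 5 + 16 + 1 = 22 bytes per record
 */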
2778 
2779 static void tls_finish_key_update(struct sock *sk, struct tls_context *tls_ctx)
2780 {
2781 	struct tls_sw_context_rx *ctx = tls_ctx->priv_ctx_rx;
2782 
2783 	WRITE_ONCE(ctx->key_update_pending, false);
2784 	/* wake-up pre-existing poll() */
2785 	ctx->saved_data_ready(sk);
2786 }
2787 
2788 int tls_set_sw_offload(struct sock *sk, int tx,
2789 		       struct tls_crypto_info *new_crypto_info)
2790 {
2791 	struct tls_crypto_info *crypto_info, *src_crypto_info;
2792 	struct tls_sw_context_tx *sw_ctx_tx = NULL;
2793 	struct tls_sw_context_rx *sw_ctx_rx = NULL;
2794 	const struct tls_cipher_desc *cipher_desc;
2795 	char *iv, *rec_seq, *key, *salt;
2796 	struct cipher_context *cctx;
2797 	struct tls_prot_info *prot;
2798 	struct crypto_aead **aead;
2799 	struct tls_context *ctx;
2800 	struct crypto_tfm *tfm;
2801 	int rc = 0;
2802 
2803 	ctx = tls_get_ctx(sk);
2804 	prot = &ctx->prot_info;
2805 
2806 	/* new_crypto_info != NULL means rekey */
2807 	if (!new_crypto_info) {
2808 		if (tx) {
2809 			ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
2810 			if (!ctx->priv_ctx_tx)
2811 				return -ENOMEM;
2812 		} else {
2813 			ctx->priv_ctx_rx = init_ctx_rx(ctx);
2814 			if (!ctx->priv_ctx_rx)
2815 				return -ENOMEM;
2816 		}
2817 	}
2818 
2819 	if (tx) {
2820 		sw_ctx_tx = ctx->priv_ctx_tx;
2821 		crypto_info = &ctx->crypto_send.info;
2822 		cctx = &ctx->tx;
2823 		aead = &sw_ctx_tx->aead_send;
2824 	} else {
2825 		sw_ctx_rx = ctx->priv_ctx_rx;
2826 		crypto_info = &ctx->crypto_recv.info;
2827 		cctx = &ctx->rx;
2828 		aead = &sw_ctx_rx->aead_recv;
2829 	}
2830 
2831 	src_crypto_info = new_crypto_info ?: crypto_info;
2832 
2833 	cipher_desc = get_cipher_desc(src_crypto_info->cipher_type);
2834 	if (!cipher_desc) {
2835 		rc = -EINVAL;
2836 		goto free_priv;
2837 	}
2838 
2839 	rc = init_prot_info(prot, src_crypto_info, cipher_desc);
2840 	if (rc)
2841 		goto free_priv;
2842 
2843 	iv = crypto_info_iv(src_crypto_info, cipher_desc);
2844 	key = crypto_info_key(src_crypto_info, cipher_desc);
2845 	salt = crypto_info_salt(src_crypto_info, cipher_desc);
2846 	rec_seq = crypto_info_rec_seq(src_crypto_info, cipher_desc);
2847 
2848 	if (!*aead) {
2849 		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
2850 		if (IS_ERR(*aead)) {
2851 			rc = PTR_ERR(*aead);
2852 			*aead = NULL;
2853 			goto free_priv;
2854 		}
2855 	}
2856 
2857 	ctx->push_pending_record = tls_sw_push_pending_record;
2858 
2859 	/* setkey is the last operation that could fail during a
2860 	 * rekey. If it succeeds, we can start modifying the
2861 	 * context.
2862 	 */
2863 	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
2864 	if (rc) {
2865 		if (new_crypto_info)
2866 			goto out;
2867 		else
2868 			goto free_aead;
2869 	}
2870 
2871 	if (!new_crypto_info) {
2872 		rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2873 		if (rc)
2874 			goto free_aead;
2875 	}
2876 
2877 	if (!tx && !new_crypto_info) {
2878 		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2879 
2880 		tls_update_rx_zc_capable(ctx);
2881 		sw_ctx_rx->async_capable =
2882 			src_crypto_info->version != TLS_1_3_VERSION &&
2883 			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2884 
2885 		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2886 		if (rc)
2887 			goto free_aead;
2888 	}
2889 
2890 	memcpy(cctx->iv, salt, cipher_desc->salt);
2891 	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
2892 	memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);
2893 
2894 	if (new_crypto_info) {
2895 		unsafe_memcpy(crypto_info, new_crypto_info,
2896 			      cipher_desc->crypto_info,
2897 			      /* size was checked in do_tls_setsockopt_conf */);
2898 		memzero_explicit(new_crypto_info, cipher_desc->crypto_info);
2899 		if (!tx)
2900 			tls_finish_key_update(sk, ctx);
2901 	}
2902 
2903 	goto out;
2904 
2905 free_aead:
2906 	crypto_free_aead(*aead);
2907 	*aead = NULL;
2908 free_priv:
2909 	if (!new_crypto_info) {
2910 		if (tx) {
2911 			kfree(ctx->priv_ctx_tx);
2912 			ctx->priv_ctx_tx = NULL;
2913 		} else {
2914 			kfree(ctx->priv_ctx_rx);
2915 			ctx->priv_ctx_rx = NULL;
2916 		}
2917 	}
2918 out:
2919 	return rc;
2920 }
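
/*
 * End-to-end userspace sketch of enabling this SW path on a connected
 * TCP socket (error handling and the handshake, which must come from a
 * userspace TLS library, are omitted; key, iv, salt and seq are
 * assumed to hold the negotiated secrets):
 *
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = {
 *		.info.version = TLS_1_2_VERSION,
 *		.info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *	};
 *	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *	memcpy(ci.rec_seq, seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	setsockopt(fd, SOL_TLS, TLS_RX, &ci, sizeof(ci));
 *
 * after which plain send()/recv() on fd move TLS records.
 */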
2921