// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */

#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/workqueue.h>
#include <net/strparser.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/tls.h>

#include "tls.h"

static struct workqueue_struct *tls_strp_wq;

/* Report a fatal parsing error on the lower socket and stop the parser */
static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
{
        if (strp->stopped)
                return;

        strp->stopped = 1;

        /* Report an error on the lower socket */
        WRITE_ONCE(strp->sk->sk_err, -err);
        /* Paired with smp_rmb() in tcp_poll() */
        smp_wmb();
        sk_error_report(strp->sk);
}

/* Free the anchor skb. Outside of copy mode the frag_list only borrows
 * skbs from the TCP receive queue, so drop the pointer instead of
 * letting consume_skb() free them.
 */
static void tls_strp_anchor_free(struct tls_strparser *strp)
{
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

        DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
        if (!strp->copy_mode)
                shinfo->frag_list = NULL;
        consume_skb(strp->anchor);
        strp->anchor = NULL;
}

/* Allocate a frag-backed skb and copy @len bytes of @in_skb, starting at
 * @offset, into its page frags.
 */
static struct sk_buff *
tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
                  int offset, int len)
{
        struct sk_buff *skb;
        int i, err;

        skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
                                   &err, strp->sk->sk_allocation);
        if (!skb)
                return NULL;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
                                           skb_frag_address(frag),
                                           skb_frag_size(frag)));
                offset += skb_frag_size(frag);
        }

        skb->len = len;
        skb->data_len = len;
        skb_copy_header(skb, in_skb);
        return skb;
}

/* Create a new skb with the contents of input copied to its page frags */
static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
{
        struct strp_msg *rxm;
        struct sk_buff *skb;

        skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
                                strp->stm.full_len);
        if (!skb)
                return NULL;

        rxm = strp_msg(skb);
        rxm->offset = 0;
        return skb;
}

/* Steal the input skb, input msg is invalid after calling this function */
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx)
{
        struct tls_strparser *strp = &ctx->strp;

#ifdef CONFIG_TLS_DEVICE
        DEBUG_NET_WARN_ON_ONCE(!strp->anchor->decrypted);
#else
        /* This function turns an input into an output,
         * that can only happen if we have offload.
         */
        WARN_ON(1);
#endif

        if (strp->copy_mode) {
                struct sk_buff *skb;

                /* Replace anchor with an empty skb, this is a little
                 * dangerous but __tls_cur_msg() warns on empty skbs
                 * so hopefully we'll catch abuses.
                 */
                skb = alloc_skb(0, strp->sk->sk_allocation);
                if (!skb)
                        return NULL;

                swap(strp->anchor, skb);
                return skb;
        }

        return tls_strp_msg_make_copy(strp);
}
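
/* The parser tracks the current record through the "anchor" skb. Depending
 * on strp->copy_mode, the record payload lives in one of two places:
 *
 *  - zero-copy mode (copy_mode == 0): the anchor's frag_list points at skbs
 *    still sitting in the TCP receive queue, and strp->stm.offset /
 *    strp->stm.full_len describe where the record starts and how long it is;
 *
 *  - copy mode (copy_mode == 1): the record bytes have been copied into
 *    memory owned by the anchor itself (its page frags, or a frag_list of
 *    copied skbs when the decrypted state is mixed), starting at offset 0.
 *
 * tls_strp_msg_cow() below forces a message into copy mode, and
 * tls_strp_msg_hold() takes extra references on its data.
 */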

/* Force the input skb to be in copy mode. The data ownership remains
 * with the input skb itself (meaning unpause will wipe it) but it can
 * be modified.
 */
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx)
{
        struct tls_strparser *strp = &ctx->strp;
        struct sk_buff *skb;

        if (strp->copy_mode)
                return 0;

        skb = tls_strp_msg_make_copy(strp);
        if (!skb)
                return -ENOMEM;

        tls_strp_anchor_free(strp);
        strp->anchor = skb;

        tcp_read_done(strp->sk, strp->stm.full_len);
        strp->copy_mode = 1;

        return 0;
}

/* Make a clone (in the skb sense) of the input msg to keep a reference
 * to the underlying data. The reference-holding skbs get placed on
 * @dst.
 */
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst)
{
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);

        if (strp->copy_mode) {
                struct sk_buff *skb;

                WARN_ON_ONCE(!shinfo->nr_frags);

                /* We can't skb_clone() the anchor, it gets wiped by unpause */
                skb = alloc_skb(0, strp->sk->sk_allocation);
                if (!skb)
                        return -ENOMEM;

                __skb_queue_tail(dst, strp->anchor);
                strp->anchor = skb;
        } else {
                struct sk_buff *iter, *clone;
                int chunk, len, offset;

                offset = strp->stm.offset;
                len = strp->stm.full_len;
                iter = shinfo->frag_list;

                while (len > 0) {
                        if (iter->len <= offset) {
                                offset -= iter->len;
                                goto next;
                        }

                        chunk = iter->len - offset;
                        offset = 0;

                        clone = skb_clone(iter, strp->sk->sk_allocation);
                        if (!clone)
                                return -ENOMEM;
                        __skb_queue_tail(dst, clone);

                        len -= chunk;
next:
                        iter = iter->next;
                }
        }

        return 0;
}

/* Release the anchor's privately owned data (page frags and, in copy mode,
 * the copied frag_list) and reset the parser's mode flags.
 */
static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
{
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
        int i;

        DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);

        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i], false);
        shinfo->nr_frags = 0;
        if (strp->copy_mode) {
                kfree_skb_list(shinfo->frag_list);
                shinfo->frag_list = NULL;
        }
        strp->copy_mode = 0;
        strp->mixed_decrypted = 0;
}

/* Copy-mode receive helper: append data from @in_skb to the anchor's page
 * frags, working out the record length from the header once enough bytes
 * have arrived. Returns the number of bytes consumed.
 */
static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
                                struct sk_buff *in_skb, unsigned int offset,
                                size_t in_len)
{
        size_t len, chunk;
        skb_frag_t *frag;
        int sz;

        frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];

        len = in_len;
        /* First make sure we got the header */
        if (!strp->stm.full_len) {
                /* Assume one page is more than enough for headers */
                chunk = min_t(size_t, len, PAGE_SIZE - skb_frag_size(frag));
                WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
                                           skb_frag_address(frag) +
                                           skb_frag_size(frag),
                                           chunk));

                skb->len += chunk;
                skb->data_len += chunk;
                skb_frag_size_add(frag, chunk);

                sz = tls_rx_msg_size(strp, skb);
                if (sz < 0)
                        return sz;

                /* We may have over-read, sz == 0 is guaranteed under-read */
                if (unlikely(sz && sz < skb->len)) {
                        int over = skb->len - sz;

                        WARN_ON_ONCE(over > chunk);
                        skb->len -= over;
                        skb->data_len -= over;
                        skb_frag_size_add(frag, -over);

                        chunk -= over;
                }

                frag++;
                len -= chunk;
                offset += chunk;

                strp->stm.full_len = sz;
                if (!strp->stm.full_len)
                        goto read_done;
        }

        /* Load up more data */
        while (len && strp->stm.full_len > skb->len) {
                chunk = min_t(size_t, len, strp->stm.full_len - skb->len);
                chunk = min_t(size_t, chunk, PAGE_SIZE - skb_frag_size(frag));
                WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
                                           skb_frag_address(frag) +
                                           skb_frag_size(frag),
                                           chunk));

                skb->len += chunk;
                skb->data_len += chunk;
                skb_frag_size_add(frag, chunk);
                frag++;
                len -= chunk;
                offset += chunk;
        }

read_done:
        return in_len - len;
}
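
/* Variant of the above, used on CONFIG_TLS_DEVICE builds when the queued
 * data mixes decrypted and non-decrypted skbs: rather than merging everything
 * into one set of page frags, each chunk is copied into its own skb with
 * tls_strp_skb_copy() and chained on the anchor's frag_list, so per-skb state
 * (notably the decrypted flag) is not collapsed into a single skb.
 */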

static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
                               struct sk_buff *in_skb, unsigned int offset,
                               size_t in_len)
{
        struct sk_buff *nskb, *first, *last;
        struct skb_shared_info *shinfo;
        size_t chunk;
        int sz;

        if (strp->stm.full_len)
                chunk = strp->stm.full_len - skb->len;
        else
                chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
        chunk = min(chunk, in_len);

        nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
        if (!nskb)
                return -ENOMEM;

        shinfo = skb_shinfo(skb);
        if (!shinfo->frag_list) {
                shinfo->frag_list = nskb;
                nskb->prev = nskb;
        } else {
                first = shinfo->frag_list;
                last = first->prev;
                last->next = nskb;
                first->prev = nskb;
        }

        skb->len += chunk;
        skb->data_len += chunk;

        if (!strp->stm.full_len) {
                sz = tls_rx_msg_size(strp, skb);
                if (sz < 0)
                        return sz;

                /* We may have over-read, sz == 0 is guaranteed under-read */
                if (unlikely(sz && sz < skb->len)) {
                        int over = skb->len - sz;

                        WARN_ON_ONCE(over > chunk);
                        skb->len -= over;
                        skb->data_len -= over;
                        __pskb_trim(nskb, nskb->len - over);

                        chunk -= over;
                }

                strp->stm.full_len = sz;
        }

        return chunk;
}

static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                           unsigned int offset, size_t in_len)
{
        struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
        struct sk_buff *skb;
        int ret;

        if (strp->msg_ready)
                return 0;

        skb = strp->anchor;
        if (!skb->len)
                skb_copy_decrypted(skb, in_skb);
        else
                strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);

        if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
                ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
        else
                ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
        if (ret < 0) {
                desc->error = ret;
                ret = 0;
        }

        if (strp->stm.full_len && strp->stm.full_len == skb->len) {
                desc->count = 0;

                WRITE_ONCE(strp->msg_ready, 1);
                tls_rx_msg_ready(strp);
        }

        return ret;
}

static int tls_strp_read_copyin(struct tls_strparser *strp)
{
        read_descriptor_t desc;

        desc.arg.data = strp;
        desc.error = 0;
        desc.count = 1; /* give more than one skb per call */

        /* sk should be locked here, so okay to do read_sock */
        tcp_read_sock(strp->sk, &desc, tls_strp_copyin);

        return desc.error;
}
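
/* Entry point into copy mode. tls_strp_read_sock() calls this when the full
 * record is not yet queued (in which case the copy only happens if the
 * receive buffer is under pressure, see the check below) or when the queued
 * skbs fail tls_strp_check_queue_ok().
 */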

static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
{
        struct skb_shared_info *shinfo;
        struct page *page;
        int need_spc, len;

        /* If the rbuf is small or rcv window has collapsed to 0 we need
         * to read the data out. Otherwise the connection will stall.
         * Without pressure a threshold of INT_MAX will never be ready.
         */
        if (likely(qshort && !tcp_epollin_ready(strp->sk, INT_MAX)))
                return 0;

        shinfo = skb_shinfo(strp->anchor);

        /* If we don't know the length go max plus page for cipher overhead */
        need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;

        for (len = need_spc; len > 0; len -= PAGE_SIZE) {
                page = alloc_page(strp->sk->sk_allocation);
                if (!page) {
                        tls_strp_flush_anchor_copy(strp);
                        return -ENOMEM;
                }

                skb_fill_page_desc(strp->anchor, shinfo->nr_frags++,
                                   page, 0, 0);
        }

        shinfo->frag_list = NULL;

        strp->copy_mode = 1;
        strp->stm.offset = 0;

        strp->anchor->len = 0;
        strp->anchor->data_len = 0;
        strp->anchor->truesize = round_up(need_spc, PAGE_SIZE);

        tls_strp_read_copyin(strp);

        return 0;
}

static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
{
        unsigned int len = strp->stm.offset + strp->stm.full_len;
        struct sk_buff *first, *skb;
        u32 seq;

        first = skb_shinfo(strp->anchor)->frag_list;
        skb = first;
        seq = TCP_SKB_CB(first)->seq;

        /* Make sure there's no duplicate data in the queue,
         * and the decrypted status matches.
         */
        while (skb->len < len) {
                seq += skb->len;
                len -= skb->len;
                skb = skb->next;

                if (TCP_SKB_CB(skb)->seq != seq)
                        return false;
                if (skb_cmp_decrypted(first, skb))
                        return false;
        }

        return true;
}

/* Point the anchor skb at the data currently queued on the TCP socket */
static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
{
        struct tcp_sock *tp = tcp_sk(strp->sk);
        struct sk_buff *first;
        u32 offset;

        first = tcp_recv_skb(strp->sk, tp->copied_seq, &offset);
        if (WARN_ON_ONCE(!first))
                return;

        /* Bestow the state onto the anchor */
        strp->anchor->len = offset + len;
        strp->anchor->data_len = offset + len;
        strp->anchor->truesize = offset + len;

        skb_shinfo(strp->anchor)->frag_list = first;

        skb_copy_header(strp->anchor, first);
        strp->anchor->destructor = NULL;

        strp->stm.offset = offset;
}

void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
        struct strp_msg *rxm;
        struct tls_msg *tlm;

        DEBUG_NET_WARN_ON_ONCE(!strp->msg_ready);
        DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);

        if (!strp->copy_mode && force_refresh) {
                if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
                        return;

                tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
        }

        rxm = strp_msg(strp->anchor);
        rxm->full_len = strp->stm.full_len;
        rxm->offset = strp->stm.offset;
        tlm = tls_msg(strp->anchor);
        tlm->control = strp->mark;
}
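
/* Receive path overview, as implemented below:
 *
 *   lower socket data_ready (BH, bh_lock_sock held)
 *     -> tls_strp_data_ready()
 *          -> tls_strp_check_rcv() directly, or via tls_strp_wq when the
 *             socket is owned by user or parsing ran out of memory
 *               -> tls_strp_read_sock()
 *                    -> tls_rx_msg_ready() once a full record is queued
 *                       (directly, or via tls_strp_copyin() in copy mode)
 */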

/* Called with lock held on lower socket */
static int tls_strp_read_sock(struct tls_strparser *strp)
{
        int sz, inq;

        inq = tcp_inq(strp->sk);
        if (inq < 1)
                return 0;

        if (unlikely(strp->copy_mode))
                return tls_strp_read_copyin(strp);

        if (inq < strp->stm.full_len)
                return tls_strp_read_copy(strp, true);

        if (!strp->stm.full_len) {
                tls_strp_load_anchor_with_queue(strp, inq);

                sz = tls_rx_msg_size(strp, strp->anchor);
                if (sz < 0) {
                        tls_strp_abort_strp(strp, sz);
                        return sz;
                }

                strp->stm.full_len = sz;

                if (!strp->stm.full_len || inq < strp->stm.full_len)
                        return tls_strp_read_copy(strp, true);
        }

        if (!tls_strp_check_queue_ok(strp))
                return tls_strp_read_copy(strp, false);

        WRITE_ONCE(strp->msg_ready, 1);
        tls_rx_msg_ready(strp);

        return 0;
}

void tls_strp_check_rcv(struct tls_strparser *strp)
{
        if (unlikely(strp->stopped) || strp->msg_ready)
                return;

        if (tls_strp_read_sock(strp) == -ENOMEM)
                queue_work(tls_strp_wq, &strp->work);
}

/* Lower sock lock held */
void tls_strp_data_ready(struct tls_strparser *strp)
{
        /* This check is needed to synchronize with do_tls_strp_work.
         * do_tls_strp_work acquires a process lock (lock_sock) whereas
         * the lock held here is bh_lock_sock. The two locks can be
         * held by different threads at the same time, but bh_lock_sock
         * allows a thread in BH context to safely check if the process
         * lock is held. In this case, if the lock is held, queue work.
         */
        if (sock_owned_by_user_nocheck(strp->sk)) {
                queue_work(tls_strp_wq, &strp->work);
                return;
        }

        tls_strp_check_rcv(strp);
}

static void tls_strp_work(struct work_struct *w)
{
        struct tls_strparser *strp =
                container_of(w, struct tls_strparser, work);

        lock_sock(strp->sk);
        tls_strp_check_rcv(strp);
        release_sock(strp->sk);
}

void tls_strp_msg_done(struct tls_strparser *strp)
{
        WARN_ON(!strp->stm.full_len);

        if (likely(!strp->copy_mode))
                tcp_read_done(strp->sk, strp->stm.full_len);
        else
                tls_strp_flush_anchor_copy(strp);

        WRITE_ONCE(strp->msg_ready, 0);
        memset(&strp->stm, 0, sizeof(strp->stm));

        tls_strp_check_rcv(strp);
}

void tls_strp_stop(struct tls_strparser *strp)
{
        strp->stopped = 1;
}

int tls_strp_init(struct tls_strparser *strp, struct sock *sk)
{
        memset(strp, 0, sizeof(*strp));

        strp->sk = sk;

        strp->anchor = alloc_skb(0, GFP_KERNEL);
        if (!strp->anchor)
                return -ENOMEM;

        INIT_WORK(&strp->work, tls_strp_work);

        return 0;
}

/* strp must already be stopped so that tls_strp_recv will no longer be called.
 * Note that tls_strp_done is not called with the lower socket held.
 */
void tls_strp_done(struct tls_strparser *strp)
{
        WARN_ON(!strp->stopped);

        cancel_work_sync(&strp->work);
        tls_strp_anchor_free(strp);
}

int __init tls_strp_dev_init(void)
{
        tls_strp_wq = create_workqueue("tls-strp");
        if (unlikely(!tls_strp_wq))
                return -ENOMEM;

        return 0;
}

void tls_strp_dev_exit(void)
{
        destroy_workqueue(tls_strp_wq);
}