// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
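		/* Element fully uncharged; advance msg->sg.start past it. */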
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations
	 * start at the new copy location.
	 * However trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates, msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	u32 copy, buf_size, copied = 0;
	struct scatterlist *sge;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		copied += copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return (ret < 0) ? ret : copied;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg.
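 * Returns the number of bytes copied (or -EFAULT if nothing could be
 * copied). With MSG_PEEK the data is left queued on ingress_msg.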
 */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			if (copy)
				copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb) {
					sk_mem_uncharge(sk, copy);
					atomic_sub(copy, &sk->sk_rmem_alloc);
				}
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we don't
		 * want to drop the skb. We need to linearize the skb so that
		 * the mapping in skb_to_sgvec cannot fail.
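		 * Returning -EAGAIN lets the caller retry later, e.g. from
		 * the backlog workqueue, instead of dropping the skb.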
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	psock->ingress_bytes += len;
#endif
	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition seeing it already set
	 * correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from skb_consume found in __tcp_bpf_recvmsg() after it's been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
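 * The skb itself is released later via consume_skb() once the msg that
 * references it is freed.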
602 */ 603 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, 604 u32 off, u32 len) 605 { 606 struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC); 607 struct sock *sk = psock->sk; 608 int err; 609 610 if (unlikely(!msg)) 611 return -EAGAIN; 612 skb_set_owner_r(skb, sk); 613 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); 614 if (err < 0) 615 kfree(msg); 616 return err; 617 } 618 619 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, 620 u32 off, u32 len, bool ingress) 621 { 622 int err = 0; 623 624 if (!ingress) { 625 if (!sock_writeable(psock->sk)) 626 return -EAGAIN; 627 return skb_send_sock(psock->sk, skb, off, len); 628 } 629 skb_get(skb); 630 err = sk_psock_skb_ingress(psock, skb, off, len); 631 if (err < 0) 632 kfree_skb(skb); 633 return err; 634 } 635 636 static void sk_psock_skb_state(struct sk_psock *psock, 637 struct sk_psock_work_state *state, 638 int len, int off) 639 { 640 spin_lock_bh(&psock->ingress_lock); 641 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { 642 state->len = len; 643 state->off = off; 644 } 645 spin_unlock_bh(&psock->ingress_lock); 646 } 647 648 static void sk_psock_backlog(struct work_struct *work) 649 { 650 struct delayed_work *dwork = to_delayed_work(work); 651 struct sk_psock *psock = container_of(dwork, struct sk_psock, work); 652 struct sk_psock_work_state *state = &psock->work_state; 653 struct sk_buff *skb = NULL; 654 u32 len = 0, off = 0; 655 bool ingress; 656 int ret; 657 658 mutex_lock(&psock->work_mutex); 659 if (unlikely(state->len)) { 660 len = state->len; 661 off = state->off; 662 } 663 664 while ((skb = skb_peek(&psock->ingress_skb))) { 665 len = skb->len; 666 off = 0; 667 if (skb_bpf_strparser(skb)) { 668 struct strp_msg *stm = strp_msg(skb); 669 670 off = stm->offset; 671 len = stm->full_len; 672 } 673 ingress = skb_bpf_ingress(skb); 674 skb_bpf_redirect_clear(skb); 675 do { 676 ret = -EIO; 677 if (!sock_flag(psock->sk, SOCK_DEAD)) 678 ret = sk_psock_handle_skb(psock, skb, off, 679 len, ingress); 680 if (ret <= 0) { 681 if (ret == -EAGAIN) { 682 sk_psock_skb_state(psock, state, len, off); 683 684 /* Delay slightly to prioritize any 685 * other work that might be here. 686 */ 687 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) 688 schedule_delayed_work(&psock->work, 1); 689 goto end; 690 } 691 /* Hard errors break pipe and stop xmit. */ 692 sk_psock_report_error(psock, ret ? 
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		skb = skb_dequeue(&psock->ingress_skb);
		kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		if (!msg->skb)
			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached.
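	 * This runs from the RCU work item queued by sk_psock_drop().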
	 */

	sk_psock_done_strp(psock);

	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	if (psock->sk_pair)
		sock_put(psock->sk_pair);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Errors
		 * from sk_psock_skb_ingress are handled by retrying
		 * later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0)
				goto out_free;
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held.
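 * When a TLS RX context is active, defer to the saved data_ready callback
 * instead of calling the strparser directly.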
 */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			read_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			read_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	int ret;

	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	ret = strp_init(&psock->strp, sk, &cb);
	if (!ret)
		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

	if (sk_is_tcp(sk)) {
		psock->strp.cb.read_sock = tcp_bpf_strp_read_sock;
		psock->copied_seq = tcp_sk(sk)->copied_seq;
	}
	return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	const struct proto_ops *ops;
	int copied;

	trace_sk_data_ready(sk);

	if (unlikely(!sock))
		return;
	ops = READ_ONCE(sock->ops);
	if (!ops || !ops->read_skb)
		return;
	copied = ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock)
			sk_psock_data_ready(sk, psock);
		rcu_read_unlock();
	}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}