1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Implementation of the Transmission Control Protocol(TCP). 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Florian La Roche, <flla@stud.uni-sb.de> 14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 15 * Linus Torvalds, <torvalds@cs.helsinki.fi> 16 * Alan Cox, <gw4pts@gw4pts.ampr.org> 17 * Matthew Dillon, <dillon@apollo.west.oic.com> 18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 19 * Jorge Cwik, <jorge@laser.satlink.net> 20 * 21 * Fixes: 22 * Alan Cox : Numerous verify_area() calls 23 * Alan Cox : Set the ACK bit on a reset 24 * Alan Cox : Stopped it crashing if it closed while 25 * sk->inuse=1 and was trying to connect 26 * (tcp_err()). 27 * Alan Cox : All icmp error handling was broken 28 * pointers passed where wrong and the 29 * socket was looked up backwards. Nobody 30 * tested any icmp error code obviously. 31 * Alan Cox : tcp_err() now handled properly. It 32 * wakes people on errors. poll 33 * behaves and the icmp error race 34 * has gone by moving it into sock.c 35 * Alan Cox : tcp_send_reset() fixed to work for 36 * everything not just packets for 37 * unknown sockets. 38 * Alan Cox : tcp option processing. 39 * Alan Cox : Reset tweaked (still not 100%) [Had 40 * syn rule wrong] 41 * Herp Rosmanith : More reset fixes 42 * Alan Cox : No longer acks invalid rst frames. 43 * Acking any kind of RST is right out. 44 * Alan Cox : Sets an ignore me flag on an rst 45 * receive otherwise odd bits of prattle 46 * escape still 47 * Alan Cox : Fixed another acking RST frame bug. 48 * Should stop LAN workplace lockups. 49 * Alan Cox : Some tidyups using the new skb list 50 * facilities 51 * Alan Cox : sk->keepopen now seems to work 52 * Alan Cox : Pulls options out correctly on accepts 53 * Alan Cox : Fixed assorted sk->rqueue->next errors 54 * Alan Cox : PSH doesn't end a TCP read. Switched a 55 * bit to skb ops. 56 * Alan Cox : Tidied tcp_data to avoid a potential 57 * nasty. 58 * Alan Cox : Added some better commenting, as the 59 * tcp is hard to follow 60 * Alan Cox : Removed incorrect check for 20 * psh 61 * Michael O'Reilly : ack < copied bug fix. 62 * Johannes Stille : Misc tcp fixes (not all in yet). 63 * Alan Cox : FIN with no memory -> CRASH 64 * Alan Cox : Added socket option proto entries. 65 * Also added awareness of them to accept. 66 * Alan Cox : Added TCP options (SOL_TCP) 67 * Alan Cox : Switched wakeup calls to callbacks, 68 * so the kernel can layer network 69 * sockets. 70 * Alan Cox : Use ip_tos/ip_ttl settings. 71 * Alan Cox : Handle FIN (more) properly (we hope). 72 * Alan Cox : RST frames sent on unsynchronised 73 * state ack error. 74 * Alan Cox : Put in missing check for SYN bit. 75 * Alan Cox : Added tcp_select_window() aka NET2E 76 * window non shrink trick. 
77 * Alan Cox : Added a couple of small NET2E timer 78 * fixes 79 * Charles Hedrick : TCP fixes 80 * Toomas Tamm : TCP window fixes 81 * Alan Cox : Small URG fix to rlogin ^C ack fight 82 * Charles Hedrick : Rewrote most of it to actually work 83 * Linus : Rewrote tcp_read() and URG handling 84 * completely 85 * Gerhard Koerting: Fixed some missing timer handling 86 * Matthew Dillon : Reworked TCP machine states as per RFC 87 * Gerhard Koerting: PC/TCP workarounds 88 * Adam Caldwell : Assorted timer/timing errors 89 * Matthew Dillon : Fixed another RST bug 90 * Alan Cox : Move to kernel side addressing changes. 91 * Alan Cox : Beginning work on TCP fastpathing 92 * (not yet usable) 93 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 94 * Alan Cox : TCP fast path debugging 95 * Alan Cox : Window clamping 96 * Michael Riepe : Bug in tcp_check() 97 * Matt Dillon : More TCP improvements and RST bug fixes 98 * Matt Dillon : Yet more small nasties remove from the 99 * TCP code (Be very nice to this man if 100 * tcp finally works 100%) 8) 101 * Alan Cox : BSD accept semantics. 102 * Alan Cox : Reset on closedown bug. 103 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 104 * Michael Pall : Handle poll() after URG properly in 105 * all cases. 106 * Michael Pall : Undo the last fix in tcp_read_urg() 107 * (multi URG PUSH broke rlogin). 108 * Michael Pall : Fix the multi URG PUSH problem in 109 * tcp_readable(), poll() after URG 110 * works now. 111 * Michael Pall : recv(...,MSG_OOB) never blocks in the 112 * BSD api. 113 * Alan Cox : Changed the semantics of sk->socket to 114 * fix a race and a signal problem with 115 * accept() and async I/O. 116 * Alan Cox : Relaxed the rules on tcp_sendto(). 117 * Yury Shevchuk : Really fixed accept() blocking problem. 118 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 119 * clients/servers which listen in on 120 * fixed ports. 121 * Alan Cox : Cleaned the above up and shrank it to 122 * a sensible code size. 123 * Alan Cox : Self connect lockup fix. 124 * Alan Cox : No connect to multicast. 125 * Ross Biro : Close unaccepted children on master 126 * socket close. 127 * Alan Cox : Reset tracing code. 128 * Alan Cox : Spurious resets on shutdown. 129 * Alan Cox : Giant 15 minute/60 second timer error 130 * Alan Cox : Small whoops in polling before an 131 * accept. 132 * Alan Cox : Kept the state trace facility since 133 * it's handy for debugging. 134 * Alan Cox : More reset handler fixes. 135 * Alan Cox : Started rewriting the code based on 136 * the RFC's for other useful protocol 137 * references see: Comer, KA9Q NOS, and 138 * for a reference on the difference 139 * between specifications and how BSD 140 * works see the 4.4lite source. 141 * A.N.Kuznetsov : Don't time wait on completion of tidy 142 * close. 143 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 144 * Linus Torvalds : Fixed BSD port reuse to work first syn 145 * Alan Cox : Reimplemented timers as per the RFC 146 * and using multiple timers for sanity. 147 * Alan Cox : Small bug fixes, and a lot of new 148 * comments. 149 * Alan Cox : Fixed dual reader crash by locking 150 * the buffers (much like datagram.c) 151 * Alan Cox : Fixed stuck sockets in probe. A probe 152 * now gets fed up of retrying without 153 * (even a no space) answer. 154 * Alan Cox : Extracted closing code better 155 * Alan Cox : Fixed the closing state machine to 156 * resemble the RFC. 157 * Alan Cox : More 'per spec' fixes. 158 * Jorge Cwik : Even faster checksumming. 
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/md5.h>
#include <crypto/utils.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/inet_ecn.h>
#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/mptcp.h>
#include <net/proto_memory.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/psp.h>
#include <net/sock.h>
#include <net/rstreason.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/hotdata.h>
#include <trace/events/tcp.h>
#include <net/rps.h>

#include "../core/devmem.h"

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

DEFINE_PER_CPU(u32, tcp_tw_isn);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);

long sysctl_tcp_mem[3] __read_mostly;

DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
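 *
 * tcp_enter_memory_pressure() below latches the flag with the current
 * jiffies value (bumping LINUX_MIB_TCPMEMORYPRESSURES once per episode,
 * thanks to the cmpxchg()), and tcp_leave_memory_pressure() clears it,
 * adding the time spent under pressure to
 * LINUX_MIB_TCPMEMORYPRESSURESCHRONO.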
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}

#ifdef CONFIG_TCP_MD5SIG
void tcp_md5_destruct_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree(rcu_replace_pointer(tp->md5sig_info, NULL, 1));
		static_branch_slow_dec_deferred(&tcp_md5_needed);
	}
}
#endif

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc() and so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int rto_min_us, rto_max_ms;

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;

	rto_max_ms = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_max_ms);
	icsk->icsk_rto_max = msecs_to_jiffies(rto_max_ms);

	rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
	icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK.
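	 * The first rate samples are therefore flagged application-limited
	 * (tp->rate_app_limited below); see also tcp_rate_check_app_limited()
	 * further down, which re-marks the connection app-limited whenever a
	 * send gap is due to lack of data rather than cwnd or local queues.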
*/ 455 tp->app_limited = ~0U; 456 tp->rate_app_limited = 1; 457 458 /* See draft-stevens-tcpca-spec-01 for discussion of the 459 * initialization of these values. 460 */ 461 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 462 tp->snd_cwnd_clamp = ~0; 463 tp->mss_cache = TCP_MSS_DEFAULT; 464 465 tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering); 466 tcp_assign_congestion_control(sk); 467 468 tp->tsoffset = 0; 469 tp->rack.reo_wnd_steps = 1; 470 471 sk->sk_write_space = sk_stream_write_space; 472 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 473 474 icsk->icsk_sync_mss = tcp_sync_mss; 475 476 WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1])); 477 WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1])); 478 tcp_scaling_ratio_init(sk); 479 480 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 481 sk_sockets_allocated_inc(sk); 482 xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1); 483 } 484 485 static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) 486 { 487 struct sk_buff *skb = tcp_write_queue_tail(sk); 488 u32 tsflags = sockc->tsflags; 489 490 if (unlikely(!skb)) 491 skb = skb_rb_last(&sk->tcp_rtx_queue); 492 493 if (tsflags && skb) { 494 struct skb_shared_info *shinfo = skb_shinfo(skb); 495 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 496 497 sock_tx_timestamp(sk, sockc, &shinfo->tx_flags); 498 if (tsflags & SOF_TIMESTAMPING_TX_ACK) 499 tcb->txstamp_ack |= TSTAMP_ACK_SK; 500 if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) 501 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; 502 } 503 504 if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && 505 SK_BPF_CB_FLAG_TEST(sk, SK_BPF_CB_TX_TIMESTAMPING) && skb) 506 bpf_skops_tx_timestamping(sk, skb, BPF_SOCK_OPS_TSTAMP_SENDMSG_CB); 507 } 508 509 /* @wake is one when sk_stream_write_space() calls us. 510 * This sends EPOLLOUT only if notsent_bytes is half the limit. 511 * This mimics the strategy used in sock_def_write_space(). 512 */ 513 bool tcp_stream_memory_free(const struct sock *sk, int wake) 514 { 515 const struct tcp_sock *tp = tcp_sk(sk); 516 u32 notsent_bytes = READ_ONCE(tp->write_seq) - READ_ONCE(tp->snd_nxt); 517 518 return (notsent_bytes << wake) < tcp_notsent_lowat(tp); 519 } 520 EXPORT_SYMBOL(tcp_stream_memory_free); 521 522 static bool tcp_stream_is_readable(struct sock *sk, int target) 523 { 524 if (tcp_epollin_ready(sk, target)) 525 return true; 526 return sk_is_readable(sk); 527 } 528 529 /* 530 * Wait for a TCP event. 531 * 532 * Note that we don't need to lock the socket, as the upper poll layers 533 * take care of normal races (between the test and the event) and we don't 534 * go look at any of the socket buffers directly. 535 */ 536 __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) 537 { 538 __poll_t mask; 539 struct sock *sk = sock->sk; 540 const struct tcp_sock *tp = tcp_sk(sk); 541 u8 shutdown; 542 int state; 543 544 sock_poll_wait(file, sock, wait); 545 546 state = inet_sk_state_load(sk); 547 if (state == TCP_LISTEN) 548 return inet_csk_listen_poll(sk); 549 550 /* Socket is not locked. We are protected from async events 551 * by poll logic and correct handling of state changes 552 * made by other threads is impossible in any case. 553 */ 554 555 mask = 0; 556 557 /* 558 * EPOLLHUP is certainly not done right. But poll() doesn't 559 * have a notion of HUP in just one direction, and for a 560 * socket the read side is more interesting. 
561 * 562 * Some poll() documentation says that EPOLLHUP is incompatible 563 * with the EPOLLOUT/POLLWR flags, so somebody should check this 564 * all. But careful, it tends to be safer to return too many 565 * bits than too few, and you can easily break real applications 566 * if you don't tell them that something has hung up! 567 * 568 * Check-me. 569 * 570 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and 571 * our fs/select.c). It means that after we received EOF, 572 * poll always returns immediately, making impossible poll() on write() 573 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP 574 * if and only if shutdown has been made in both directions. 575 * Actually, it is interesting to look how Solaris and DUX 576 * solve this dilemma. I would prefer, if EPOLLHUP were maskable, 577 * then we could set it on SND_SHUTDOWN. BTW examples given 578 * in Stevens' books assume exactly this behaviour, it explains 579 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK 580 * 581 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 582 * blocking on fresh not-connected or disconnected socket. --ANK 583 */ 584 shutdown = READ_ONCE(sk->sk_shutdown); 585 if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) 586 mask |= EPOLLHUP; 587 if (shutdown & RCV_SHUTDOWN) 588 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 589 590 /* Connected or passive Fast Open socket? */ 591 if (state != TCP_SYN_SENT && 592 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { 593 int target = sock_rcvlowat(sk, 0, INT_MAX); 594 u16 urg_data = READ_ONCE(tp->urg_data); 595 596 if (unlikely(urg_data) && 597 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && 598 !sock_flag(sk, SOCK_URGINLINE)) 599 target++; 600 601 if (tcp_stream_is_readable(sk, target)) 602 mask |= EPOLLIN | EPOLLRDNORM; 603 604 if (!(shutdown & SEND_SHUTDOWN)) { 605 if (__sk_stream_is_writeable(sk, 1)) { 606 mask |= EPOLLOUT | EPOLLWRNORM; 607 } else { /* send SIGIO later */ 608 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 609 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 610 611 /* Race breaker. If space is freed after 612 * wspace test but before the flags are set, 613 * IO signal will be lost. Memory barrier 614 * pairs with the input side. 
615 */ 616 smp_mb__after_atomic(); 617 if (__sk_stream_is_writeable(sk, 1)) 618 mask |= EPOLLOUT | EPOLLWRNORM; 619 } 620 } else 621 mask |= EPOLLOUT | EPOLLWRNORM; 622 623 if (urg_data & TCP_URG_VALID) 624 mask |= EPOLLPRI; 625 } else if (state == TCP_SYN_SENT && 626 inet_test_bit(DEFER_CONNECT, sk)) { 627 /* Active TCP fastopen socket with defer_connect 628 * Return EPOLLOUT so application can call write() 629 * in order for kernel to generate SYN+data 630 */ 631 mask |= EPOLLOUT | EPOLLWRNORM; 632 } 633 /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */ 634 smp_rmb(); 635 if (READ_ONCE(sk->sk_err) || 636 !skb_queue_empty_lockless(&sk->sk_error_queue)) 637 mask |= EPOLLERR; 638 639 return mask; 640 } 641 EXPORT_SYMBOL(tcp_poll); 642 643 int tcp_ioctl(struct sock *sk, int cmd, int *karg) 644 { 645 struct tcp_sock *tp = tcp_sk(sk); 646 int answ; 647 bool slow; 648 649 switch (cmd) { 650 case SIOCINQ: 651 if (sk->sk_state == TCP_LISTEN) 652 return -EINVAL; 653 654 slow = lock_sock_fast(sk); 655 answ = tcp_inq(sk); 656 unlock_sock_fast(sk, slow); 657 break; 658 case SIOCATMARK: 659 answ = READ_ONCE(tp->urg_data) && 660 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); 661 break; 662 case SIOCOUTQ: 663 if (sk->sk_state == TCP_LISTEN) 664 return -EINVAL; 665 666 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 667 answ = 0; 668 else 669 answ = READ_ONCE(tp->write_seq) - tp->snd_una; 670 break; 671 case SIOCOUTQNSD: 672 if (sk->sk_state == TCP_LISTEN) 673 return -EINVAL; 674 675 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 676 answ = 0; 677 else 678 answ = READ_ONCE(tp->write_seq) - 679 READ_ONCE(tp->snd_nxt); 680 break; 681 default: 682 return -ENOIOCTLCMD; 683 } 684 685 *karg = answ; 686 return 0; 687 } 688 689 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 690 { 691 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 692 tp->pushed_seq = tp->write_seq; 693 } 694 695 static inline bool forced_push(const struct tcp_sock *tp) 696 { 697 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 698 } 699 700 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb) 701 { 702 struct tcp_sock *tp = tcp_sk(sk); 703 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 704 705 tcb->seq = tcb->end_seq = tp->write_seq; 706 tcb->tcp_flags = TCPHDR_ACK; 707 __skb_header_release(skb); 708 psp_enqueue_set_decrypted(sk, skb); 709 tcp_add_write_queue_tail(sk, skb); 710 sk_wmem_queued_add(sk, skb->truesize); 711 sk_mem_charge(sk, skb->truesize); 712 if (tp->nonagle & TCP_NAGLE_PUSH) 713 tp->nonagle &= ~TCP_NAGLE_PUSH; 714 715 tcp_slow_start_after_idle_check(sk); 716 } 717 718 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) 719 { 720 if (flags & MSG_OOB) 721 tp->snd_up = tp->write_seq; 722 } 723 724 /* If a not yet filled skb is pushed, do not send it if 725 * we have data packets in Qdisc or NIC queues : 726 * Because TX completion will happen shortly, it gives a chance 727 * to coalesce future sendmsg() payload into this skb, without 728 * need for a timer, and with no latency trade off. 729 * As packets containing data payload have a bigger truesize 730 * than pure acks (dataless) packets, the last checks prevent 731 * autocorking if we only have an ACK in Qdisc/NIC queues, 732 * or if TX completion was delayed after we processed ACK packet. 
733 */ 734 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, 735 int size_goal) 736 { 737 return skb->len < size_goal && 738 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) && 739 !tcp_rtx_queue_empty(sk) && 740 refcount_read(&sk->sk_wmem_alloc) > skb->truesize && 741 tcp_skb_can_collapse_to(skb); 742 } 743 744 void tcp_push(struct sock *sk, int flags, int mss_now, 745 int nonagle, int size_goal) 746 { 747 struct tcp_sock *tp = tcp_sk(sk); 748 struct sk_buff *skb; 749 750 skb = tcp_write_queue_tail(sk); 751 if (!skb) 752 return; 753 if (!(flags & MSG_MORE) || forced_push(tp)) 754 tcp_mark_push(tp, skb); 755 756 tcp_mark_urg(tp, flags); 757 758 if (tcp_should_autocork(sk, skb, size_goal)) { 759 760 /* avoid atomic op if TSQ_THROTTLED bit is already set */ 761 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { 762 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); 763 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 764 smp_mb__after_atomic(); 765 } 766 /* It is possible TX completion already happened 767 * before we set TSQ_THROTTLED. 768 */ 769 if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize) 770 return; 771 } 772 773 if (flags & MSG_MORE) 774 nonagle = TCP_NAGLE_CORK; 775 776 __tcp_push_pending_frames(sk, mss_now, nonagle); 777 } 778 779 int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 780 unsigned int offset, size_t len) 781 { 782 struct tcp_splice_state *tss = rd_desc->arg.data; 783 int ret; 784 785 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, 786 min(rd_desc->count, len), tss->flags); 787 if (ret > 0) 788 rd_desc->count -= ret; 789 return ret; 790 } 791 792 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) 793 { 794 /* Store TCP splice context information in read_descriptor_t. */ 795 read_descriptor_t rd_desc = { 796 .arg.data = tss, 797 .count = tss->len, 798 }; 799 800 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); 801 } 802 803 /** 804 * tcp_splice_read - splice data from TCP socket to a pipe 805 * @sock: socket to splice from 806 * @ppos: position (not valid) 807 * @pipe: pipe to splice to 808 * @len: number of bytes to splice 809 * @flags: splice modifier flags 810 * 811 * Description: 812 * Will read pages from given socket and fill them into a pipe. 813 * 814 **/ 815 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, 816 struct pipe_inode_info *pipe, size_t len, 817 unsigned int flags) 818 { 819 struct sock *sk = sock->sk; 820 struct tcp_splice_state tss = { 821 .pipe = pipe, 822 .len = len, 823 .flags = flags, 824 }; 825 long timeo; 826 ssize_t spliced; 827 int ret; 828 829 sock_rps_record_flow(sk); 830 /* 831 * We can't seek on a socket input 832 */ 833 if (unlikely(*ppos)) 834 return -ESPIPE; 835 836 ret = spliced = 0; 837 838 lock_sock(sk); 839 840 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); 841 while (tss.len) { 842 ret = __tcp_splice_read(sk, &tss); 843 if (ret < 0) 844 break; 845 else if (!ret) { 846 if (spliced) 847 break; 848 if (sock_flag(sk, SOCK_DONE)) 849 break; 850 if (sk->sk_err) { 851 ret = sock_error(sk); 852 break; 853 } 854 if (sk->sk_shutdown & RCV_SHUTDOWN) 855 break; 856 if (sk->sk_state == TCP_CLOSE) { 857 /* 858 * This occurs when user tries to read 859 * from never connected socket. 860 */ 861 ret = -ENOTCONN; 862 break; 863 } 864 if (!timeo) { 865 ret = -EAGAIN; 866 break; 867 } 868 /* if __tcp_splice_read() got nothing while we have 869 * an skb in receive queue, we do not want to loop. 870 * This might happen with URG data. 
871 */ 872 if (!skb_queue_empty(&sk->sk_receive_queue)) 873 break; 874 ret = sk_wait_data(sk, &timeo, NULL); 875 if (ret < 0) 876 break; 877 if (signal_pending(current)) { 878 ret = sock_intr_errno(timeo); 879 break; 880 } 881 continue; 882 } 883 tss.len -= ret; 884 spliced += ret; 885 886 if (!tss.len || !timeo) 887 break; 888 release_sock(sk); 889 lock_sock(sk); 890 891 if (tcp_recv_should_stop(sk)) 892 break; 893 } 894 895 release_sock(sk); 896 897 if (spliced) 898 return spliced; 899 900 return ret; 901 } 902 903 /* We allow to exceed memory limits for FIN packets to expedite 904 * connection tear down and (memory) recovery. 905 * Otherwise tcp_send_fin() could be tempted to either delay FIN 906 * or even be forced to close flow without any FIN. 907 * In general, we want to allow one skb per socket to avoid hangs 908 * with edge trigger epoll() 909 */ 910 void sk_forced_mem_schedule(struct sock *sk, int size) 911 { 912 int delta, amt; 913 914 delta = size - sk->sk_forward_alloc; 915 if (delta <= 0) 916 return; 917 918 amt = sk_mem_pages(delta); 919 sk_forward_alloc_add(sk, amt << PAGE_SHIFT); 920 921 if (mem_cgroup_sk_enabled(sk)) 922 mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge() | __GFP_NOFAIL); 923 924 if (sk->sk_bypass_prot_mem) 925 return; 926 927 sk_memory_allocated_add(sk, amt); 928 } 929 930 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, 931 bool force_schedule) 932 { 933 struct sk_buff *skb; 934 935 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); 936 if (likely(skb)) { 937 bool mem_scheduled; 938 939 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 940 if (force_schedule) { 941 mem_scheduled = true; 942 sk_forced_mem_schedule(sk, skb->truesize); 943 } else { 944 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); 945 } 946 if (likely(mem_scheduled)) { 947 skb_reserve(skb, MAX_TCP_HEADER); 948 skb->ip_summed = CHECKSUM_PARTIAL; 949 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 950 return skb; 951 } 952 __kfree_skb(skb); 953 } else { 954 if (!sk->sk_bypass_prot_mem) 955 tcp_enter_memory_pressure(sk); 956 sk_stream_moderate_sndbuf(sk); 957 } 958 return NULL; 959 } 960 961 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, 962 int large_allowed) 963 { 964 struct tcp_sock *tp = tcp_sk(sk); 965 u32 new_size_goal, size_goal; 966 967 if (!large_allowed) 968 return mss_now; 969 970 /* Note : tcp_tso_autosize() will eventually split this later */ 971 new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size); 972 973 /* We try hard to avoid divides here */ 974 size_goal = tp->gso_segs * mss_now; 975 if (unlikely(new_size_goal < size_goal || 976 new_size_goal >= size_goal + mss_now)) { 977 tp->gso_segs = min_t(u16, new_size_goal / mss_now, 978 sk->sk_gso_max_segs); 979 size_goal = tp->gso_segs * mss_now; 980 } 981 982 return max(size_goal, mss_now); 983 } 984 985 int tcp_send_mss(struct sock *sk, int *size_goal, int flags) 986 { 987 int mss_now; 988 989 mss_now = tcp_current_mss(sk); 990 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); 991 992 return mss_now; 993 } 994 995 /* In some cases, sendmsg() could have added an skb to the write queue, 996 * but failed adding payload on it. We need to remove it to consume less 997 * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger 998 * epoll() users. Another reason is that tcp_write_xmit() does not like 999 * finding an empty skb in the write queue. 
1000 */ 1001 void tcp_remove_empty_skb(struct sock *sk) 1002 { 1003 struct sk_buff *skb = tcp_write_queue_tail(sk); 1004 1005 if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 1006 tcp_unlink_write_queue(skb, sk); 1007 if (tcp_write_queue_empty(sk)) 1008 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 1009 tcp_wmem_free_skb(sk, skb); 1010 } 1011 } 1012 1013 /* skb changing from pure zc to mixed, must charge zc */ 1014 static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb) 1015 { 1016 if (unlikely(skb_zcopy_pure(skb))) { 1017 u32 extra = skb->truesize - 1018 SKB_TRUESIZE(skb_end_offset(skb)); 1019 1020 if (!sk_wmem_schedule(sk, extra)) 1021 return -ENOMEM; 1022 1023 sk_mem_charge(sk, extra); 1024 skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY; 1025 } 1026 return 0; 1027 } 1028 1029 1030 int tcp_wmem_schedule(struct sock *sk, int copy) 1031 { 1032 int left; 1033 1034 if (likely(sk_wmem_schedule(sk, copy))) 1035 return copy; 1036 1037 /* We could be in trouble if we have nothing queued. 1038 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0] 1039 * to guarantee some progress. 1040 */ 1041 left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued; 1042 if (left > 0) 1043 sk_forced_mem_schedule(sk, min(left, copy)); 1044 return min(copy, sk->sk_forward_alloc); 1045 } 1046 1047 void tcp_free_fastopen_req(struct tcp_sock *tp) 1048 { 1049 if (tp->fastopen_req) { 1050 kfree(tp->fastopen_req); 1051 tp->fastopen_req = NULL; 1052 } 1053 } 1054 1055 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, 1056 size_t size, struct ubuf_info *uarg) 1057 { 1058 struct tcp_sock *tp = tcp_sk(sk); 1059 struct inet_sock *inet = inet_sk(sk); 1060 struct sockaddr *uaddr = msg->msg_name; 1061 int err, flags; 1062 1063 if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & 1064 TFO_CLIENT_ENABLE) || 1065 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && 1066 uaddr->sa_family == AF_UNSPEC)) 1067 return -EOPNOTSUPP; 1068 if (tp->fastopen_req) 1069 return -EALREADY; /* Another Fast Open is in progress */ 1070 1071 tp->fastopen_req = kzalloc_obj(struct tcp_fastopen_request, 1072 sk->sk_allocation); 1073 if (unlikely(!tp->fastopen_req)) 1074 return -ENOBUFS; 1075 tp->fastopen_req->data = msg; 1076 tp->fastopen_req->size = size; 1077 tp->fastopen_req->uarg = uarg; 1078 1079 if (inet_test_bit(DEFER_CONNECT, sk)) { 1080 err = tcp_connect(sk); 1081 /* Same failure procedure as in tcp_v4/6_connect */ 1082 if (err) { 1083 tcp_set_state(sk, TCP_CLOSE); 1084 inet->inet_dport = 0; 1085 sk->sk_route_caps = 0; 1086 } 1087 } 1088 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1089 err = __inet_stream_connect(sk->sk_socket, (struct sockaddr_unsized *)uaddr, 1090 msg->msg_namelen, flags, 1); 1091 /* fastopen_req could already be freed in __inet_stream_connect 1092 * if the connection times out or gets rst 1093 */ 1094 if (tp->fastopen_req) { 1095 *copied = tp->fastopen_req->copied; 1096 tcp_free_fastopen_req(tp); 1097 inet_clear_bit(DEFER_CONNECT, sk); 1098 } 1099 return err; 1100 } 1101 1102 /* If a gap is detected between sends, mark the socket application-limited. */ 1103 void tcp_rate_check_app_limited(struct sock *sk) 1104 { 1105 struct tcp_sock *tp = tcp_sk(sk); 1106 1107 if (/* We have less than one packet to send. */ 1108 tp->write_seq - tp->snd_nxt < tp->mss_cache && 1109 /* Nothing in sending host's qdisc queues or NIC tx queue. */ 1110 sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) && 1111 /* We are not limited by CWND. 
*/ 1112 tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) && 1113 /* All lost packets have been retransmitted. */ 1114 tp->lost_out <= tp->retrans_out) 1115 tp->app_limited = 1116 (tp->delivered + tcp_packets_in_flight(tp)) ? : 1; 1117 } 1118 EXPORT_SYMBOL_GPL(tcp_rate_check_app_limited); 1119 1120 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) 1121 { 1122 struct net_devmem_dmabuf_binding *binding = NULL; 1123 struct tcp_sock *tp = tcp_sk(sk); 1124 struct ubuf_info *uarg = NULL; 1125 struct sk_buff *skb; 1126 struct sockcm_cookie sockc; 1127 int flags, err, copied = 0; 1128 int mss_now = 0, size_goal, copied_syn = 0; 1129 int process_backlog = 0; 1130 int sockc_err = 0; 1131 int zc = 0; 1132 long timeo; 1133 1134 flags = msg->msg_flags; 1135 1136 sockc = (struct sockcm_cookie){ .tsflags = READ_ONCE(sk->sk_tsflags) }; 1137 if (msg->msg_controllen) { 1138 sockc_err = sock_cmsg_send(sk, msg, &sockc); 1139 /* Don't return error until MSG_FASTOPEN has been processed; 1140 * that may succeed even if the cmsg is invalid. 1141 */ 1142 } 1143 1144 if ((flags & MSG_ZEROCOPY) && size) { 1145 if (msg->msg_ubuf) { 1146 uarg = msg->msg_ubuf; 1147 if (sk->sk_route_caps & NETIF_F_SG) 1148 zc = MSG_ZEROCOPY; 1149 } else if (sock_flag(sk, SOCK_ZEROCOPY)) { 1150 skb = tcp_write_queue_tail(sk); 1151 uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb), 1152 !sockc_err && sockc.dmabuf_id); 1153 if (!uarg) { 1154 err = -ENOBUFS; 1155 goto out_err; 1156 } 1157 if (sk->sk_route_caps & NETIF_F_SG) 1158 zc = MSG_ZEROCOPY; 1159 else 1160 uarg_to_msgzc(uarg)->zerocopy = 0; 1161 1162 if (!sockc_err && sockc.dmabuf_id) { 1163 binding = net_devmem_get_binding(sk, sockc.dmabuf_id); 1164 if (IS_ERR(binding)) { 1165 err = PTR_ERR(binding); 1166 binding = NULL; 1167 goto out_err; 1168 } 1169 } 1170 } 1171 } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) { 1172 if (sk->sk_route_caps & NETIF_F_SG) 1173 zc = MSG_SPLICE_PAGES; 1174 } 1175 1176 if (!sockc_err && sockc.dmabuf_id && 1177 (!(flags & MSG_ZEROCOPY) || !sock_flag(sk, SOCK_ZEROCOPY))) { 1178 err = -EINVAL; 1179 goto out_err; 1180 } 1181 1182 if (unlikely(flags & MSG_FASTOPEN || 1183 inet_test_bit(DEFER_CONNECT, sk)) && 1184 !tp->repair) { 1185 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg); 1186 if (err == -EINPROGRESS && copied_syn > 0) 1187 goto out; 1188 else if (err) 1189 goto out_err; 1190 } 1191 1192 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1193 1194 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1195 1196 /* Wait for a connection to finish. One exception is TCP Fast Open 1197 * (passive side) where data is allowed to be sent before a connection 1198 * is fully established. 1199 */ 1200 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 1201 !tcp_passive_fastopen(sk)) { 1202 err = sk_stream_wait_connect(sk, &timeo); 1203 if (err != 0) 1204 goto do_error; 1205 } 1206 1207 if (unlikely(tp->repair)) { 1208 if (tp->repair_queue == TCP_RECV_QUEUE) { 1209 copied = tcp_send_rcvq(sk, msg, size); 1210 goto out_nopush; 1211 } 1212 1213 err = -EINVAL; 1214 if (tp->repair_queue == TCP_NO_QUEUE) 1215 goto out_err; 1216 1217 /* 'common' sending to sendq */ 1218 } 1219 1220 if (sockc_err) { 1221 err = sockc_err; 1222 goto out_err; 1223 } 1224 1225 /* This should be in poll */ 1226 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1227 1228 /* Ok commence sending. 
*/ 1229 copied = 0; 1230 1231 restart: 1232 mss_now = tcp_send_mss(sk, &size_goal, flags); 1233 1234 err = -EPIPE; 1235 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1236 goto do_error; 1237 1238 while (msg_data_left(msg)) { 1239 int copy = 0; 1240 1241 skb = tcp_write_queue_tail(sk); 1242 if (skb) 1243 copy = size_goal - skb->len; 1244 1245 trace_tcp_sendmsg_locked(sk, msg, skb, size_goal); 1246 1247 if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 1248 bool first_skb; 1249 1250 new_segment: 1251 if (!sk_stream_memory_free(sk)) 1252 goto wait_for_space; 1253 1254 if (unlikely(process_backlog >= 16)) { 1255 process_backlog = 0; 1256 if (sk_flush_backlog(sk)) 1257 goto restart; 1258 } 1259 first_skb = tcp_rtx_and_write_queues_empty(sk); 1260 skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, 1261 first_skb); 1262 if (!skb) 1263 goto wait_for_space; 1264 1265 process_backlog++; 1266 1267 #ifdef CONFIG_SKB_DECRYPTED 1268 skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); 1269 #endif 1270 tcp_skb_entail(sk, skb); 1271 copy = size_goal; 1272 1273 /* All packets are restored as if they have 1274 * already been sent. skb_mstamp_ns isn't set to 1275 * avoid wrong rtt estimation. 1276 */ 1277 if (tp->repair) 1278 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 1279 } 1280 1281 /* Try to append data to the end of skb. */ 1282 if (copy > msg_data_left(msg)) 1283 copy = msg_data_left(msg); 1284 1285 if (zc == 0) { 1286 bool merge = true; 1287 int i = skb_shinfo(skb)->nr_frags; 1288 struct page_frag *pfrag = sk_page_frag(sk); 1289 1290 if (!sk_page_frag_refill(sk, pfrag)) 1291 goto wait_for_space; 1292 1293 if (!skb_can_coalesce(skb, i, pfrag->page, 1294 pfrag->offset)) { 1295 if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { 1296 tcp_mark_push(tp, skb); 1297 goto new_segment; 1298 } 1299 merge = false; 1300 } 1301 1302 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1303 1304 if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1305 if (tcp_downgrade_zcopy_pure(sk, skb)) 1306 goto wait_for_space; 1307 skb_zcopy_downgrade_managed(skb); 1308 } 1309 1310 copy = tcp_wmem_schedule(sk, copy); 1311 if (!copy) 1312 goto wait_for_space; 1313 1314 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 1315 pfrag->page, 1316 pfrag->offset, 1317 copy); 1318 if (err) 1319 goto do_error; 1320 1321 /* Update the skb. */ 1322 if (merge) { 1323 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1324 } else { 1325 skb_fill_page_desc(skb, i, pfrag->page, 1326 pfrag->offset, copy); 1327 page_ref_inc(pfrag->page); 1328 } 1329 pfrag->offset += copy; 1330 } else if (zc == MSG_ZEROCOPY) { 1331 /* First append to a fragless skb builds initial 1332 * pure zerocopy skb 1333 */ 1334 if (!skb->len) 1335 skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 1336 1337 if (!skb_zcopy_pure(skb)) { 1338 copy = tcp_wmem_schedule(sk, copy); 1339 if (!copy) 1340 goto wait_for_space; 1341 } 1342 1343 err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg, 1344 binding); 1345 if (err == -EMSGSIZE || err == -EEXIST) { 1346 tcp_mark_push(tp, skb); 1347 goto new_segment; 1348 } 1349 if (err < 0) 1350 goto do_error; 1351 copy = err; 1352 } else if (zc == MSG_SPLICE_PAGES) { 1353 /* Splice in data if we can; copy if we can't. 
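			 * If skb_splice_from_iter() below fails with -EMSGSIZE,
			 * this skb cannot take more data: push it and start a
			 * new segment, as in the MSG_ZEROCOPY path above.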
*/ 1354 if (tcp_downgrade_zcopy_pure(sk, skb)) 1355 goto wait_for_space; 1356 copy = tcp_wmem_schedule(sk, copy); 1357 if (!copy) 1358 goto wait_for_space; 1359 1360 err = skb_splice_from_iter(skb, &msg->msg_iter, copy); 1361 if (err < 0) { 1362 if (err == -EMSGSIZE) { 1363 tcp_mark_push(tp, skb); 1364 goto new_segment; 1365 } 1366 goto do_error; 1367 } 1368 copy = err; 1369 1370 if (!(flags & MSG_NO_SHARED_FRAGS)) 1371 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; 1372 1373 sk_wmem_queued_add(sk, copy); 1374 sk_mem_charge(sk, copy); 1375 } 1376 1377 if (!copied) 1378 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1379 1380 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 1381 TCP_SKB_CB(skb)->end_seq += copy; 1382 tcp_skb_pcount_set(skb, 0); 1383 1384 copied += copy; 1385 if (!msg_data_left(msg)) { 1386 if (unlikely(flags & MSG_EOR)) 1387 TCP_SKB_CB(skb)->eor = 1; 1388 goto out; 1389 } 1390 1391 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 1392 continue; 1393 1394 if (forced_push(tp)) { 1395 tcp_mark_push(tp, skb); 1396 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1397 } else if (skb == tcp_send_head(sk)) 1398 tcp_push_one(sk, mss_now); 1399 continue; 1400 1401 wait_for_space: 1402 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1403 tcp_remove_empty_skb(sk); 1404 if (copied) 1405 tcp_push(sk, flags & ~MSG_MORE, mss_now, 1406 TCP_NAGLE_PUSH, size_goal); 1407 1408 err = sk_stream_wait_memory(sk, &timeo); 1409 if (err != 0) 1410 goto do_error; 1411 1412 mss_now = tcp_send_mss(sk, &size_goal, flags); 1413 } 1414 1415 out: 1416 if (copied) { 1417 tcp_tx_timestamp(sk, &sockc); 1418 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1419 } 1420 out_nopush: 1421 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1422 if (uarg && !msg->msg_ubuf) 1423 net_zcopy_put(uarg); 1424 if (binding) 1425 net_devmem_dmabuf_binding_put(binding); 1426 return copied + copied_syn; 1427 1428 do_error: 1429 tcp_remove_empty_skb(sk); 1430 1431 if (copied + copied_syn) 1432 goto out; 1433 out_err: 1434 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1435 if (uarg && !msg->msg_ubuf) 1436 net_zcopy_put_abort(uarg, true); 1437 err = sk_stream_error(sk, flags, err); 1438 /* make sure we wake any epoll edge trigger waiter */ 1439 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1440 READ_ONCE(sk->sk_write_space)(sk); 1441 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1442 } 1443 if (binding) 1444 net_devmem_dmabuf_binding_put(binding); 1445 1446 return err; 1447 } 1448 EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1449 1450 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1451 { 1452 int ret; 1453 1454 lock_sock(sk); 1455 ret = tcp_sendmsg_locked(sk, msg, size); 1456 release_sock(sk); 1457 1458 return ret; 1459 } 1460 EXPORT_SYMBOL(tcp_sendmsg); 1461 1462 void tcp_splice_eof(struct socket *sock) 1463 { 1464 struct sock *sk = sock->sk; 1465 struct tcp_sock *tp = tcp_sk(sk); 1466 int mss_now, size_goal; 1467 1468 if (!tcp_write_queue_tail(sk)) 1469 return; 1470 1471 lock_sock(sk); 1472 mss_now = tcp_send_mss(sk, &size_goal, 0); 1473 tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 1474 release_sock(sk); 1475 } 1476 1477 /* 1478 * Handle reading urgent data. 
BSD has very simple semantics for 1479 * this, no blocking and very strange errors 8) 1480 */ 1481 1482 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 1483 { 1484 struct tcp_sock *tp = tcp_sk(sk); 1485 1486 /* No URG data to read. */ 1487 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1488 tp->urg_data == TCP_URG_READ) 1489 return -EINVAL; /* Yes this is right ! */ 1490 1491 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1492 return -ENOTCONN; 1493 1494 if (tp->urg_data & TCP_URG_VALID) { 1495 int err = 0; 1496 char c = tp->urg_data; 1497 1498 if (!(flags & MSG_PEEK)) 1499 WRITE_ONCE(tp->urg_data, TCP_URG_READ); 1500 1501 /* Read urgent data. */ 1502 msg->msg_flags |= MSG_OOB; 1503 1504 if (len > 0) { 1505 if (!(flags & MSG_TRUNC)) 1506 err = memcpy_to_msg(msg, &c, 1); 1507 len = 1; 1508 } else 1509 msg->msg_flags |= MSG_TRUNC; 1510 1511 return err ? -EFAULT : len; 1512 } 1513 1514 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1515 return 0; 1516 1517 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1518 * the available implementations agree in this case: 1519 * this call should never block, independent of the 1520 * blocking state of the socket. 1521 * Mike <pall@rz.uni-karlsruhe.de> 1522 */ 1523 return -EAGAIN; 1524 } 1525 1526 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1527 { 1528 struct sk_buff *skb; 1529 int copied = 0, err = 0; 1530 1531 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 1532 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1533 if (err) 1534 return err; 1535 copied += skb->len; 1536 } 1537 1538 skb_queue_walk(&sk->sk_write_queue, skb) { 1539 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1540 if (err) 1541 break; 1542 1543 copied += skb->len; 1544 } 1545 1546 return err ?: copied; 1547 } 1548 1549 /* Clean up the receive buffer for full frames taken by the user, 1550 * then send an ACK if necessary. COPIED is the number of bytes 1551 * tcp_recvmsg has given to the user so far, it speeds up the 1552 * calculation of whether or not we must ACK for the sake of 1553 * a window update. 1554 */ 1555 void __tcp_cleanup_rbuf(struct sock *sk, int copied) 1556 { 1557 struct tcp_sock *tp = tcp_sk(sk); 1558 bool time_to_ack = false; 1559 1560 if (inet_csk_ack_scheduled(sk)) { 1561 const struct inet_connection_sock *icsk = inet_csk(sk); 1562 1563 if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1564 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1565 /* 1566 * If this read emptied read buffer, we send ACK, if 1567 * connection is not bidirectional, user drained 1568 * receive buffer and there was a small segment 1569 * in queue. 1570 */ 1571 (copied > 0 && 1572 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1573 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1574 !inet_csk_in_pingpong_mode(sk))) && 1575 !atomic_read(&sk->sk_rmem_alloc))) 1576 time_to_ack = true; 1577 } 1578 1579 /* We send an ACK if we can now advertise a non-zero window 1580 * which has been raised "significantly". 1581 * 1582 * Even if window raised up to infinity, do not send window open ACK 1583 * in states, where we will not receive more. It is useless. 1584 */ 1585 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1586 __u32 rcv_window_now = tcp_receive_window(tp); 1587 1588 /* Optimize, __tcp_select_window() is not cheap. 
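		 * Only do the full computation when the currently advertised
		 * window has dropped to half of the clamp or less; above that,
		 * the "at least twice" raise tested below can never happen
		 * because the advertised window cannot exceed the clamp.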
*/ 1589 if (2*rcv_window_now <= tp->window_clamp) { 1590 __u32 new_window = __tcp_select_window(sk); 1591 1592 /* Send ACK now, if this read freed lots of space 1593 * in our buffer. Certainly, new_window is new window. 1594 * We can advertise it now, if it is not less than current one. 1595 * "Lots" means "at least twice" here. 1596 */ 1597 if (new_window && new_window >= 2 * rcv_window_now) 1598 time_to_ack = true; 1599 } 1600 } 1601 if (time_to_ack) { 1602 tcp_mstamp_refresh(tp); 1603 tcp_send_ack(sk); 1604 } 1605 } 1606 1607 void tcp_cleanup_rbuf(struct sock *sk, int copied) 1608 { 1609 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1610 struct tcp_sock *tp = tcp_sk(sk); 1611 1612 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1613 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1614 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1615 __tcp_cleanup_rbuf(sk, copied); 1616 } 1617 1618 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 1619 { 1620 __skb_unlink(skb, &sk->sk_receive_queue); 1621 if (likely(skb->destructor == sock_rfree)) { 1622 sock_rfree(skb); 1623 skb->destructor = NULL; 1624 skb->sk = NULL; 1625 return skb_attempt_defer_free(skb); 1626 } 1627 __kfree_skb(skb); 1628 } 1629 1630 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1631 { 1632 struct sk_buff *skb; 1633 u32 offset; 1634 1635 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1636 offset = seq - TCP_SKB_CB(skb)->seq; 1637 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 1638 pr_err_once("%s: found a SYN, please report !\n", __func__); 1639 offset--; 1640 } 1641 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 1642 *off = offset; 1643 return skb; 1644 } 1645 /* This looks weird, but this can happen if TCP collapsing 1646 * splitted a fat GRO packet, while we released socket lock 1647 * in skb_splice_bits() 1648 */ 1649 tcp_eat_recv_skb(sk, skb); 1650 } 1651 return NULL; 1652 } 1653 EXPORT_SYMBOL(tcp_recv_skb); 1654 1655 /* 1656 * This routine provides an alternative to tcp_recvmsg() for routines 1657 * that would like to handle copying from skbuffs directly in 'sendfile' 1658 * fashion. 1659 * Note: 1660 * - It is assumed that the socket was locked by the caller. 1661 * - The routine does not block. 1662 * - At present, there is no support for reading OOB data 1663 * or for 'peeking' the socket using this routine 1664 * (although both would be easy to implement). 1665 */ 1666 static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1667 sk_read_actor_t recv_actor, bool noack, 1668 u32 *copied_seq) 1669 { 1670 struct sk_buff *skb; 1671 struct tcp_sock *tp = tcp_sk(sk); 1672 u32 seq = *copied_seq; 1673 u32 offset; 1674 int copied = 0; 1675 1676 if (sk->sk_state == TCP_LISTEN) 1677 return -ENOTCONN; 1678 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1679 if (offset < skb->len) { 1680 int used; 1681 size_t len; 1682 1683 len = skb->len - offset; 1684 /* Stop reading if we hit a patch of urgent data */ 1685 if (unlikely(tp->urg_data)) { 1686 u32 urg_offset = tp->urg_seq - seq; 1687 if (urg_offset < len) 1688 len = urg_offset; 1689 if (!len) 1690 break; 1691 } 1692 used = recv_actor(desc, skb, offset, len); 1693 if (used <= 0) { 1694 if (!copied) 1695 copied = used; 1696 break; 1697 } 1698 if (WARN_ON_ONCE(used > len)) 1699 used = len; 1700 seq += used; 1701 copied += used; 1702 offset += used; 1703 1704 /* If recv_actor drops the lock (e.g. 
TCP splice 1705 * receive) the skb pointer might be invalid when 1706 * getting here: tcp_collapse might have deleted it 1707 * while aggregating skbs from the socket queue. 1708 */ 1709 skb = tcp_recv_skb(sk, seq - 1, &offset); 1710 if (!skb) 1711 break; 1712 /* TCP coalescing might have appended data to the skb. 1713 * Try to splice more frags 1714 */ 1715 if (offset + 1 != skb->len) 1716 continue; 1717 } 1718 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1719 tcp_eat_recv_skb(sk, skb); 1720 ++seq; 1721 break; 1722 } 1723 tcp_eat_recv_skb(sk, skb); 1724 if (!desc->count) 1725 break; 1726 WRITE_ONCE(*copied_seq, seq); 1727 } 1728 WRITE_ONCE(*copied_seq, seq); 1729 1730 if (noack) 1731 goto out; 1732 1733 tcp_rcv_space_adjust(sk); 1734 1735 /* Clean up data we have read: This will do ACK frames. */ 1736 if (copied > 0) { 1737 tcp_recv_skb(sk, seq, &offset); 1738 tcp_cleanup_rbuf(sk, copied); 1739 } 1740 out: 1741 return copied; 1742 } 1743 1744 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1745 sk_read_actor_t recv_actor) 1746 { 1747 return __tcp_read_sock(sk, desc, recv_actor, false, 1748 &tcp_sk(sk)->copied_seq); 1749 } 1750 EXPORT_SYMBOL(tcp_read_sock); 1751 1752 int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, 1753 sk_read_actor_t recv_actor, bool noack, 1754 u32 *copied_seq) 1755 { 1756 return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq); 1757 } 1758 1759 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 1760 { 1761 struct sk_buff *skb; 1762 int copied = 0; 1763 1764 if (sk->sk_state == TCP_LISTEN) 1765 return -ENOTCONN; 1766 1767 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1768 u8 tcp_flags; 1769 int used; 1770 1771 __skb_unlink(skb, &sk->sk_receive_queue); 1772 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1773 tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1774 used = recv_actor(sk, skb); 1775 if (used < 0) { 1776 if (!copied) 1777 copied = used; 1778 break; 1779 } 1780 copied += used; 1781 1782 if (tcp_flags & TCPHDR_FIN) 1783 break; 1784 } 1785 return copied; 1786 } 1787 1788 void tcp_read_done(struct sock *sk, size_t len) 1789 { 1790 struct tcp_sock *tp = tcp_sk(sk); 1791 u32 seq = tp->copied_seq; 1792 struct sk_buff *skb; 1793 size_t left; 1794 u32 offset; 1795 1796 if (sk->sk_state == TCP_LISTEN) 1797 return; 1798 1799 left = len; 1800 while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1801 int used; 1802 1803 used = min_t(size_t, skb->len - offset, left); 1804 seq += used; 1805 left -= used; 1806 1807 if (skb->len > offset + used) 1808 break; 1809 1810 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1811 tcp_eat_recv_skb(sk, skb); 1812 ++seq; 1813 break; 1814 } 1815 tcp_eat_recv_skb(sk, skb); 1816 } 1817 WRITE_ONCE(tp->copied_seq, seq); 1818 1819 tcp_rcv_space_adjust(sk); 1820 1821 /* Clean up data we have read: This will do ACK frames. */ 1822 if (left != len) 1823 tcp_cleanup_rbuf(sk, len - left); 1824 } 1825 EXPORT_SYMBOL(tcp_read_done); 1826 1827 int tcp_peek_len(struct socket *sock) 1828 { 1829 return tcp_inq(sock->sk); 1830 } 1831 1832 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1833 int tcp_set_rcvlowat(struct sock *sk, int val) 1834 { 1835 struct tcp_sock *tp = tcp_sk(sk); 1836 int space, cap; 1837 1838 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1839 cap = sk->sk_rcvbuf >> 1; 1840 else 1841 cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1842 val = min(val, cap); 1843 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 1844 1845 /* Check if we need to signal EPOLLIN right now */ 1846 tcp_data_ready(sk); 1847 1848 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1849 return 0; 1850 1851 space = tcp_space_from_win(sk, val); 1852 if (space > sk->sk_rcvbuf) { 1853 WRITE_ONCE(sk->sk_rcvbuf, space); 1854 1855 if (tp->window_clamp && tp->window_clamp < val) 1856 WRITE_ONCE(tp->window_clamp, val); 1857 } 1858 return 0; 1859 } 1860 1861 void tcp_set_rcvbuf(struct sock *sk, int val) 1862 { 1863 tcp_set_window_clamp(sk, tcp_win_from_space(sk, val)); 1864 } 1865 1866 #ifdef CONFIG_MMU 1867 static const struct vm_operations_struct tcp_vm_ops = { 1868 }; 1869 1870 int tcp_mmap(struct file *file, struct socket *sock, 1871 struct vm_area_struct *vma) 1872 { 1873 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 1874 return -EPERM; 1875 vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 1876 1877 /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 1878 vm_flags_set(vma, VM_MIXEDMAP); 1879 1880 vma->vm_ops = &tcp_vm_ops; 1881 return 0; 1882 } 1883 1884 static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 1885 u32 *offset_frag) 1886 { 1887 skb_frag_t *frag; 1888 1889 if (unlikely(offset_skb >= skb->len)) 1890 return NULL; 1891 1892 offset_skb -= skb_headlen(skb); 1893 if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 1894 return NULL; 1895 1896 frag = skb_shinfo(skb)->frags; 1897 while (offset_skb) { 1898 if (skb_frag_size(frag) > offset_skb) { 1899 *offset_frag = offset_skb; 1900 return frag; 1901 } 1902 offset_skb -= skb_frag_size(frag); 1903 ++frag; 1904 } 1905 *offset_frag = 0; 1906 return frag; 1907 } 1908 1909 static bool can_map_frag(const skb_frag_t *frag) 1910 { 1911 struct page *page; 1912 1913 if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag)) 1914 return false; 1915 1916 page = skb_frag_page(frag); 1917 1918 if (PageCompound(page) || page->mapping) 1919 return false; 1920 1921 return true; 1922 } 1923 1924 static int find_next_mappable_frag(const skb_frag_t *frag, 1925 int remaining_in_skb) 1926 { 1927 int offset = 0; 1928 1929 if (likely(can_map_frag(frag))) 1930 return 0; 1931 1932 while (offset < remaining_in_skb && !can_map_frag(frag)) { 1933 offset += skb_frag_size(frag); 1934 ++frag; 1935 } 1936 return offset; 1937 } 1938 1939 static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 1940 struct tcp_zerocopy_receive *zc, 1941 struct sk_buff *skb, u32 offset) 1942 { 1943 u32 frag_offset, partial_frag_remainder = 0; 1944 int mappable_offset; 1945 skb_frag_t *frag; 1946 1947 /* worst case: skip to next skb. try to improve on this case below */ 1948 zc->recv_skip_hint = skb->len - offset; 1949 1950 /* Find the frag containing this offset (and how far into that frag) */ 1951 frag = skb_advance_to_frag(skb, offset, &frag_offset); 1952 if (!frag) 1953 return; 1954 1955 if (frag_offset) { 1956 struct skb_shared_info *info = skb_shinfo(skb); 1957 1958 /* We read part of the last frag, must recvmsg() rest of skb. */ 1959 if (frag == &info->frags[info->nr_frags - 1]) 1960 return; 1961 1962 /* Else, we must at least read the remainder in this frag. */ 1963 partial_frag_remainder = skb_frag_size(frag) - frag_offset; 1964 zc->recv_skip_hint -= partial_frag_remainder; 1965 ++frag; 1966 } 1967 1968 /* partial_frag_remainder: If part way through a frag, must read rest. 1969 * mappable_offset: Bytes till next mappable frag, *not* counting bytes 1970 * in partial_frag_remainder. 
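	 * The resulting recv_skip_hint is thus the number of bytes the caller
	 * must consume via recvmsg() before the next frag that is a full,
	 * page-aligned page (see can_map_frag()) and can therefore be mapped.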
1971 */ 1972 mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 1973 zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 1974 } 1975 1976 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1977 int flags, struct scm_timestamping_internal *tss, 1978 int *cmsg_flags); 1979 static int receive_fallback_to_copy(struct sock *sk, 1980 struct tcp_zerocopy_receive *zc, int inq, 1981 struct scm_timestamping_internal *tss) 1982 { 1983 unsigned long copy_address = (unsigned long)zc->copybuf_address; 1984 struct msghdr msg = {}; 1985 int err; 1986 1987 zc->length = 0; 1988 zc->recv_skip_hint = 0; 1989 1990 if (copy_address != zc->copybuf_address) 1991 return -EINVAL; 1992 1993 err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq, 1994 &msg.msg_iter); 1995 if (err) 1996 return err; 1997 1998 err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 1999 tss, &zc->msg_flags); 2000 if (err < 0) 2001 return err; 2002 2003 zc->copybuf_len = err; 2004 if (likely(zc->copybuf_len)) { 2005 struct sk_buff *skb; 2006 u32 offset; 2007 2008 skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 2009 if (skb) 2010 tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 2011 } 2012 return 0; 2013 } 2014 2015 static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 2016 struct sk_buff *skb, u32 copylen, 2017 u32 *offset, u32 *seq) 2018 { 2019 unsigned long copy_address = (unsigned long)zc->copybuf_address; 2020 struct msghdr msg = {}; 2021 int err; 2022 2023 if (copy_address != zc->copybuf_address) 2024 return -EINVAL; 2025 2026 err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen, 2027 &msg.msg_iter); 2028 if (err) 2029 return err; 2030 err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 2031 if (err) 2032 return err; 2033 zc->recv_skip_hint -= copylen; 2034 *offset += copylen; 2035 *seq += copylen; 2036 return (__s32)copylen; 2037 } 2038 2039 static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 2040 struct sock *sk, 2041 struct sk_buff *skb, 2042 u32 *seq, 2043 s32 copybuf_len, 2044 struct scm_timestamping_internal *tss) 2045 { 2046 u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 2047 2048 if (!copylen) 2049 return 0; 2050 /* skb is null if inq < PAGE_SIZE. */ 2051 if (skb) { 2052 offset = *seq - TCP_SKB_CB(skb)->seq; 2053 } else { 2054 skb = tcp_recv_skb(sk, *seq, &offset); 2055 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2056 tcp_update_recv_tstamps(skb, tss); 2057 zc->msg_flags |= TCP_CMSG_TS; 2058 } 2059 } 2060 2061 zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 2062 seq); 2063 return zc->copybuf_len < 0 ? 0 : copylen; 2064 } 2065 2066 static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 2067 struct page **pending_pages, 2068 unsigned long pages_remaining, 2069 unsigned long *address, 2070 u32 *length, 2071 u32 *seq, 2072 struct tcp_zerocopy_receive *zc, 2073 u32 total_bytes_to_map, 2074 int err) 2075 { 2076 /* At least one page did not map. Try zapping if we skipped earlier. */ 2077 if (err == -EBUSY && 2078 zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 2079 u32 maybe_zap_len; 2080 2081 maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 2082 *length + /* Mapped or pending */ 2083 (pages_remaining * PAGE_SIZE); /* Failed map. 
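 * maybe_zap_len therefore spans from the first page that failed to insert
 * up to the end of the requested mapping range, so the retry below starts
 * from a cleanly zapped region.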
*/ 2084 zap_page_range_single(vma, *address, maybe_zap_len, NULL); 2085 err = 0; 2086 } 2087 2088 if (!err) { 2089 unsigned long leftover_pages = pages_remaining; 2090 int bytes_mapped; 2091 2092 /* We called zap_page_range_single, try to reinsert. */ 2093 err = vm_insert_pages(vma, *address, 2094 pending_pages, 2095 &pages_remaining); 2096 bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 2097 *seq += bytes_mapped; 2098 *address += bytes_mapped; 2099 } 2100 if (err) { 2101 /* Either we were unable to zap, OR we zapped, retried an 2102 * insert, and still had an issue. Either ways, pages_remaining 2103 * is the number of pages we were unable to map, and we unroll 2104 * some state we speculatively touched before. 2105 */ 2106 const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 2107 2108 *length -= bytes_not_mapped; 2109 zc->recv_skip_hint += bytes_not_mapped; 2110 } 2111 return err; 2112 } 2113 2114 static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 2115 struct page **pages, 2116 unsigned int pages_to_map, 2117 unsigned long *address, 2118 u32 *length, 2119 u32 *seq, 2120 struct tcp_zerocopy_receive *zc, 2121 u32 total_bytes_to_map) 2122 { 2123 unsigned long pages_remaining = pages_to_map; 2124 unsigned int pages_mapped; 2125 unsigned int bytes_mapped; 2126 int err; 2127 2128 err = vm_insert_pages(vma, *address, pages, &pages_remaining); 2129 pages_mapped = pages_to_map - (unsigned int)pages_remaining; 2130 bytes_mapped = PAGE_SIZE * pages_mapped; 2131 /* Even if vm_insert_pages fails, it may have partially succeeded in 2132 * mapping (some but not all of the pages). 2133 */ 2134 *seq += bytes_mapped; 2135 *address += bytes_mapped; 2136 2137 if (likely(!err)) 2138 return 0; 2139 2140 /* Error: maybe zap and retry + rollback state for failed inserts. */ 2141 return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 2142 pages_remaining, address, length, seq, zc, total_bytes_to_map, 2143 err); 2144 } 2145 2146 #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 2147 static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 2148 struct tcp_zerocopy_receive *zc, 2149 struct scm_timestamping_internal *tss) 2150 { 2151 unsigned long msg_control_addr; 2152 struct msghdr cmsg_dummy; 2153 2154 msg_control_addr = (unsigned long)zc->msg_control; 2155 cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 2156 cmsg_dummy.msg_controllen = 2157 (__kernel_size_t)zc->msg_controllen; 2158 cmsg_dummy.msg_flags = in_compat_syscall() 2159 ? 
MSG_CMSG_COMPAT : 0; 2160 cmsg_dummy.msg_control_is_user = true; 2161 zc->msg_flags = 0; 2162 if (zc->msg_control == msg_control_addr && 2163 zc->msg_controllen == cmsg_dummy.msg_controllen) { 2164 tcp_recv_timestamp(&cmsg_dummy, sk, tss); 2165 zc->msg_control = (__u64) 2166 ((uintptr_t)cmsg_dummy.msg_control_user); 2167 zc->msg_controllen = 2168 (__u64)cmsg_dummy.msg_controllen; 2169 zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 2170 } 2171 } 2172 2173 static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, 2174 unsigned long address, 2175 bool *mmap_locked) 2176 { 2177 struct vm_area_struct *vma = lock_vma_under_rcu(mm, address); 2178 2179 if (vma) { 2180 if (vma->vm_ops != &tcp_vm_ops) { 2181 vma_end_read(vma); 2182 return NULL; 2183 } 2184 *mmap_locked = false; 2185 return vma; 2186 } 2187 2188 mmap_read_lock(mm); 2189 vma = vma_lookup(mm, address); 2190 if (!vma || vma->vm_ops != &tcp_vm_ops) { 2191 mmap_read_unlock(mm); 2192 return NULL; 2193 } 2194 *mmap_locked = true; 2195 return vma; 2196 } 2197 2198 #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 2199 static int tcp_zerocopy_receive(struct sock *sk, 2200 struct tcp_zerocopy_receive *zc, 2201 struct scm_timestamping_internal *tss) 2202 { 2203 u32 length = 0, offset, vma_len, avail_len, copylen = 0; 2204 unsigned long address = (unsigned long)zc->address; 2205 struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 2206 s32 copybuf_len = zc->copybuf_len; 2207 struct tcp_sock *tp = tcp_sk(sk); 2208 const skb_frag_t *frags = NULL; 2209 unsigned int pages_to_map = 0; 2210 struct vm_area_struct *vma; 2211 struct sk_buff *skb = NULL; 2212 u32 seq = tp->copied_seq; 2213 u32 total_bytes_to_map; 2214 int inq = tcp_inq(sk); 2215 bool mmap_locked; 2216 int ret; 2217 2218 zc->copybuf_len = 0; 2219 zc->msg_flags = 0; 2220 2221 if (address & (PAGE_SIZE - 1) || address != zc->address) 2222 return -EINVAL; 2223 2224 if (sk->sk_state == TCP_LISTEN) 2225 return -ENOTCONN; 2226 2227 sock_rps_record_flow(sk); 2228 2229 if (inq && inq <= copybuf_len) 2230 return receive_fallback_to_copy(sk, zc, inq, tss); 2231 2232 if (inq < PAGE_SIZE) { 2233 zc->length = 0; 2234 zc->recv_skip_hint = inq; 2235 if (!inq && sock_flag(sk, SOCK_DONE)) 2236 return -EIO; 2237 return 0; 2238 } 2239 2240 vma = find_tcp_vma(current->mm, address, &mmap_locked); 2241 if (!vma) 2242 return -EINVAL; 2243 2244 vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 2245 avail_len = min_t(u32, vma_len, inq); 2246 total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 2247 if (total_bytes_to_map) { 2248 if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2249 zap_page_range_single(vma, address, total_bytes_to_map, 2250 NULL); 2251 zc->length = total_bytes_to_map; 2252 zc->recv_skip_hint = 0; 2253 } else { 2254 zc->length = avail_len; 2255 zc->recv_skip_hint = avail_len; 2256 } 2257 ret = 0; 2258 while (length + PAGE_SIZE <= zc->length) { 2259 int mappable_offset; 2260 struct page *page; 2261 2262 if (zc->recv_skip_hint < PAGE_SIZE) { 2263 u32 offset_frag; 2264 2265 if (skb) { 2266 if (zc->recv_skip_hint > 0) 2267 break; 2268 skb = skb->next; 2269 offset = seq - TCP_SKB_CB(skb)->seq; 2270 } else { 2271 skb = tcp_recv_skb(sk, seq, &offset); 2272 } 2273 2274 if (!skb_frags_readable(skb)) 2275 break; 2276 2277 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2278 tcp_update_recv_tstamps(skb, tss); 2279 zc->msg_flags |= TCP_CMSG_TS; 2280 } 2281 zc->recv_skip_hint = skb->len - offset; 2282 frags = skb_advance_to_frag(skb, offset, &offset_frag); 2283 if (!frags || offset_frag) 2284 break; 
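			/* If the offset is not frag-aligned (or there are no
			 * usable page frags), the skb cannot be page-mapped;
			 * we bail out above and leave the remainder to the
			 * copybuf fallback or a later recvmsg().
			 */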
2285 } 2286 2287 mappable_offset = find_next_mappable_frag(frags, 2288 zc->recv_skip_hint); 2289 if (mappable_offset) { 2290 zc->recv_skip_hint = mappable_offset; 2291 break; 2292 } 2293 page = skb_frag_page(frags); 2294 if (WARN_ON_ONCE(!page)) 2295 break; 2296 2297 prefetchw(page); 2298 pages[pages_to_map++] = page; 2299 length += PAGE_SIZE; 2300 zc->recv_skip_hint -= PAGE_SIZE; 2301 frags++; 2302 if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 2303 zc->recv_skip_hint < PAGE_SIZE) { 2304 /* Either full batch, or we're about to go to next skb 2305 * (and we cannot unroll failed ops across skbs). 2306 */ 2307 ret = tcp_zerocopy_vm_insert_batch(vma, pages, 2308 pages_to_map, 2309 &address, &length, 2310 &seq, zc, 2311 total_bytes_to_map); 2312 if (ret) 2313 goto out; 2314 pages_to_map = 0; 2315 } 2316 } 2317 if (pages_to_map) { 2318 ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 2319 &address, &length, &seq, 2320 zc, total_bytes_to_map); 2321 } 2322 out: 2323 if (mmap_locked) 2324 mmap_read_unlock(current->mm); 2325 else 2326 vma_end_read(vma); 2327 /* Try to copy straggler data. */ 2328 if (!ret) 2329 copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 2330 2331 if (length + copylen) { 2332 WRITE_ONCE(tp->copied_seq, seq); 2333 tcp_rcv_space_adjust(sk); 2334 2335 /* Clean up data we have read: This will do ACK frames. */ 2336 tcp_recv_skb(sk, seq, &offset); 2337 tcp_cleanup_rbuf(sk, length + copylen); 2338 ret = 0; 2339 if (length == zc->length) 2340 zc->recv_skip_hint = 0; 2341 } else { 2342 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 2343 ret = -EIO; 2344 } 2345 zc->length = length; 2346 return ret; 2347 } 2348 #endif 2349 2350 /* Similar to __sock_recv_timestamp, but does not require an skb */ 2351 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 2352 struct scm_timestamping_internal *tss) 2353 { 2354 int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 2355 u32 tsflags = READ_ONCE(sk->sk_tsflags); 2356 2357 if (tss->ts[0]) { 2358 if (sock_flag(sk, SOCK_RCVTSTAMP)) { 2359 struct timespec64 tv = ktime_to_timespec64(tss->ts[0]); 2360 2361 if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2362 if (new_tstamp) { 2363 struct __kernel_timespec kts = { 2364 .tv_sec = tv.tv_sec, 2365 .tv_nsec = tv.tv_nsec, 2366 }; 2367 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2368 sizeof(kts), &kts); 2369 } else { 2370 struct __kernel_old_timespec ts_old = { 2371 .tv_sec = tv.tv_sec, 2372 .tv_nsec = tv.tv_nsec, 2373 }; 2374 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 2375 sizeof(ts_old), &ts_old); 2376 } 2377 } else { 2378 if (new_tstamp) { 2379 struct __kernel_sock_timeval stv = { 2380 .tv_sec = tv.tv_sec, 2381 .tv_usec = tv.tv_nsec / 1000, 2382 }; 2383 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2384 sizeof(stv), &stv); 2385 } else { 2386 struct __kernel_old_timeval otv = { 2387 .tv_sec = tv.tv_sec, 2388 .tv_usec = tv.tv_nsec / 1000, 2389 }; 2390 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 2391 sizeof(otv), &otv); 2392 } 2393 } 2394 } 2395 2396 if (!(tsflags & SOF_TIMESTAMPING_SOFTWARE && 2397 (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE || 2398 !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))) 2399 tss->ts[0] = 0; 2400 } 2401 2402 if (tss->ts[2]) { 2403 if (!(tsflags & SOF_TIMESTAMPING_RAW_HARDWARE && 2404 (tsflags & SOF_TIMESTAMPING_RX_HARDWARE || 2405 !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))) 2406 tss->ts[2] = 0; 2407 } 2408 2409 if (tss->ts[0] | tss->ts[2]) { 2410 tss->ts[1] = 0; 2411 if (sock_flag(sk, SOCK_TSTAMP_NEW)) 2412 
put_cmsg_scm_timestamping64(msg, tss); 2413 else 2414 put_cmsg_scm_timestamping(msg, tss); 2415 } 2416 } 2417 2418 static int tcp_inq_hint(struct sock *sk) 2419 { 2420 const struct tcp_sock *tp = tcp_sk(sk); 2421 u32 copied_seq = READ_ONCE(tp->copied_seq); 2422 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2423 int inq; 2424 2425 inq = rcv_nxt - copied_seq; 2426 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2427 lock_sock(sk); 2428 inq = tp->rcv_nxt - tp->copied_seq; 2429 release_sock(sk); 2430 } 2431 /* After receiving a FIN, tell the user-space to continue reading 2432 * by returning a non-zero inq. 2433 */ 2434 if (inq == 0 && sock_flag(sk, SOCK_DONE)) 2435 inq = 1; 2436 return inq; 2437 } 2438 2439 /* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */ 2440 struct tcp_xa_pool { 2441 u8 max; /* max <= MAX_SKB_FRAGS */ 2442 u8 idx; /* idx <= max */ 2443 __u32 tokens[MAX_SKB_FRAGS]; 2444 netmem_ref netmems[MAX_SKB_FRAGS]; 2445 }; 2446 2447 static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p) 2448 { 2449 int i; 2450 2451 /* Commit part that has been copied to user space. */ 2452 for (i = 0; i < p->idx; i++) 2453 __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY, 2454 (__force void *)p->netmems[i], GFP_KERNEL); 2455 /* Rollback what has been pre-allocated and is no longer needed. */ 2456 for (; i < p->max; i++) 2457 __xa_erase(&sk->sk_user_frags, p->tokens[i]); 2458 2459 p->max = 0; 2460 p->idx = 0; 2461 } 2462 2463 static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p) 2464 { 2465 if (!p->max) 2466 return; 2467 2468 xa_lock_bh(&sk->sk_user_frags); 2469 2470 tcp_xa_pool_commit_locked(sk, p); 2471 2472 xa_unlock_bh(&sk->sk_user_frags); 2473 } 2474 2475 static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p, 2476 unsigned int max_frags) 2477 { 2478 int err, k; 2479 2480 if (p->idx < p->max) 2481 return 0; 2482 2483 xa_lock_bh(&sk->sk_user_frags); 2484 2485 tcp_xa_pool_commit_locked(sk, p); 2486 2487 for (k = 0; k < max_frags; k++) { 2488 err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k], 2489 XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL); 2490 if (err) 2491 break; 2492 } 2493 2494 xa_unlock_bh(&sk->sk_user_frags); 2495 2496 p->max = k; 2497 p->idx = 0; 2498 return k ? 0 : err; 2499 } 2500 2501 /* On error, returns the -errno. On success, returns number of bytes sent to the 2502 * user. May not consume all of @remaining_len. 2503 */ 2504 static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, 2505 unsigned int offset, struct msghdr *msg, 2506 int remaining_len) 2507 { 2508 struct dmabuf_cmsg dmabuf_cmsg = { 0 }; 2509 struct tcp_xa_pool tcp_xa_pool; 2510 unsigned int start; 2511 int i, copy, n; 2512 int sent = 0; 2513 int err = 0; 2514 2515 tcp_xa_pool.max = 0; 2516 tcp_xa_pool.idx = 0; 2517 do { 2518 start = skb_headlen(skb); 2519 2520 if (skb_frags_readable(skb)) { 2521 err = -ENODEV; 2522 goto out; 2523 } 2524 2525 /* Copy header. */ 2526 copy = start - offset; 2527 if (copy > 0) { 2528 copy = min(copy, remaining_len); 2529 2530 n = copy_to_iter(skb->data + offset, copy, 2531 &msg->msg_iter); 2532 if (n != copy) { 2533 err = -EFAULT; 2534 goto out; 2535 } 2536 2537 offset += copy; 2538 remaining_len -= copy; 2539 2540 /* First a dmabuf_cmsg for # bytes copied to user 2541 * buffer. 
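 * The dmabuf frags that follow are then described to userspace by one
 * SO_DEVMEM_DMABUF cmsg each (see the frag loop below), carrying
 * frag_offset, frag_size, frag_token and dmabuf_id, so a (hypothetical)
 * MSG_SOCK_DEVMEM consumer can locate the payload inside the bound dmabuf
 * and later return the token once it is done with the frag.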
2542 */ 2543 memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg)); 2544 dmabuf_cmsg.frag_size = copy; 2545 err = put_cmsg_notrunc(msg, SOL_SOCKET, 2546 SO_DEVMEM_LINEAR, 2547 sizeof(dmabuf_cmsg), 2548 &dmabuf_cmsg); 2549 if (err) 2550 goto out; 2551 2552 sent += copy; 2553 2554 if (remaining_len == 0) 2555 goto out; 2556 } 2557 2558 /* after that, send information of dmabuf pages through a 2559 * sequence of cmsg 2560 */ 2561 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2562 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2563 struct net_iov *niov; 2564 u64 frag_offset; 2565 int end; 2566 2567 /* !skb_frags_readable() should indicate that ALL the 2568 * frags in this skb are dmabuf net_iovs. We're checking 2569 * for that flag above, but also check individual frags 2570 * here. If the tcp stack is not setting 2571 * skb_frags_readable() correctly, we still don't want 2572 * to crash here. 2573 */ 2574 if (!skb_frag_net_iov(frag)) { 2575 net_err_ratelimited("Found non-dmabuf skb with net_iov"); 2576 err = -ENODEV; 2577 goto out; 2578 } 2579 2580 niov = skb_frag_net_iov(frag); 2581 if (!net_is_devmem_iov(niov)) { 2582 err = -ENODEV; 2583 goto out; 2584 } 2585 2586 end = start + skb_frag_size(frag); 2587 copy = end - offset; 2588 2589 if (copy > 0) { 2590 copy = min(copy, remaining_len); 2591 2592 frag_offset = net_iov_virtual_addr(niov) + 2593 skb_frag_off(frag) + offset - 2594 start; 2595 dmabuf_cmsg.frag_offset = frag_offset; 2596 dmabuf_cmsg.frag_size = copy; 2597 err = tcp_xa_pool_refill(sk, &tcp_xa_pool, 2598 skb_shinfo(skb)->nr_frags - i); 2599 if (err) 2600 goto out; 2601 2602 /* Will perform the exchange later */ 2603 dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx]; 2604 dmabuf_cmsg.dmabuf_id = net_devmem_iov_binding_id(niov); 2605 2606 offset += copy; 2607 remaining_len -= copy; 2608 2609 err = put_cmsg_notrunc(msg, SOL_SOCKET, 2610 SO_DEVMEM_DMABUF, 2611 sizeof(dmabuf_cmsg), 2612 &dmabuf_cmsg); 2613 if (err) 2614 goto out; 2615 2616 atomic_long_inc(&niov->desc.pp_ref_count); 2617 tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag); 2618 2619 sent += copy; 2620 2621 if (remaining_len == 0) 2622 goto out; 2623 } 2624 start = end; 2625 } 2626 2627 tcp_xa_pool_commit(sk, &tcp_xa_pool); 2628 if (!remaining_len) 2629 goto out; 2630 2631 /* if remaining_len is not satisfied yet, we need to go to the 2632 * next frag in the frag_list to satisfy remaining_len. 2633 */ 2634 skb = skb_shinfo(skb)->frag_list ?: skb->next; 2635 2636 offset = offset - start; 2637 } while (skb); 2638 2639 if (remaining_len) { 2640 err = -EFAULT; 2641 goto out; 2642 } 2643 2644 out: 2645 tcp_xa_pool_commit(sk, &tcp_xa_pool); 2646 if (!sent) 2647 sent = err; 2648 2649 return sent; 2650 } 2651 2652 /* 2653 * This routine copies from a sock struct into the user buffer. 2654 * 2655 * Technical note: in 2.3 we work on _locked_ socket, so that 2656 * tricks with *seq access order and skb->users are not required. 2657 * Probably, code can be easily improved even more. 
2658 */ 2659 2660 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2661 int flags, struct scm_timestamping_internal *tss, 2662 int *cmsg_flags) 2663 { 2664 struct tcp_sock *tp = tcp_sk(sk); 2665 int last_copied_dmabuf = -1; /* uninitialized */ 2666 int copied = 0; 2667 u32 peek_seq; 2668 u32 *seq; 2669 unsigned long used; 2670 int err; 2671 int target; /* Read at least this many bytes */ 2672 long timeo; 2673 struct sk_buff *skb, *last; 2674 u32 peek_offset = 0; 2675 u32 urg_hole = 0; 2676 2677 err = -ENOTCONN; 2678 if (sk->sk_state == TCP_LISTEN) 2679 goto out; 2680 2681 if (tp->recvmsg_inq) 2682 *cmsg_flags = TCP_CMSG_INQ; 2683 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2684 2685 /* Urgent data needs to be handled specially. */ 2686 if (flags & MSG_OOB) 2687 goto recv_urg; 2688 2689 if (unlikely(tp->repair)) { 2690 err = -EPERM; 2691 if (!(flags & MSG_PEEK)) 2692 goto out; 2693 2694 if (tp->repair_queue == TCP_SEND_QUEUE) 2695 goto recv_sndq; 2696 2697 err = -EINVAL; 2698 if (tp->repair_queue == TCP_NO_QUEUE) 2699 goto out; 2700 2701 /* 'common' recv queue MSG_PEEK-ing */ 2702 } 2703 2704 seq = &tp->copied_seq; 2705 if (flags & MSG_PEEK) { 2706 peek_offset = max(sk_peek_offset(sk, flags), 0); 2707 peek_seq = tp->copied_seq + peek_offset; 2708 seq = &peek_seq; 2709 } 2710 2711 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2712 2713 do { 2714 u32 offset; 2715 2716 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2717 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 2718 if (copied) 2719 break; 2720 if (signal_pending(current)) { 2721 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 2722 break; 2723 } 2724 } 2725 2726 /* Next get a buffer. */ 2727 2728 last = skb_peek_tail(&sk->sk_receive_queue); 2729 skb_queue_walk(&sk->sk_receive_queue, skb) { 2730 last = skb; 2731 /* Now that we have two receive queues this 2732 * shouldn't happen. 2733 */ 2734 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2735 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 2736 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2737 flags)) 2738 break; 2739 2740 offset = *seq - TCP_SKB_CB(skb)->seq; 2741 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2742 pr_err_once("%s: found a SYN, please report !\n", __func__); 2743 offset--; 2744 } 2745 if (offset < skb->len) 2746 goto found_ok_skb; 2747 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2748 goto found_fin_ok; 2749 WARN(!(flags & MSG_PEEK), 2750 "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 2751 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 2752 } 2753 2754 /* Well, if we have backlog, try to process it now yet. */ 2755 2756 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 2757 break; 2758 2759 if (copied) { 2760 if (!timeo || 2761 tcp_recv_should_stop(sk)) 2762 break; 2763 } else { 2764 if (sock_flag(sk, SOCK_DONE)) 2765 break; 2766 2767 if (sk->sk_err) { 2768 copied = sock_error(sk); 2769 break; 2770 } 2771 2772 if (sk->sk_shutdown & RCV_SHUTDOWN) 2773 break; 2774 2775 if (sk->sk_state == TCP_CLOSE) { 2776 /* This occurs when user tries to read 2777 * from never connected socket. 2778 */ 2779 copied = -ENOTCONN; 2780 break; 2781 } 2782 2783 if (!timeo) { 2784 copied = -EAGAIN; 2785 break; 2786 } 2787 2788 if (signal_pending(current)) { 2789 copied = sock_intr_errno(timeo); 2790 break; 2791 } 2792 } 2793 2794 if (copied >= target) { 2795 /* Do not sleep, just process backlog. 
*/ 2796 __sk_flush_backlog(sk); 2797 } else { 2798 tcp_cleanup_rbuf(sk, copied); 2799 err = sk_wait_data(sk, &timeo, last); 2800 if (err < 0) { 2801 err = copied ? : err; 2802 goto out; 2803 } 2804 } 2805 2806 if ((flags & MSG_PEEK) && 2807 (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) { 2808 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2809 current->comm, 2810 task_pid_nr(current)); 2811 peek_seq = tp->copied_seq + peek_offset; 2812 } 2813 continue; 2814 2815 found_ok_skb: 2816 /* Ok so how much can we use? */ 2817 used = skb->len - offset; 2818 if (len < used) 2819 used = len; 2820 2821 /* Do we have urgent data here? */ 2822 if (unlikely(tp->urg_data)) { 2823 u32 urg_offset = tp->urg_seq - *seq; 2824 if (urg_offset < used) { 2825 if (!urg_offset) { 2826 if (!sock_flag(sk, SOCK_URGINLINE)) { 2827 WRITE_ONCE(*seq, *seq + 1); 2828 urg_hole++; 2829 offset++; 2830 used--; 2831 if (!used) 2832 goto skip_copy; 2833 } 2834 } else 2835 used = urg_offset; 2836 } 2837 } 2838 2839 if (!(flags & MSG_TRUNC)) { 2840 if (last_copied_dmabuf != -1 && 2841 last_copied_dmabuf != !skb_frags_readable(skb)) 2842 break; 2843 2844 if (skb_frags_readable(skb)) { 2845 err = skb_copy_datagram_msg(skb, offset, msg, 2846 used); 2847 if (err) { 2848 /* Exception. Bailout! */ 2849 if (!copied) 2850 copied = -EFAULT; 2851 break; 2852 } 2853 } else { 2854 if (!(flags & MSG_SOCK_DEVMEM)) { 2855 /* dmabuf skbs can only be received 2856 * with the MSG_SOCK_DEVMEM flag. 2857 */ 2858 if (!copied) 2859 copied = -EFAULT; 2860 2861 break; 2862 } 2863 2864 err = tcp_recvmsg_dmabuf(sk, skb, offset, msg, 2865 used); 2866 if (err < 0) { 2867 if (!copied) 2868 copied = err; 2869 2870 break; 2871 } 2872 used = err; 2873 } 2874 } 2875 2876 last_copied_dmabuf = !skb_frags_readable(skb); 2877 2878 WRITE_ONCE(*seq, *seq + used); 2879 copied += used; 2880 len -= used; 2881 if (flags & MSG_PEEK) 2882 sk_peek_offset_fwd(sk, used); 2883 else 2884 sk_peek_offset_bwd(sk, used); 2885 tcp_rcv_space_adjust(sk); 2886 2887 skip_copy: 2888 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 2889 WRITE_ONCE(tp->urg_data, 0); 2890 tcp_fast_path_check(sk); 2891 } 2892 2893 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2894 tcp_update_recv_tstamps(skb, tss); 2895 *cmsg_flags |= TCP_CMSG_TS; 2896 } 2897 2898 if (used + offset < skb->len) 2899 continue; 2900 2901 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2902 goto found_fin_ok; 2903 if (!(flags & MSG_PEEK)) 2904 tcp_eat_recv_skb(sk, skb); 2905 continue; 2906 2907 found_fin_ok: 2908 /* Process the FIN. */ 2909 WRITE_ONCE(*seq, *seq + 1); 2910 if (!(flags & MSG_PEEK)) 2911 tcp_eat_recv_skb(sk, skb); 2912 break; 2913 } while (len > 0); 2914 2915 /* According to UNIX98, msg_name/msg_namelen are ignored 2916 * on connected socket. I was just happy when found this 8) --ANK 2917 */ 2918 2919 /* Clean up data we have read: This will do ACK frames. 
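 * (tcp_cleanup_rbuf() transmits an ACK right away if one is already
 * scheduled, or if the freed receive memory opened the advertised window
 * enough to be worth announcing.)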
*/ 2920 tcp_cleanup_rbuf(sk, copied); 2921 return copied; 2922 2923 out: 2924 return err; 2925 2926 recv_urg: 2927 err = tcp_recv_urg(sk, msg, len, flags); 2928 goto out; 2929 2930 recv_sndq: 2931 err = tcp_peek_sndq(sk, msg, len); 2932 goto out; 2933 } 2934 2935 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags) 2936 { 2937 int cmsg_flags = 0, ret; 2938 struct scm_timestamping_internal tss; 2939 2940 if (unlikely(flags & MSG_ERRQUEUE)) 2941 return inet_recv_error(sk, msg, len); 2942 2943 if (sk_can_busy_loop(sk) && 2944 skb_queue_empty_lockless(&sk->sk_receive_queue) && 2945 sk->sk_state == TCP_ESTABLISHED) 2946 sk_busy_loop(sk, flags & MSG_DONTWAIT); 2947 2948 lock_sock(sk); 2949 ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 2950 release_sock(sk); 2951 2952 if ((cmsg_flags | msg->msg_get_inq) && ret >= 0) { 2953 if (cmsg_flags & TCP_CMSG_TS) 2954 tcp_recv_timestamp(msg, sk, &tss); 2955 if ((cmsg_flags & TCP_CMSG_INQ) | msg->msg_get_inq) { 2956 msg->msg_inq = tcp_inq_hint(sk); 2957 if (cmsg_flags & TCP_CMSG_INQ) 2958 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2959 sizeof(msg->msg_inq), &msg->msg_inq); 2960 } 2961 } 2962 return ret; 2963 } 2964 2965 void tcp_set_state(struct sock *sk, int state) 2966 { 2967 int oldstate = sk->sk_state; 2968 2969 /* We defined a new enum for TCP states that are exported in BPF 2970 * so as not to force the internal TCP states to be frozen. The 2971 * following checks will detect if an internal state value ever 2972 * differs from the BPF value. If this ever happens, then we will 2973 * need to remap the internal value to the BPF value before calling 2974 * tcp_call_bpf_2arg. 2975 */ 2976 BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2977 BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2978 BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2979 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2980 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2981 BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2982 BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2983 BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2984 BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2985 BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2986 BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2987 BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 2988 BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE); 2989 BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2990 2991 /* The bpf uapi header bpf.h defines an anonymous enum with values 2992 * BPF_TCP_* used by bpf programs. Currently a gcc-built vmlinux 2993 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 2994 * But a clang-built vmlinux does not have this enum in DWARF, 2995 * since clang removes the above code before generating IR/debuginfo. 2996 * Let us explicitly emit the type debuginfo to ensure that the 2997 * above-mentioned anonymous enum makes it into the vmlinux DWARF, and 2998 * hence BTF, regardless of which compiler is used.
2999 */ 3000 BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 3001 3002 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 3003 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 3004 3005 switch (state) { 3006 case TCP_ESTABLISHED: 3007 if (oldstate != TCP_ESTABLISHED) 3008 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 3009 break; 3010 case TCP_CLOSE_WAIT: 3011 if (oldstate == TCP_SYN_RECV) 3012 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 3013 break; 3014 3015 case TCP_CLOSE: 3016 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 3017 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 3018 3019 sk->sk_prot->unhash(sk); 3020 if (inet_csk(sk)->icsk_bind_hash && 3021 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 3022 inet_put_port(sk); 3023 fallthrough; 3024 default: 3025 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) 3026 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 3027 } 3028 3029 /* Change state AFTER socket is unhashed to avoid closed 3030 * socket sitting in hash tables. 3031 */ 3032 inet_sk_state_store(sk, state); 3033 } 3034 EXPORT_SYMBOL_GPL(tcp_set_state); 3035 3036 /* 3037 * State processing on a close. This implements the state shift for 3038 * sending our FIN frame. Note that we only send a FIN for some 3039 * states. A shutdown() may have already sent the FIN, or we may be 3040 * closed. 3041 */ 3042 3043 static const unsigned char new_state[16] = { 3044 /* current state: new state: action: */ 3045 [0 /* (Invalid) */] = TCP_CLOSE, 3046 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 3047 [TCP_SYN_SENT] = TCP_CLOSE, 3048 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 3049 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 3050 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 3051 [TCP_TIME_WAIT] = TCP_CLOSE, 3052 [TCP_CLOSE] = TCP_CLOSE, 3053 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 3054 [TCP_LAST_ACK] = TCP_LAST_ACK, 3055 [TCP_LISTEN] = TCP_CLOSE, 3056 [TCP_CLOSING] = TCP_CLOSING, 3057 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 3058 }; 3059 3060 static int tcp_close_state(struct sock *sk) 3061 { 3062 int next = (int)new_state[sk->sk_state]; 3063 int ns = next & TCP_STATE_MASK; 3064 3065 tcp_set_state(sk, ns); 3066 3067 return next & TCP_ACTION_FIN; 3068 } 3069 3070 /* 3071 * Shutdown the sending side of a connection. Much like close except 3072 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 3073 */ 3074 3075 void tcp_shutdown(struct sock *sk, int how) 3076 { 3077 /* We need to grab some memory, and put together a FIN, 3078 * and then put it into the queue to be sent. 3079 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 3080 */ 3081 if (!(how & SEND_SHUTDOWN)) 3082 return; 3083 3084 /* If we've already sent a FIN, or it's a closed state, skip this. */ 3085 if ((1 << sk->sk_state) & 3086 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 3087 TCPF_CLOSE_WAIT)) { 3088 /* Clear out any half completed packets. FIN if needed. 
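 * (tcp_close_state() runs the current state through new_state[] above:
 * e.g. ESTABLISHED becomes FIN_WAIT1 and CLOSE_WAIT becomes LAST_ACK,
 * both carrying TCP_ACTION_FIN, so tcp_send_fin() is called below.)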
*/ 3089 if (tcp_close_state(sk)) 3090 tcp_send_fin(sk); 3091 } 3092 } 3093 3094 int tcp_orphan_count_sum(void) 3095 { 3096 int i, total = 0; 3097 3098 for_each_possible_cpu(i) 3099 total += per_cpu(tcp_orphan_count, i); 3100 3101 return max(total, 0); 3102 } 3103 3104 static int tcp_orphan_cache; 3105 static struct timer_list tcp_orphan_timer; 3106 #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 3107 3108 static void tcp_orphan_update(struct timer_list *unused) 3109 { 3110 WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 3111 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 3112 } 3113 3114 static bool tcp_too_many_orphans(int shift) 3115 { 3116 return READ_ONCE(tcp_orphan_cache) << shift > 3117 READ_ONCE(sysctl_tcp_max_orphans); 3118 } 3119 3120 static bool tcp_out_of_memory(const struct sock *sk) 3121 { 3122 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 3123 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) 3124 return true; 3125 return false; 3126 } 3127 3128 bool tcp_check_oom(const struct sock *sk, int shift) 3129 { 3130 bool too_many_orphans, out_of_socket_memory; 3131 3132 too_many_orphans = tcp_too_many_orphans(shift); 3133 out_of_socket_memory = tcp_out_of_memory(sk); 3134 3135 if (too_many_orphans) 3136 net_info_ratelimited("too many orphaned sockets\n"); 3137 if (out_of_socket_memory) 3138 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 3139 return too_many_orphans || out_of_socket_memory; 3140 } 3141 3142 void __tcp_close(struct sock *sk, long timeout) 3143 { 3144 bool data_was_unread = false; 3145 struct sk_buff *skb; 3146 int state; 3147 3148 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 3149 3150 if (sk->sk_state == TCP_LISTEN) { 3151 tcp_set_state(sk, TCP_CLOSE); 3152 3153 /* Special case. */ 3154 inet_csk_listen_stop(sk); 3155 3156 goto adjudge_to_death; 3157 } 3158 3159 /* We need to flush the recv. buffs. We do this only on the 3160 * descriptor close, not protocol-sourced closes, because the 3161 * reader process may not have drained the data yet! 3162 */ 3163 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 3164 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 3165 3166 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 3167 end_seq--; 3168 if (after(end_seq, tcp_sk(sk)->copied_seq)) 3169 data_was_unread = true; 3170 tcp_eat_recv_skb(sk, skb); 3171 } 3172 3173 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 3174 if (sk->sk_state == TCP_CLOSE) 3175 goto adjudge_to_death; 3176 3177 /* As outlined in RFC 2525, section 2.17, we send a RST here because 3178 * data was lost. To witness the awful effects of the old behavior of 3179 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 3180 * GET in an FTP client, suspend the process, wait for the client to 3181 * advertise a zero window, then kill -9 the FTP client, wheee... 3182 * Note: timeout is always zero in such a case. 3183 */ 3184 if (unlikely(tcp_sk(sk)->repair)) { 3185 sk->sk_prot->disconnect(sk, 0); 3186 } else if (data_was_unread) { 3187 /* Unread data was tossed, zap the connection. */ 3188 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 3189 tcp_set_state(sk, TCP_CLOSE); 3190 tcp_send_active_reset(sk, sk->sk_allocation, 3191 SK_RST_REASON_TCP_ABORT_ON_CLOSE); 3192 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 3193 /* Check zero linger _after_ checking for unread data. 
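 * (This is the classic abortive close. A hypothetical application doing
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(fd);
 *
 * lands here, and the disconnect below resets the connection instead of
 * going through the FIN handshake.)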
*/ 3194 sk->sk_prot->disconnect(sk, 0); 3195 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 3196 } else if (tcp_close_state(sk)) { 3197 /* We FIN if the application ate all the data before 3198 * zapping the connection. 3199 */ 3200 3201 /* RED-PEN. Formally speaking, we have broken the TCP state 3202 * machine. State transitions: 3203 * 3204 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 3205 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult) 3206 * TCP_CLOSE_WAIT -> TCP_LAST_ACK 3207 * 3208 * are legal only when FIN has been sent (i.e. in window), 3209 * rather than queued out of window. Purists blame. 3210 * 3211 * F.e. "RFC state" is ESTABLISHED, 3212 * if Linux state is FIN-WAIT-1, but FIN is still not sent. 3213 * 3214 * The visible deviations are that sometimes 3215 * we enter the time-wait state when it is not really required 3216 * (harmless), and do not send active resets when they are 3217 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 3218 * they look like CLOSING or LAST_ACK to Linux). 3219 * Probably, I missed some more holelets. 3220 * --ANK 3221 * XXX (TFO) - To start off we don't support SYN+ACK+FIN 3222 * in a single packet! (May consider it later but will 3223 * probably need API support or TCP_CORK SYN-ACK until 3224 * data is written and socket is closed.) 3225 */ 3226 tcp_send_fin(sk); 3227 } 3228 3229 sk_stream_wait_close(sk, timeout); 3230 3231 adjudge_to_death: 3232 state = sk->sk_state; 3233 sock_hold(sk); 3234 sock_orphan(sk); 3235 3236 local_bh_disable(); 3237 bh_lock_sock(sk); 3238 /* remove backlog if any, without releasing ownership. */ 3239 __release_sock(sk); 3240 3241 tcp_orphan_count_inc(); 3242 3243 /* Have we already been destroyed by a softirq or backlog? */ 3244 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 3245 goto out; 3246 3247 /* This is a (useful) BSD violation of the RFC. There is a 3248 * problem with TCP as specified in that the other end could 3249 * keep a socket open forever with no application left at this end. 3250 * We use a 1 minute timeout (about the same as BSD) then kill 3251 * our end. If they send after that then tough - BUT: long enough 3252 * that we won't make the old 4*rto = almost no time - whoops 3253 * reset mistake. 3254 * 3255 * Nope, it was not a mistake. It is really the desired behaviour, 3256 * e.g. on HTTP servers, where such sockets are useless but 3257 * consume significant resources. Let's do it with the special 3258 * linger2 option.
--ANK 3259 */ 3260 3261 if (sk->sk_state == TCP_FIN_WAIT2) { 3262 struct tcp_sock *tp = tcp_sk(sk); 3263 if (READ_ONCE(tp->linger2) < 0) { 3264 tcp_set_state(sk, TCP_CLOSE); 3265 tcp_send_active_reset(sk, GFP_ATOMIC, 3266 SK_RST_REASON_TCP_ABORT_ON_LINGER); 3267 __NET_INC_STATS(sock_net(sk), 3268 LINUX_MIB_TCPABORTONLINGER); 3269 } else { 3270 const int tmo = tcp_fin_time(sk); 3271 3272 if (tmo > TCP_TIMEWAIT_LEN) { 3273 tcp_reset_keepalive_timer(sk, 3274 tmo - TCP_TIMEWAIT_LEN); 3275 } else { 3276 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 3277 goto out; 3278 } 3279 } 3280 } 3281 if (sk->sk_state != TCP_CLOSE) { 3282 if (tcp_check_oom(sk, 0)) { 3283 tcp_set_state(sk, TCP_CLOSE); 3284 tcp_send_active_reset(sk, GFP_ATOMIC, 3285 SK_RST_REASON_TCP_ABORT_ON_MEMORY); 3286 __NET_INC_STATS(sock_net(sk), 3287 LINUX_MIB_TCPABORTONMEMORY); 3288 } else if (!check_net(sock_net(sk))) { 3289 /* Not possible to send reset; just close */ 3290 tcp_set_state(sk, TCP_CLOSE); 3291 } 3292 } 3293 3294 if (sk->sk_state == TCP_CLOSE) { 3295 struct request_sock *req; 3296 3297 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 3298 lockdep_sock_is_held(sk)); 3299 /* We could get here with a non-NULL req if the socket is 3300 * aborted (e.g., closed with unread data) before 3WHS 3301 * finishes. 3302 */ 3303 if (req) 3304 reqsk_fastopen_remove(sk, req, false); 3305 inet_csk_destroy_sock(sk); 3306 } 3307 /* Otherwise, socket is reprieved until protocol close. */ 3308 3309 out: 3310 bh_unlock_sock(sk); 3311 local_bh_enable(); 3312 } 3313 3314 void tcp_close(struct sock *sk, long timeout) 3315 { 3316 lock_sock(sk); 3317 __tcp_close(sk, timeout); 3318 release_sock(sk); 3319 if (!sk->sk_net_refcnt) 3320 inet_csk_clear_xmit_timers_sync(sk); 3321 sock_put(sk); 3322 } 3323 EXPORT_SYMBOL(tcp_close); 3324 3325 /* These states need RST on ABORT according to RFC793 */ 3326 3327 static inline bool tcp_need_reset(int state) 3328 { 3329 return (1 << state) & 3330 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 3331 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 3332 } 3333 3334 static void tcp_rtx_queue_purge(struct sock *sk) 3335 { 3336 struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 3337 3338 tcp_sk(sk)->highest_sack = NULL; 3339 while (p) { 3340 struct sk_buff *skb = rb_to_skb(p); 3341 3342 p = rb_next(p); 3343 /* Since we are deleting whole queue, no need to 3344 * list_del(&skb->tcp_tsorted_anchor) 3345 */ 3346 tcp_rtx_queue_unlink(skb, sk); 3347 tcp_wmem_free_skb(sk, skb); 3348 } 3349 } 3350 3351 void tcp_write_queue_purge(struct sock *sk) 3352 { 3353 struct sk_buff *skb; 3354 3355 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3356 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 3357 tcp_skb_tsorted_anchor_cleanup(skb); 3358 tcp_wmem_free_skb(sk, skb); 3359 } 3360 tcp_rtx_queue_purge(sk); 3361 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 3362 tcp_clear_all_retrans_hints(tcp_sk(sk)); 3363 tcp_sk(sk)->packets_out = 0; 3364 inet_csk(sk)->icsk_backoff = 0; 3365 } 3366 3367 int tcp_disconnect(struct sock *sk, int flags) 3368 { 3369 struct inet_sock *inet = inet_sk(sk); 3370 struct inet_connection_sock *icsk = inet_csk(sk); 3371 struct tcp_sock *tp = tcp_sk(sk); 3372 int old_state = sk->sk_state; 3373 struct request_sock *req; 3374 u32 seq; 3375 3376 if (old_state != TCP_CLOSE) 3377 tcp_set_state(sk, TCP_CLOSE); 3378 3379 /* ABORT function of RFC793 */ 3380 if (old_state == TCP_LISTEN) { 3381 inet_csk_listen_stop(sk); 3382 } else if (unlikely(tp->repair)) { 3383 WRITE_ONCE(sk->sk_err, ECONNABORTED); 3384 } else if 
(tcp_need_reset(old_state)) { 3385 tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE); 3386 WRITE_ONCE(sk->sk_err, ECONNRESET); 3387 } else if (tp->snd_nxt != tp->write_seq && 3388 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) { 3389 /* The last check adjusts for discrepancy of Linux wrt. RFC 3390 * states 3391 */ 3392 tcp_send_active_reset(sk, gfp_any(), 3393 SK_RST_REASON_TCP_DISCONNECT_WITH_DATA); 3394 WRITE_ONCE(sk->sk_err, ECONNRESET); 3395 } else if (old_state == TCP_SYN_SENT) 3396 WRITE_ONCE(sk->sk_err, ECONNRESET); 3397 3398 tcp_clear_xmit_timers(sk); 3399 __skb_queue_purge(&sk->sk_receive_queue); 3400 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 3401 WRITE_ONCE(tp->urg_data, 0); 3402 sk_set_peek_off(sk, -1); 3403 tcp_write_queue_purge(sk); 3404 tcp_fastopen_active_disable_ofo_check(sk); 3405 skb_rbtree_purge(&tp->out_of_order_queue); 3406 3407 inet->inet_dport = 0; 3408 3409 inet_bhash2_reset_saddr(sk); 3410 3411 WRITE_ONCE(sk->sk_shutdown, 0); 3412 sock_reset_flag(sk, SOCK_DONE); 3413 tp->srtt_us = 0; 3414 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 3415 tp->rcv_rtt_last_tsecr = 0; 3416 3417 seq = tp->write_seq + tp->max_window + 2; 3418 if (!seq) 3419 seq = 1; 3420 WRITE_ONCE(tp->write_seq, seq); 3421 3422 icsk->icsk_backoff = 0; 3423 WRITE_ONCE(icsk->icsk_probes_out, 0); 3424 icsk->icsk_probes_tstamp = 0; 3425 icsk->icsk_rto = TCP_TIMEOUT_INIT; 3426 WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN); 3427 WRITE_ONCE(icsk->icsk_delack_max, TCP_DELACK_MAX); 3428 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 3429 tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 3430 tp->snd_cwnd_cnt = 0; 3431 tp->is_cwnd_limited = 0; 3432 tp->max_packets_out = 0; 3433 tp->window_clamp = 0; 3434 tp->delivered = 0; 3435 tp->delivered_ce = 0; 3436 tp->accecn_fail_mode = 0; 3437 tp->saw_accecn_opt = TCP_ACCECN_OPT_NOT_SEEN; 3438 tcp_accecn_init_counters(tp); 3439 tp->prev_ecnfield = 0; 3440 tp->accecn_opt_tstamp = 0; 3441 tp->pkts_acked_ewma = 0; 3442 if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release) 3443 icsk->icsk_ca_ops->release(sk); 3444 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 3445 icsk->icsk_ca_initialized = 0; 3446 tcp_set_ca_state(sk, TCP_CA_Open); 3447 tp->is_sack_reneg = 0; 3448 tcp_clear_retrans(tp); 3449 tp->total_retrans = 0; 3450 inet_csk_delack_init(sk); 3451 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3452 * issue in __tcp_select_window() 3453 */ 3454 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3455 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 3456 __sk_dst_reset(sk); 3457 dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL))); 3458 tcp_saved_syn_free(tp); 3459 tp->compressed_ack = 0; 3460 tp->segs_in = 0; 3461 tp->segs_out = 0; 3462 tp->bytes_sent = 0; 3463 tp->bytes_acked = 0; 3464 tp->bytes_received = 0; 3465 tp->bytes_retrans = 0; 3466 tp->data_segs_in = 0; 3467 tp->data_segs_out = 0; 3468 tp->duplicate_sack[0].start_seq = 0; 3469 tp->duplicate_sack[0].end_seq = 0; 3470 tp->dsack_dups = 0; 3471 tp->reord_seen = 0; 3472 tp->retrans_out = 0; 3473 tp->sacked_out = 0; 3474 tp->tlp_high_seq = 0; 3475 tp->last_oow_ack_time = 0; 3476 tp->plb_rehash = 0; 3477 /* There's a bubble in the pipe until at least the first ACK. 
*/ 3478 tp->app_limited = ~0U; 3479 tp->rate_app_limited = 1; 3480 tp->rack.mstamp = 0; 3481 tp->rack.advanced = 0; 3482 tp->rack.reo_wnd_steps = 1; 3483 tp->rack.last_delivered = 0; 3484 tp->rack.reo_wnd_persist = 0; 3485 tp->rack.dsack_seen = 0; 3486 tp->syn_data_acked = 0; 3487 tp->syn_fastopen_child = 0; 3488 tp->rx_opt.saw_tstamp = 0; 3489 tp->rx_opt.dsack = 0; 3490 tp->rx_opt.num_sacks = 0; 3491 tp->rcv_ooopack = 0; 3492 3493 3494 /* Clean up fastopen related fields */ 3495 req = rcu_dereference_protected(tp->fastopen_rsk, 3496 lockdep_sock_is_held(sk)); 3497 if (req) 3498 reqsk_fastopen_remove(sk, req, false); 3499 tcp_free_fastopen_req(tp); 3500 inet_clear_bit(DEFER_CONNECT, sk); 3501 tp->fastopen_client_fail = 0; 3502 3503 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 3504 3505 if (sk->sk_frag.page) { 3506 put_page(sk->sk_frag.page); 3507 sk->sk_frag.page = NULL; 3508 sk->sk_frag.offset = 0; 3509 } 3510 sk_error_report(sk); 3511 return 0; 3512 } 3513 EXPORT_SYMBOL(tcp_disconnect); 3514 3515 static inline bool tcp_can_repair_sock(const struct sock *sk) 3516 { 3517 return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3518 (sk->sk_state != TCP_LISTEN); 3519 } 3520 3521 static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3522 { 3523 struct tcp_repair_window opt; 3524 3525 if (!tp->repair) 3526 return -EPERM; 3527 3528 if (len != sizeof(opt)) 3529 return -EINVAL; 3530 3531 if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3532 return -EFAULT; 3533 3534 if (opt.max_window < opt.snd_wnd) 3535 return -EINVAL; 3536 3537 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3538 return -EINVAL; 3539 3540 if (after(opt.rcv_wup, tp->rcv_nxt)) 3541 return -EINVAL; 3542 3543 tp->snd_wl1 = opt.snd_wl1; 3544 tp->snd_wnd = opt.snd_wnd; 3545 tp->max_window = opt.max_window; 3546 3547 tp->rcv_wnd = opt.rcv_wnd; 3548 tp->rcv_wup = opt.rcv_wup; 3549 tp->rcv_mwnd_seq = opt.rcv_wup + opt.rcv_wnd; 3550 3551 return 0; 3552 } 3553 3554 static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3555 unsigned int len) 3556 { 3557 struct tcp_sock *tp = tcp_sk(sk); 3558 struct tcp_repair_opt opt; 3559 size_t offset = 0; 3560 3561 while (len >= sizeof(opt)) { 3562 if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3563 return -EFAULT; 3564 3565 offset += sizeof(opt); 3566 len -= sizeof(opt); 3567 3568 switch (opt.opt_code) { 3569 case TCPOPT_MSS: 3570 tp->rx_opt.mss_clamp = opt.opt_val; 3571 tcp_mtup_init(sk); 3572 break; 3573 case TCPOPT_WINDOW: 3574 { 3575 u16 snd_wscale = opt.opt_val & 0xFFFF; 3576 u16 rcv_wscale = opt.opt_val >> 16; 3577 3578 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3579 return -EFBIG; 3580 3581 tp->rx_opt.snd_wscale = snd_wscale; 3582 tp->rx_opt.rcv_wscale = rcv_wscale; 3583 tp->rx_opt.wscale_ok = 1; 3584 } 3585 break; 3586 case TCPOPT_SACK_PERM: 3587 if (opt.opt_val != 0) 3588 return -EINVAL; 3589 3590 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3591 break; 3592 case TCPOPT_TIMESTAMP: 3593 if (opt.opt_val != 0) 3594 return -EINVAL; 3595 3596 tp->rx_opt.tstamp_ok = 1; 3597 break; 3598 } 3599 } 3600 3601 return 0; 3602 } 3603 3604 DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3605 3606 static void tcp_enable_tx_delay(struct sock *sk, int val) 3607 { 3608 struct tcp_sock *tp = tcp_sk(sk); 3609 s32 delta = (val - tp->tcp_tx_delay) << 3; 3610 3611 if (val && !static_branch_unlikely(&tcp_tx_delay_enabled)) { 3612 static int __tcp_tx_delay_enabled = 0; 3613 3614 if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 
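			/* cmpxchg() lets only the first setter flip the
			 * static key and print the message below; later
			 * callers skip both.
			 */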
3615 static_branch_enable(&tcp_tx_delay_enabled); 3616 pr_info("TCP_TX_DELAY enabled\n"); 3617 } 3618 } 3619 /* If we change tcp_tx_delay on a live flow, adjust tp->srtt_us, 3620 * tp->rtt_min, icsk_rto and sk->sk_pacing_rate. 3621 * This is best effort. 3622 */ 3623 if (delta && sk->sk_state == TCP_ESTABLISHED) { 3624 s64 srtt = (s64)tp->srtt_us + delta; 3625 3626 tp->srtt_us = clamp_t(s64, srtt, 1, ~0U); 3627 3628 /* Note: does not deal with non zero icsk_backoff */ 3629 tcp_set_rto(sk); 3630 3631 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); 3632 3633 tcp_update_pacing_rate(sk); 3634 } 3635 } 3636 3637 /* When set indicates to always queue non-full frames. Later the user clears 3638 * this option and we transmit any pending partial frames in the queue. This is 3639 * meant to be used alongside sendfile() to get properly filled frames when the 3640 * user (for example) must write out headers with a write() call first and then 3641 * use sendfile to send out the data parts. 3642 * 3643 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3644 * TCP_NODELAY. 3645 */ 3646 void __tcp_sock_set_cork(struct sock *sk, bool on) 3647 { 3648 struct tcp_sock *tp = tcp_sk(sk); 3649 3650 if (on) { 3651 tp->nonagle |= TCP_NAGLE_CORK; 3652 } else { 3653 tp->nonagle &= ~TCP_NAGLE_CORK; 3654 if (tp->nonagle & TCP_NAGLE_OFF) 3655 tp->nonagle |= TCP_NAGLE_PUSH; 3656 tcp_push_pending_frames(sk); 3657 } 3658 } 3659 3660 void tcp_sock_set_cork(struct sock *sk, bool on) 3661 { 3662 lock_sock(sk); 3663 __tcp_sock_set_cork(sk, on); 3664 release_sock(sk); 3665 } 3666 EXPORT_SYMBOL(tcp_sock_set_cork); 3667 3668 /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 3669 * remembered, but it is not activated until cork is cleared. 3670 * 3671 * However, when TCP_NODELAY is set we make an explicit push, which overrides 3672 * even TCP_CORK for currently queued segments. 3673 */ 3674 void __tcp_sock_set_nodelay(struct sock *sk, bool on) 3675 { 3676 if (on) { 3677 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 3678 tcp_push_pending_frames(sk); 3679 } else { 3680 tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 3681 } 3682 } 3683 3684 void tcp_sock_set_nodelay(struct sock *sk) 3685 { 3686 lock_sock(sk); 3687 __tcp_sock_set_nodelay(sk, true); 3688 release_sock(sk); 3689 } 3690 EXPORT_SYMBOL(tcp_sock_set_nodelay); 3691 3692 static void __tcp_sock_set_quickack(struct sock *sk, int val) 3693 { 3694 if (!val) { 3695 inet_csk_enter_pingpong_mode(sk); 3696 return; 3697 } 3698 3699 inet_csk_exit_pingpong_mode(sk); 3700 if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3701 inet_csk_ack_scheduled(sk)) { 3702 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3703 tcp_cleanup_rbuf(sk, 1); 3704 if (!(val & 1)) 3705 inet_csk_enter_pingpong_mode(sk); 3706 } 3707 } 3708 3709 void tcp_sock_set_quickack(struct sock *sk, int val) 3710 { 3711 lock_sock(sk); 3712 __tcp_sock_set_quickack(sk, val); 3713 release_sock(sk); 3714 } 3715 EXPORT_SYMBOL(tcp_sock_set_quickack); 3716 3717 int tcp_sock_set_syncnt(struct sock *sk, int val) 3718 { 3719 if (val < 1 || val > MAX_TCP_SYNCNT) 3720 return -EINVAL; 3721 3722 WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); 3723 return 0; 3724 } 3725 EXPORT_SYMBOL(tcp_sock_set_syncnt); 3726 3727 int tcp_sock_set_user_timeout(struct sock *sk, int val) 3728 { 3729 /* Cap the max time in ms TCP will retry or probe the window 3730 * before giving up and aborting (ETIMEDOUT) a connection. 
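 *
 * A minimal (hypothetical) caller sketch, giving up after 30 seconds of
 * unacknowledged data:
 *
 *	unsigned int ms = 30 * 1000;
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
 *
 * A value of 0 falls back to the normal retransmission-based limits.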
3731 */ 3732 if (val < 0) 3733 return -EINVAL; 3734 3735 WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); 3736 return 0; 3737 } 3738 EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3739 3740 int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 3741 { 3742 struct tcp_sock *tp = tcp_sk(sk); 3743 3744 if (val < 1 || val > MAX_TCP_KEEPIDLE) 3745 return -EINVAL; 3746 3747 /* Paired with WRITE_ONCE() in keepalive_time_when() */ 3748 WRITE_ONCE(tp->keepalive_time, val * HZ); 3749 if (sock_flag(sk, SOCK_KEEPOPEN) && 3750 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 3751 u32 elapsed = keepalive_time_elapsed(tp); 3752 3753 if (tp->keepalive_time > elapsed) 3754 elapsed = tp->keepalive_time - elapsed; 3755 else 3756 elapsed = 0; 3757 tcp_reset_keepalive_timer(sk, elapsed); 3758 } 3759 3760 return 0; 3761 } 3762 3763 int tcp_sock_set_keepidle(struct sock *sk, int val) 3764 { 3765 int err; 3766 3767 lock_sock(sk); 3768 err = tcp_sock_set_keepidle_locked(sk, val); 3769 release_sock(sk); 3770 return err; 3771 } 3772 EXPORT_SYMBOL(tcp_sock_set_keepidle); 3773 3774 int tcp_sock_set_keepintvl(struct sock *sk, int val) 3775 { 3776 if (val < 1 || val > MAX_TCP_KEEPINTVL) 3777 return -EINVAL; 3778 3779 WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); 3780 return 0; 3781 } 3782 EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3783 3784 int tcp_sock_set_keepcnt(struct sock *sk, int val) 3785 { 3786 if (val < 1 || val > MAX_TCP_KEEPCNT) 3787 return -EINVAL; 3788 3789 /* Paired with READ_ONCE() in keepalive_probes() */ 3790 WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); 3791 return 0; 3792 } 3793 EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3794 3795 int tcp_set_window_clamp(struct sock *sk, int val) 3796 { 3797 u32 old_window_clamp, new_window_clamp, new_rcv_ssthresh; 3798 struct tcp_sock *tp = tcp_sk(sk); 3799 3800 if (!val) { 3801 if (sk->sk_state != TCP_CLOSE) 3802 return -EINVAL; 3803 WRITE_ONCE(tp->window_clamp, 0); 3804 return 0; 3805 } 3806 3807 old_window_clamp = tp->window_clamp; 3808 new_window_clamp = max_t(int, SOCK_MIN_RCVBUF / 2, val); 3809 3810 if (new_window_clamp == old_window_clamp) 3811 return 0; 3812 3813 WRITE_ONCE(tp->window_clamp, new_window_clamp); 3814 3815 /* Need to apply the reserved mem provisioning only 3816 * when shrinking the window clamp. 3817 */ 3818 if (new_window_clamp < old_window_clamp) { 3819 __tcp_adjust_rcv_ssthresh(sk, new_window_clamp); 3820 } else { 3821 new_rcv_ssthresh = min(tp->rcv_wnd, new_window_clamp); 3822 tp->rcv_ssthresh = max(new_rcv_ssthresh, tp->rcv_ssthresh); 3823 } 3824 return 0; 3825 } 3826 3827 int tcp_sock_set_maxseg(struct sock *sk, int val) 3828 { 3829 /* Values greater than interface MTU won't take effect. However 3830 * at the point when this call is done we typically don't yet 3831 * know which interface is going to be used 3832 */ 3833 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) 3834 return -EINVAL; 3835 3836 WRITE_ONCE(tcp_sk(sk)->rx_opt.user_mss, val); 3837 return 0; 3838 } 3839 3840 /* 3841 * Socket option code for TCP. 
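 *
 * Options are handled in three passes below: the string/blob options
 * (TCP_CONGESTION, TCP_ULP, TCP_FASTOPEN_KEY) first, then integer options
 * that are safe without the socket lock, then everything else under
 * sockopt_lock_sock(). For example, a (hypothetical) caller switching
 * congestion control would do:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic"));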
3842 */ 3843 int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3844 sockptr_t optval, unsigned int optlen) 3845 { 3846 struct tcp_sock *tp = tcp_sk(sk); 3847 struct inet_connection_sock *icsk = inet_csk(sk); 3848 struct net *net = sock_net(sk); 3849 int val; 3850 int err = 0; 3851 3852 /* These are data/string values, all the others are ints */ 3853 switch (optname) { 3854 case TCP_CONGESTION: { 3855 char name[TCP_CA_NAME_MAX]; 3856 3857 if (optlen < 1) 3858 return -EINVAL; 3859 3860 val = strncpy_from_sockptr(name, optval, 3861 min_t(long, TCP_CA_NAME_MAX-1, optlen)); 3862 if (val < 0) 3863 return -EFAULT; 3864 name[val] = 0; 3865 3866 sockopt_lock_sock(sk); 3867 err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3868 sockopt_ns_capable(sock_net(sk)->user_ns, 3869 CAP_NET_ADMIN)); 3870 sockopt_release_sock(sk); 3871 return err; 3872 } 3873 case TCP_ULP: { 3874 char name[TCP_ULP_NAME_MAX]; 3875 3876 if (optlen < 1) 3877 return -EINVAL; 3878 3879 val = strncpy_from_sockptr(name, optval, 3880 min_t(long, TCP_ULP_NAME_MAX - 1, 3881 optlen)); 3882 if (val < 0) 3883 return -EFAULT; 3884 name[val] = 0; 3885 3886 sockopt_lock_sock(sk); 3887 err = tcp_set_ulp(sk, name); 3888 sockopt_release_sock(sk); 3889 return err; 3890 } 3891 case TCP_FASTOPEN_KEY: { 3892 __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 3893 __u8 *backup_key = NULL; 3894 3895 /* Allow a backup key as well to facilitate key rotation 3896 * First key is the active one. 3897 */ 3898 if (optlen != TCP_FASTOPEN_KEY_LENGTH && 3899 optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 3900 return -EINVAL; 3901 3902 if (copy_from_sockptr(key, optval, optlen)) 3903 return -EFAULT; 3904 3905 if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 3906 backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 3907 3908 return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 3909 } 3910 default: 3911 /* fallthru */ 3912 break; 3913 } 3914 3915 if (optlen < sizeof(int)) 3916 return -EINVAL; 3917 3918 if (copy_from_sockptr(&val, optval, sizeof(val))) 3919 return -EFAULT; 3920 3921 /* Handle options that can be set without locking the socket. 
*/ 3922 switch (optname) { 3923 case TCP_SYNCNT: 3924 return tcp_sock_set_syncnt(sk, val); 3925 case TCP_USER_TIMEOUT: 3926 return tcp_sock_set_user_timeout(sk, val); 3927 case TCP_KEEPINTVL: 3928 return tcp_sock_set_keepintvl(sk, val); 3929 case TCP_KEEPCNT: 3930 return tcp_sock_set_keepcnt(sk, val); 3931 case TCP_LINGER2: 3932 if (val < 0) 3933 WRITE_ONCE(tp->linger2, -1); 3934 else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3935 WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); 3936 else 3937 WRITE_ONCE(tp->linger2, val * HZ); 3938 return 0; 3939 case TCP_DEFER_ACCEPT: 3940 /* Translate value in seconds to number of retransmits */ 3941 WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, 3942 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 3943 TCP_RTO_MAX / HZ)); 3944 return 0; 3945 case TCP_RTO_MAX_MS: 3946 if (val < MSEC_PER_SEC || val > TCP_RTO_MAX_SEC * MSEC_PER_SEC) 3947 return -EINVAL; 3948 WRITE_ONCE(inet_csk(sk)->icsk_rto_max, msecs_to_jiffies(val)); 3949 return 0; 3950 case TCP_RTO_MIN_US: { 3951 int rto_min = usecs_to_jiffies(val); 3952 3953 if (rto_min > TCP_RTO_MIN || rto_min < TCP_TIMEOUT_MIN) 3954 return -EINVAL; 3955 WRITE_ONCE(inet_csk(sk)->icsk_rto_min, rto_min); 3956 return 0; 3957 } 3958 case TCP_DELACK_MAX_US: { 3959 int delack_max = usecs_to_jiffies(val); 3960 3961 if (delack_max > TCP_DELACK_MAX || delack_max < TCP_TIMEOUT_MIN) 3962 return -EINVAL; 3963 WRITE_ONCE(inet_csk(sk)->icsk_delack_max, delack_max); 3964 return 0; 3965 } 3966 case TCP_MAXSEG: 3967 return tcp_sock_set_maxseg(sk, val); 3968 } 3969 3970 sockopt_lock_sock(sk); 3971 3972 switch (optname) { 3973 case TCP_NODELAY: 3974 __tcp_sock_set_nodelay(sk, val); 3975 break; 3976 3977 case TCP_THIN_LINEAR_TIMEOUTS: 3978 if (val < 0 || val > 1) 3979 err = -EINVAL; 3980 else 3981 tp->thin_lto = val; 3982 break; 3983 3984 case TCP_THIN_DUPACK: 3985 if (val < 0 || val > 1) 3986 err = -EINVAL; 3987 break; 3988 3989 case TCP_REPAIR: 3990 if (!tcp_can_repair_sock(sk)) 3991 err = -EPERM; 3992 else if (val == TCP_REPAIR_ON) { 3993 tp->repair = 1; 3994 sk->sk_reuse = SK_FORCE_REUSE; 3995 tp->repair_queue = TCP_NO_QUEUE; 3996 } else if (val == TCP_REPAIR_OFF) { 3997 tp->repair = 0; 3998 sk->sk_reuse = SK_NO_REUSE; 3999 tcp_send_window_probe(sk); 4000 } else if (val == TCP_REPAIR_OFF_NO_WP) { 4001 tp->repair = 0; 4002 sk->sk_reuse = SK_NO_REUSE; 4003 } else 4004 err = -EINVAL; 4005 4006 break; 4007 4008 case TCP_REPAIR_QUEUE: 4009 if (!tp->repair) 4010 err = -EPERM; 4011 else if ((unsigned int)val < TCP_QUEUES_NR) 4012 tp->repair_queue = val; 4013 else 4014 err = -EINVAL; 4015 break; 4016 4017 case TCP_QUEUE_SEQ: 4018 if (sk->sk_state != TCP_CLOSE) { 4019 err = -EPERM; 4020 } else if (tp->repair_queue == TCP_SEND_QUEUE) { 4021 if (!tcp_rtx_queue_empty(sk)) 4022 err = -EPERM; 4023 else 4024 WRITE_ONCE(tp->write_seq, val); 4025 } else if (tp->repair_queue == TCP_RECV_QUEUE) { 4026 if (tp->rcv_nxt != tp->copied_seq) { 4027 err = -EPERM; 4028 } else { 4029 WRITE_ONCE(tp->rcv_nxt, val); 4030 WRITE_ONCE(tp->copied_seq, val); 4031 } 4032 } else { 4033 err = -EINVAL; 4034 } 4035 break; 4036 4037 case TCP_REPAIR_OPTIONS: 4038 if (!tp->repair) 4039 err = -EINVAL; 4040 else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 4041 err = tcp_repair_options_est(sk, optval, optlen); 4042 else 4043 err = -EPERM; 4044 break; 4045 4046 case TCP_CORK: 4047 __tcp_sock_set_cork(sk, val); 4048 break; 4049 4050 case TCP_KEEPIDLE: 4051 err = tcp_sock_set_keepidle_locked(sk, val); 4052 break; 4053 case TCP_SAVE_SYN: 4054 /* 0: disable, 1: enable, 2: 
start from ether_header */ 4055 if (val < 0 || val > 2) 4056 err = -EINVAL; 4057 else 4058 tp->save_syn = val; 4059 break; 4060 4061 case TCP_WINDOW_CLAMP: 4062 err = tcp_set_window_clamp(sk, val); 4063 break; 4064 4065 case TCP_QUICKACK: 4066 __tcp_sock_set_quickack(sk, val); 4067 break; 4068 4069 case TCP_AO_REPAIR: 4070 if (!tcp_can_repair_sock(sk)) { 4071 err = -EPERM; 4072 break; 4073 } 4074 err = tcp_ao_set_repair(sk, optval, optlen); 4075 break; 4076 #ifdef CONFIG_TCP_AO 4077 case TCP_AO_ADD_KEY: 4078 case TCP_AO_DEL_KEY: 4079 case TCP_AO_INFO: { 4080 /* If this is the first TCP-AO setsockopt() on the socket, 4081 * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR 4082 * in any state. 4083 */ 4084 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) 4085 goto ao_parse; 4086 if (rcu_dereference_protected(tcp_sk(sk)->ao_info, 4087 lockdep_sock_is_held(sk))) 4088 goto ao_parse; 4089 if (tp->repair) 4090 goto ao_parse; 4091 err = -EISCONN; 4092 break; 4093 ao_parse: 4094 err = tp->af_specific->ao_parse(sk, optname, optval, optlen); 4095 break; 4096 } 4097 #endif 4098 #ifdef CONFIG_TCP_MD5SIG 4099 case TCP_MD5SIG: 4100 case TCP_MD5SIG_EXT: 4101 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 4102 break; 4103 #endif 4104 case TCP_FASTOPEN: 4105 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 4106 TCPF_LISTEN))) { 4107 tcp_fastopen_init_key_once(net); 4108 4109 fastopen_queue_tune(sk, val); 4110 } else { 4111 err = -EINVAL; 4112 } 4113 break; 4114 case TCP_FASTOPEN_CONNECT: 4115 if (val > 1 || val < 0) { 4116 err = -EINVAL; 4117 } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 4118 TFO_CLIENT_ENABLE) { 4119 if (sk->sk_state == TCP_CLOSE) 4120 tp->fastopen_connect = val; 4121 else 4122 err = -EINVAL; 4123 } else { 4124 err = -EOPNOTSUPP; 4125 } 4126 break; 4127 case TCP_FASTOPEN_NO_COOKIE: 4128 if (val > 1 || val < 0) 4129 err = -EINVAL; 4130 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 4131 err = -EINVAL; 4132 else 4133 tp->fastopen_no_cookie = val; 4134 break; 4135 case TCP_TIMESTAMP: 4136 if (!tp->repair) { 4137 err = -EPERM; 4138 break; 4139 } 4140 /* val is an opaque field, 4141 * and low order bit contains usec_ts enable bit. 4142 * It's a best effort, and we do not care if the user makes an error.
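 * The read side uses the same encoding: getsockopt(TCP_TIMESTAMP) in
 * do_tcp_getsockopt() folds the usec_ts flag back into the low order bit.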
4143 */ 4144 tp->tcp_usec_ts = val & 1; 4145 WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts)); 4146 break; 4147 case TCP_REPAIR_WINDOW: 4148 err = tcp_repair_set_window(tp, optval, optlen); 4149 break; 4150 case TCP_NOTSENT_LOWAT: 4151 WRITE_ONCE(tp->notsent_lowat, val); 4152 READ_ONCE(sk->sk_write_space)(sk); 4153 break; 4154 case TCP_INQ: 4155 if (val > 1 || val < 0) 4156 err = -EINVAL; 4157 else 4158 tp->recvmsg_inq = val; 4159 break; 4160 case TCP_TX_DELAY: 4161 /* tp->srtt_us is u32, and is shifted by 3 */ 4162 if (val < 0 || val >= (1U << (31 - 3))) { 4163 err = -EINVAL; 4164 break; 4165 } 4166 tcp_enable_tx_delay(sk, val); 4167 WRITE_ONCE(tp->tcp_tx_delay, val); 4168 break; 4169 default: 4170 err = -ENOPROTOOPT; 4171 break; 4172 } 4173 4174 sockopt_release_sock(sk); 4175 return err; 4176 } 4177 4178 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 4179 unsigned int optlen) 4180 { 4181 const struct inet_connection_sock *icsk = inet_csk(sk); 4182 4183 if (level != SOL_TCP) 4184 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4185 return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 4186 optval, optlen); 4187 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 4188 } 4189 4190 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 4191 struct tcp_info *info) 4192 { 4193 u64 stats[__TCP_CHRONO_MAX], total = 0; 4194 enum tcp_chrono i; 4195 4196 for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 4197 stats[i] = tp->chrono_stat[i - 1]; 4198 if (i == tp->chrono_type) 4199 stats[i] += tcp_jiffies32 - tp->chrono_start; 4200 stats[i] *= USEC_PER_SEC / HZ; 4201 total += stats[i]; 4202 } 4203 4204 info->tcpi_busy_time = total; 4205 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 4206 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 4207 } 4208 4209 /* Return information about state of tcp endpoint in API format. */ 4210 void tcp_get_info(struct sock *sk, struct tcp_info *info) 4211 { 4212 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 4213 const struct inet_connection_sock *icsk = inet_csk(sk); 4214 const u8 ect1_idx = INET_ECN_ECT_1 - 1; 4215 const u8 ect0_idx = INET_ECN_ECT_0 - 1; 4216 const u8 ce_idx = INET_ECN_CE - 1; 4217 unsigned long rate; 4218 u32 now; 4219 u64 rate64; 4220 bool slow; 4221 4222 memset(info, 0, sizeof(*info)); 4223 if (sk->sk_type != SOCK_STREAM) 4224 return; 4225 4226 info->tcpi_state = inet_sk_state_load(sk); 4227 4228 /* Report meaningful fields for all TCP states, including listeners */ 4229 rate = READ_ONCE(sk->sk_pacing_rate); 4230 rate64 = (rate != ~0UL) ? rate : ~0ULL; 4231 info->tcpi_pacing_rate = rate64; 4232 4233 rate = READ_ONCE(sk->sk_max_pacing_rate); 4234 rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 4235 info->tcpi_max_pacing_rate = rate64; 4236 4237 info->tcpi_reordering = tp->reordering; 4238 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 4239 4240 if (info->tcpi_state == TCP_LISTEN) { 4241 /* listeners aliased fields : 4242 * tcpi_unacked -> Number of children ready for accept() 4243 * tcpi_sacked -> max backlog 4244 */ 4245 info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 4246 info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 4247 return; 4248 } 4249 4250 slow = lock_sock_fast(sk); 4251 4252 info->tcpi_ca_state = icsk->icsk_ca_state; 4253 info->tcpi_retransmits = icsk->icsk_retransmits; 4254 info->tcpi_probes = icsk->icsk_probes_out; 4255 info->tcpi_backoff = icsk->icsk_backoff; 4256 4257 if (tp->rx_opt.tstamp_ok) 4258 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 4259 if (tcp_is_sack(tp)) 4260 info->tcpi_options |= TCPI_OPT_SACK; 4261 if (tp->rx_opt.wscale_ok) { 4262 info->tcpi_options |= TCPI_OPT_WSCALE; 4263 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 4264 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 4265 } 4266 4267 if (tcp_ecn_mode_any(tp)) 4268 info->tcpi_options |= TCPI_OPT_ECN; 4269 if (tp->ecn_flags & TCP_ECN_SEEN) 4270 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 4271 if (tp->syn_data_acked) 4272 info->tcpi_options |= TCPI_OPT_SYN_DATA; 4273 if (tp->tcp_usec_ts) 4274 info->tcpi_options |= TCPI_OPT_USEC_TS; 4275 if (tp->syn_fastopen_child) 4276 info->tcpi_options |= TCPI_OPT_TFO_CHILD; 4277 4278 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 4279 info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato, 4280 tcp_delack_max(sk))); 4281 info->tcpi_snd_mss = tp->mss_cache; 4282 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 4283 4284 info->tcpi_unacked = tp->packets_out; 4285 info->tcpi_sacked = tp->sacked_out; 4286 4287 info->tcpi_lost = tp->lost_out; 4288 info->tcpi_retrans = tp->retrans_out; 4289 4290 now = tcp_jiffies32; 4291 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 4292 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 4293 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 4294 4295 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 4296 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 4297 info->tcpi_rtt = tp->srtt_us >> 3; 4298 info->tcpi_rttvar = tp->mdev_us >> 2; 4299 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 4300 info->tcpi_advmss = tp->advmss; 4301 4302 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 4303 info->tcpi_rcv_space = tp->rcvq_space.space; 4304 4305 info->tcpi_total_retrans = tp->total_retrans; 4306 4307 info->tcpi_bytes_acked = tp->bytes_acked; 4308 info->tcpi_bytes_received = tp->bytes_received; 4309 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 4310 tcp_get_info_chrono_stats(tp, info); 4311 4312 info->tcpi_segs_out = tp->segs_out; 4313 4314 /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 4315 info->tcpi_segs_in = READ_ONCE(tp->segs_in); 4316 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 4317 4318 info->tcpi_min_rtt = tcp_min_rtt(tp); 4319 info->tcpi_data_segs_out = tp->data_segs_out; 4320 4321 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 4322 rate64 = tcp_compute_delivery_rate(tp); 4323 if (rate64) 4324 info->tcpi_delivery_rate = rate64; 4325 info->tcpi_delivered = tp->delivered; 4326 info->tcpi_delivered_ce = tp->delivered_ce; 4327 info->tcpi_bytes_sent = tp->bytes_sent; 4328 info->tcpi_bytes_retrans = tp->bytes_retrans; 4329 info->tcpi_dsack_dups = tp->dsack_dups; 4330 info->tcpi_reord_seen = tp->reord_seen; 4331 info->tcpi_rcv_ooopack = tp->rcv_ooopack; 4332 info->tcpi_snd_wnd = tp->snd_wnd; 4333 info->tcpi_rcv_wnd = tp->rcv_wnd; 4334 info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 4335 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 4336 4337 info->tcpi_total_rto = tp->total_rto; 4338 info->tcpi_total_rto_recoveries = tp->total_rto_recoveries; 4339 info->tcpi_total_rto_time = tp->total_rto_time; 4340 if (tp->rto_stamp) 4341 info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp; 4342 4343 if (tcp_ecn_disabled(tp)) 4344 info->tcpi_ecn_mode = TCPI_ECN_MODE_DISABLED; 4345 else if (tcp_ecn_mode_rfc3168(tp)) 4346 info->tcpi_ecn_mode = TCPI_ECN_MODE_RFC3168; 4347 else if (tcp_ecn_mode_accecn(tp)) 4348 info->tcpi_ecn_mode = TCPI_ECN_MODE_ACCECN; 4349 else if (tcp_ecn_mode_pending(tp)) 4350 info->tcpi_ecn_mode = TCPI_ECN_MODE_PENDING; 4351 info->tcpi_accecn_fail_mode = tp->accecn_fail_mode; 4352 info->tcpi_accecn_opt_seen = tp->saw_accecn_opt; 4353 info->tcpi_received_ce = tp->received_ce; 4354 info->tcpi_delivered_e1_bytes = tp->delivered_ecn_bytes[ect1_idx]; 4355 info->tcpi_delivered_e0_bytes = tp->delivered_ecn_bytes[ect0_idx]; 4356 info->tcpi_delivered_ce_bytes = tp->delivered_ecn_bytes[ce_idx]; 4357 info->tcpi_received_e1_bytes = tp->received_ecn_bytes[ect1_idx]; 4358 info->tcpi_received_e0_bytes = tp->received_ecn_bytes[ect0_idx]; 4359 info->tcpi_received_ce_bytes = tp->received_ecn_bytes[ce_idx]; 4360 4361 unlock_sock_fast(sk, slow); 4362 } 4363 EXPORT_SYMBOL_GPL(tcp_get_info); 4364 4365 static size_t tcp_opt_stats_get_size(void) 4366 { 4367 return 4368 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 4369 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 4370 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 4371 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 4372 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 4373 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 4374 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 4375 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 4376 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 4377 nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 4378 nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 4379 nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 4380 nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 4381 nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 4382 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 4383 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 4384 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 4385 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 4386 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 4387 nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 4388 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 4389 nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 4390 nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 4391 nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 4392 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT 
*/ 4393 nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 4394 nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 4395 0; 4396 } 4397 4398 /* Returns TTL or hop limit of an incoming packet from skb. */ 4399 static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 4400 { 4401 if (skb->protocol == htons(ETH_P_IP)) 4402 return ip_hdr(skb)->ttl; 4403 else if (skb->protocol == htons(ETH_P_IPV6)) 4404 return ipv6_hdr(skb)->hop_limit; 4405 else 4406 return 0; 4407 } 4408 4409 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 4410 const struct sk_buff *orig_skb, 4411 const struct sk_buff *ack_skb) 4412 { 4413 const struct tcp_sock *tp = tcp_sk(sk); 4414 struct sk_buff *stats; 4415 struct tcp_info info; 4416 unsigned long rate; 4417 u64 rate64; 4418 4419 stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 4420 if (!stats) 4421 return NULL; 4422 4423 tcp_get_info_chrono_stats(tp, &info); 4424 nla_put_u64_64bit(stats, TCP_NLA_BUSY, 4425 info.tcpi_busy_time, TCP_NLA_PAD); 4426 nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 4427 info.tcpi_rwnd_limited, TCP_NLA_PAD); 4428 nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 4429 info.tcpi_sndbuf_limited, TCP_NLA_PAD); 4430 nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 4431 tp->data_segs_out, TCP_NLA_PAD); 4432 nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 4433 tp->total_retrans, TCP_NLA_PAD); 4434 4435 rate = READ_ONCE(sk->sk_pacing_rate); 4436 rate64 = (rate != ~0UL) ? rate : ~0ULL; 4437 nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 4438 4439 rate64 = tcp_compute_delivery_rate(tp); 4440 nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 4441 4442 nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 4443 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 4444 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 4445 4446 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, 4447 READ_ONCE(inet_csk(sk)->icsk_retransmits)); 4448 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 4449 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 4450 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 4451 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 4452 4453 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 4454 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 4455 4456 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 4457 TCP_NLA_PAD); 4458 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 4459 TCP_NLA_PAD); 4460 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 4461 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 4462 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 4463 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 4464 nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 4465 max_t(int, 0, tp->write_seq - tp->snd_nxt)); 4466 nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 4467 TCP_NLA_PAD); 4468 if (ack_skb) 4469 nla_put_u8(stats, TCP_NLA_TTL, 4470 tcp_skb_ttl_or_hop_limit(ack_skb)); 4471 4472 nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 4473 return stats; 4474 } 4475 4476 int do_tcp_getsockopt(struct sock *sk, int level, 4477 int optname, sockptr_t optval, sockptr_t optlen) 4478 { 4479 struct inet_connection_sock *icsk = inet_csk(sk); 4480 struct tcp_sock *tp = tcp_sk(sk); 4481 struct net *net = sock_net(sk); 4482 int user_mss; 4483 int val, len; 4484 4485 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4486 return 
-EFAULT; 4487 4488 if (len < 0) 4489 return -EINVAL; 4490 4491 len = min_t(unsigned int, len, sizeof(int)); 4492 4493 switch (optname) { 4494 case TCP_MAXSEG: 4495 val = tp->mss_cache; 4496 user_mss = READ_ONCE(tp->rx_opt.user_mss); 4497 if (user_mss && 4498 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 4499 val = user_mss; 4500 if (tp->repair) 4501 val = tp->rx_opt.mss_clamp; 4502 break; 4503 case TCP_NODELAY: 4504 val = !!(tp->nonagle&TCP_NAGLE_OFF); 4505 break; 4506 case TCP_CORK: 4507 val = !!(tp->nonagle&TCP_NAGLE_CORK); 4508 break; 4509 case TCP_KEEPIDLE: 4510 val = keepalive_time_when(tp) / HZ; 4511 break; 4512 case TCP_KEEPINTVL: 4513 val = keepalive_intvl_when(tp) / HZ; 4514 break; 4515 case TCP_KEEPCNT: 4516 val = keepalive_probes(tp); 4517 break; 4518 case TCP_SYNCNT: 4519 val = READ_ONCE(icsk->icsk_syn_retries) ? : 4520 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 4521 break; 4522 case TCP_LINGER2: 4523 val = READ_ONCE(tp->linger2); 4524 if (val >= 0) 4525 val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 4526 break; 4527 case TCP_DEFER_ACCEPT: 4528 val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); 4529 val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ, 4530 TCP_RTO_MAX / HZ); 4531 break; 4532 case TCP_WINDOW_CLAMP: 4533 val = READ_ONCE(tp->window_clamp); 4534 break; 4535 case TCP_INFO: { 4536 struct tcp_info info; 4537 4538 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4539 return -EFAULT; 4540 4541 tcp_get_info(sk, &info); 4542 4543 len = min_t(unsigned int, len, sizeof(info)); 4544 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4545 return -EFAULT; 4546 if (copy_to_sockptr(optval, &info, len)) 4547 return -EFAULT; 4548 return 0; 4549 } 4550 case TCP_CC_INFO: { 4551 const struct tcp_congestion_ops *ca_ops; 4552 union tcp_cc_info info; 4553 size_t sz = 0; 4554 int attr; 4555 4556 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4557 return -EFAULT; 4558 4559 ca_ops = icsk->icsk_ca_ops; 4560 if (ca_ops && ca_ops->get_info) 4561 sz = ca_ops->get_info(sk, ~0U, &attr, &info); 4562 4563 len = min_t(unsigned int, len, sz); 4564 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4565 return -EFAULT; 4566 if (copy_to_sockptr(optval, &info, len)) 4567 return -EFAULT; 4568 return 0; 4569 } 4570 case TCP_QUICKACK: 4571 val = !inet_csk_in_pingpong_mode(sk); 4572 break; 4573 4574 case TCP_CONGESTION: 4575 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4576 return -EFAULT; 4577 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 4578 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4579 return -EFAULT; 4580 if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 4581 return -EFAULT; 4582 return 0; 4583 4584 case TCP_ULP: 4585 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4586 return -EFAULT; 4587 len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4588 if (!icsk->icsk_ulp_ops) { 4589 len = 0; 4590 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4591 return -EFAULT; 4592 return 0; 4593 } 4594 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4595 return -EFAULT; 4596 if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4597 return -EFAULT; 4598 return 0; 4599 4600 case TCP_FASTOPEN_KEY: { 4601 u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4602 unsigned int key_len; 4603 4604 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4605 return -EFAULT; 4606 4607 key_len = tcp_fastopen_get_cipher(net, icsk, key) * 4608 TCP_FASTOPEN_KEY_LENGTH; 4609 len = min_t(unsigned int, len, key_len); 4610 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4611 return 
-EFAULT; 4612 if (copy_to_sockptr(optval, key, len)) 4613 return -EFAULT; 4614 return 0; 4615 } 4616 case TCP_THIN_LINEAR_TIMEOUTS: 4617 val = tp->thin_lto; 4618 break; 4619 4620 case TCP_THIN_DUPACK: 4621 val = 0; 4622 break; 4623 4624 case TCP_REPAIR: 4625 val = tp->repair; 4626 break; 4627 4628 case TCP_REPAIR_QUEUE: 4629 if (tp->repair) 4630 val = tp->repair_queue; 4631 else 4632 return -EINVAL; 4633 break; 4634 4635 case TCP_REPAIR_WINDOW: { 4636 struct tcp_repair_window opt; 4637 4638 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4639 return -EFAULT; 4640 4641 if (len != sizeof(opt)) 4642 return -EINVAL; 4643 4644 if (!tp->repair) 4645 return -EPERM; 4646 4647 opt.snd_wl1 = tp->snd_wl1; 4648 opt.snd_wnd = tp->snd_wnd; 4649 opt.max_window = tp->max_window; 4650 opt.rcv_wnd = tp->rcv_wnd; 4651 opt.rcv_wup = tp->rcv_wup; 4652 4653 if (copy_to_sockptr(optval, &opt, len)) 4654 return -EFAULT; 4655 return 0; 4656 } 4657 case TCP_QUEUE_SEQ: 4658 if (tp->repair_queue == TCP_SEND_QUEUE) 4659 val = tp->write_seq; 4660 else if (tp->repair_queue == TCP_RECV_QUEUE) 4661 val = tp->rcv_nxt; 4662 else 4663 return -EINVAL; 4664 break; 4665 4666 case TCP_USER_TIMEOUT: 4667 val = READ_ONCE(icsk->icsk_user_timeout); 4668 break; 4669 4670 case TCP_FASTOPEN: 4671 val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); 4672 break; 4673 4674 case TCP_FASTOPEN_CONNECT: 4675 val = tp->fastopen_connect; 4676 break; 4677 4678 case TCP_FASTOPEN_NO_COOKIE: 4679 val = tp->fastopen_no_cookie; 4680 break; 4681 4682 case TCP_TX_DELAY: 4683 val = READ_ONCE(tp->tcp_tx_delay); 4684 break; 4685 4686 case TCP_TIMESTAMP: 4687 val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset); 4688 if (tp->tcp_usec_ts) 4689 val |= 1; 4690 else 4691 val &= ~1; 4692 break; 4693 case TCP_NOTSENT_LOWAT: 4694 val = READ_ONCE(tp->notsent_lowat); 4695 break; 4696 case TCP_INQ: 4697 val = tp->recvmsg_inq; 4698 break; 4699 case TCP_SAVE_SYN: 4700 val = tp->save_syn; 4701 break; 4702 case TCP_SAVED_SYN: { 4703 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4704 return -EFAULT; 4705 4706 sockopt_lock_sock(sk); 4707 if (tp->saved_syn) { 4708 if (len < tcp_saved_syn_len(tp->saved_syn)) { 4709 len = tcp_saved_syn_len(tp->saved_syn); 4710 if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4711 sockopt_release_sock(sk); 4712 return -EFAULT; 4713 } 4714 sockopt_release_sock(sk); 4715 return -EINVAL; 4716 } 4717 len = tcp_saved_syn_len(tp->saved_syn); 4718 if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4719 sockopt_release_sock(sk); 4720 return -EFAULT; 4721 } 4722 if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4723 sockopt_release_sock(sk); 4724 return -EFAULT; 4725 } 4726 tcp_saved_syn_free(tp); 4727 sockopt_release_sock(sk); 4728 } else { 4729 sockopt_release_sock(sk); 4730 len = 0; 4731 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4732 return -EFAULT; 4733 } 4734 return 0; 4735 } 4736 #ifdef CONFIG_MMU 4737 case TCP_ZEROCOPY_RECEIVE: { 4738 struct scm_timestamping_internal tss; 4739 struct tcp_zerocopy_receive zc = {}; 4740 int err; 4741 4742 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4743 return -EFAULT; 4744 if (len < 0 || 4745 len < offsetofend(struct tcp_zerocopy_receive, length)) 4746 return -EINVAL; 4747 if (unlikely(len > sizeof(zc))) { 4748 err = check_zeroed_sockptr(optval, sizeof(zc), 4749 len - sizeof(zc)); 4750 if (err < 1) 4751 return err == 0 ? 
-EINVAL : err; 4752 len = sizeof(zc); 4753 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4754 return -EFAULT; 4755 } 4756 if (copy_from_sockptr(&zc, optval, len)) 4757 return -EFAULT; 4758 if (zc.reserved) 4759 return -EINVAL; 4760 if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 4761 return -EINVAL; 4762 sockopt_lock_sock(sk); 4763 err = tcp_zerocopy_receive(sk, &zc, &tss); 4764 err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 4765 &zc, &len, err); 4766 sockopt_release_sock(sk); 4767 if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 4768 goto zerocopy_rcv_cmsg; 4769 switch (len) { 4770 case offsetofend(struct tcp_zerocopy_receive, msg_flags): 4771 goto zerocopy_rcv_cmsg; 4772 case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 4773 case offsetofend(struct tcp_zerocopy_receive, msg_control): 4774 case offsetofend(struct tcp_zerocopy_receive, flags): 4775 case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 4776 case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 4777 case offsetofend(struct tcp_zerocopy_receive, err): 4778 goto zerocopy_rcv_sk_err; 4779 case offsetofend(struct tcp_zerocopy_receive, inq): 4780 goto zerocopy_rcv_inq; 4781 case offsetofend(struct tcp_zerocopy_receive, length): 4782 default: 4783 goto zerocopy_rcv_out; 4784 } 4785 zerocopy_rcv_cmsg: 4786 if (zc.msg_flags & TCP_CMSG_TS) 4787 tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 4788 else 4789 zc.msg_flags = 0; 4790 zerocopy_rcv_sk_err: 4791 if (!err) 4792 zc.err = sock_error(sk); 4793 zerocopy_rcv_inq: 4794 zc.inq = tcp_inq_hint(sk); 4795 zerocopy_rcv_out: 4796 if (!err && copy_to_sockptr(optval, &zc, len)) 4797 err = -EFAULT; 4798 return err; 4799 } 4800 #endif 4801 case TCP_AO_REPAIR: 4802 if (!tcp_can_repair_sock(sk)) 4803 return -EPERM; 4804 return tcp_ao_get_repair(sk, optval, optlen); 4805 case TCP_AO_GET_KEYS: 4806 case TCP_AO_INFO: { 4807 int err; 4808 4809 sockopt_lock_sock(sk); 4810 if (optname == TCP_AO_GET_KEYS) 4811 err = tcp_ao_get_mkts(sk, optval, optlen); 4812 else 4813 err = tcp_ao_get_sock_info(sk, optval, optlen); 4814 sockopt_release_sock(sk); 4815 4816 return err; 4817 } 4818 case TCP_IS_MPTCP: 4819 val = 0; 4820 break; 4821 case TCP_RTO_MAX_MS: 4822 val = jiffies_to_msecs(tcp_rto_max(sk)); 4823 break; 4824 case TCP_RTO_MIN_US: 4825 val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_rto_min)); 4826 break; 4827 case TCP_DELACK_MAX_US: 4828 val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_delack_max)); 4829 break; 4830 default: 4831 return -ENOPROTOOPT; 4832 } 4833 4834 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4835 return -EFAULT; 4836 if (copy_to_sockptr(optval, &val, len)) 4837 return -EFAULT; 4838 return 0; 4839 } 4840 4841 bool tcp_bpf_bypass_getsockopt(int level, int optname) 4842 { 4843 /* TCP do_tcp_getsockopt has optimized getsockopt implementation 4844 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 
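 * The cgroup BPF getsockopt hook is instead run from do_tcp_getsockopt()
 * itself via BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(), so bypassing the
 * generic hook here does not lose BPF visibility.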
4845 */ 4846 if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 4847 return true; 4848 4849 return false; 4850 } 4851 4852 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 4853 int __user *optlen) 4854 { 4855 struct inet_connection_sock *icsk = inet_csk(sk); 4856 4857 if (level != SOL_TCP) 4858 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4859 return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 4860 optval, optlen); 4861 return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 4862 USER_SOCKPTR(optlen)); 4863 } 4864 4865 #ifdef CONFIG_TCP_MD5SIG 4866 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb, 4867 unsigned int header_len) 4868 { 4869 const unsigned int head_data_len = skb_headlen(skb) > header_len ? 4870 skb_headlen(skb) - header_len : 0; 4871 const struct skb_shared_info *shi = skb_shinfo(skb); 4872 struct sk_buff *frag_iter; 4873 unsigned int i; 4874 4875 md5_update(ctx, (const u8 *)tcp_hdr(skb) + header_len, head_data_len); 4876 4877 for (i = 0; i < shi->nr_frags; ++i) { 4878 const skb_frag_t *f = &shi->frags[i]; 4879 u32 p_off, p_len, copied; 4880 const void *vaddr; 4881 struct page *p; 4882 4883 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 4884 p, p_off, p_len, copied) { 4885 vaddr = kmap_local_page(p); 4886 md5_update(ctx, vaddr + p_off, p_len); 4887 kunmap_local(vaddr); 4888 } 4889 } 4890 4891 skb_walk_frags(skb, frag_iter) 4892 tcp_md5_hash_skb_data(ctx, frag_iter, 0); 4893 } 4894 4895 void tcp_md5_hash_key(struct md5_ctx *ctx, 4896 const struct tcp_md5sig_key *key) 4897 { 4898 u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 4899 4900 /* We use data_race() because tcp_md5_do_add() might change 4901 * key->key under us 4902 */ 4903 data_race(({ md5_update(ctx, key->key, keylen), 0; })); 4904 } 4905 4906 /* Called with rcu_read_lock() */ 4907 static enum skb_drop_reason 4908 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 4909 const void *saddr, const void *daddr, 4910 int family, int l3index, const __u8 *hash_location) 4911 { 4912 /* This gets called for each TCP segment that has TCP-MD5 option. 4913 * We have 2 drop cases: 4914 * o An MD5 signature is present, but we're not expecting one. 4915 * o The MD5 signature is wrong. 4916 */ 4917 const struct tcp_sock *tp = tcp_sk(sk); 4918 struct tcp_md5sig_key *key; 4919 u8 newhash[16]; 4920 4921 key = tcp_md5_do_lookup(sk, l3index, saddr, family); 4922 if (!key) { 4923 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 4924 trace_tcp_hash_md5_unexpected(sk, skb); 4925 return SKB_DROP_REASON_TCP_MD5UNEXPECTED; 4926 } 4927 4928 /* Check the signature. 4929 * To support dual stack listeners, we need to handle 4930 * IPv4-mapped case. 
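 * A dual stack (IPv6) listener can receive this segment on an IPv4-mapped
 * address, in which case the signature must be computed with the IPv4
 * variant rather than tp->af_specific->calc_md5_hash().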
4931 */ 4932 if (family == AF_INET) 4933 tcp_v4_md5_hash_skb(newhash, key, NULL, skb); 4934 else 4935 tp->af_specific->calc_md5_hash(newhash, key, NULL, skb); 4936 if (crypto_memneq(hash_location, newhash, 16)) { 4937 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 4938 trace_tcp_hash_md5_mismatch(sk, skb); 4939 return SKB_DROP_REASON_TCP_MD5FAILURE; 4940 } 4941 return SKB_NOT_DROPPED_YET; 4942 } 4943 #else 4944 static inline enum skb_drop_reason 4945 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 4946 const void *saddr, const void *daddr, 4947 int family, int l3index, const __u8 *hash_location) 4948 { 4949 return SKB_NOT_DROPPED_YET; 4950 } 4951 4952 #endif 4953 4954 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) 4955 /* 4956 * Parse Signature options 4957 */ 4958 int tcp_do_parse_auth_options(const struct tcphdr *th, 4959 const u8 **md5_hash, const u8 **ao_hash) 4960 { 4961 int length = (th->doff << 2) - sizeof(*th); 4962 const u8 *ptr = (const u8 *)(th + 1); 4963 unsigned int minlen = TCPOLEN_MD5SIG; 4964 4965 if (IS_ENABLED(CONFIG_TCP_AO)) 4966 minlen = sizeof(struct tcp_ao_hdr) + 1; 4967 4968 *md5_hash = NULL; 4969 *ao_hash = NULL; 4970 4971 /* If not enough data remaining, we can short cut */ 4972 while (length >= minlen) { 4973 int opcode = *ptr++; 4974 int opsize; 4975 4976 switch (opcode) { 4977 case TCPOPT_EOL: 4978 return 0; 4979 case TCPOPT_NOP: 4980 length--; 4981 continue; 4982 default: 4983 opsize = *ptr++; 4984 if (opsize < 2 || opsize > length) 4985 return -EINVAL; 4986 if (opcode == TCPOPT_MD5SIG) { 4987 if (opsize != TCPOLEN_MD5SIG) 4988 return -EINVAL; 4989 if (unlikely(*md5_hash || *ao_hash)) 4990 return -EEXIST; 4991 *md5_hash = ptr; 4992 } else if (opcode == TCPOPT_AO) { 4993 if (opsize <= sizeof(struct tcp_ao_hdr)) 4994 return -EINVAL; 4995 if (unlikely(*md5_hash || *ao_hash)) 4996 return -EEXIST; 4997 *ao_hash = ptr; 4998 } 4999 } 5000 ptr += opsize - 2; 5001 length -= opsize; 5002 } 5003 return 0; 5004 } 5005 #endif 5006 5007 /* Called with rcu_read_lock() */ 5008 enum skb_drop_reason 5009 tcp_inbound_hash(struct sock *sk, const struct request_sock *req, 5010 const struct sk_buff *skb, 5011 const void *saddr, const void *daddr, 5012 int family, int dif, int sdif) 5013 { 5014 const struct tcphdr *th = tcp_hdr(skb); 5015 const struct tcp_ao_hdr *aoh; 5016 const __u8 *md5_location; 5017 int l3index; 5018 5019 /* Invalid option or two times meet any of auth options */ 5020 if (tcp_parse_auth_options(th, &md5_location, &aoh)) { 5021 trace_tcp_hash_bad_header(sk, skb); 5022 return SKB_DROP_REASON_TCP_AUTH_HDR; 5023 } 5024 5025 if (req) { 5026 if (tcp_rsk_used_ao(req) != !!aoh) { 5027 u8 keyid, rnext, maclen; 5028 5029 if (aoh) { 5030 keyid = aoh->keyid; 5031 rnext = aoh->rnext_keyid; 5032 maclen = tcp_ao_hdr_maclen(aoh); 5033 } else { 5034 keyid = rnext = maclen = 0; 5035 } 5036 5037 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); 5038 trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen); 5039 return SKB_DROP_REASON_TCP_AOFAILURE; 5040 } 5041 } 5042 5043 /* sdif set, means packet ingressed via a device 5044 * in an L3 domain and dif is set to the l3mdev 5045 */ 5046 l3index = sdif ? dif : 0; 5047 5048 /* Fast path: unsigned segments */ 5049 if (likely(!md5_location && !aoh)) { 5050 /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid 5051 * for the remote peer. On TCP-AO established connection 5052 * the last key is impossible to remove, so there's 5053 * always at least one current_key. 
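 * Put differently: an unsigned segment is only accepted when neither a
 * TCP-AO nor a TCP-MD5 key is configured for this peer.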
5054 */ 5055 if (tcp_ao_required(sk, saddr, family, l3index, true)) { 5056 trace_tcp_hash_ao_required(sk, skb); 5057 return SKB_DROP_REASON_TCP_AONOTFOUND; 5058 } 5059 if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { 5060 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 5061 trace_tcp_hash_md5_required(sk, skb); 5062 return SKB_DROP_REASON_TCP_MD5NOTFOUND; 5063 } 5064 return SKB_NOT_DROPPED_YET; 5065 } 5066 5067 if (aoh) 5068 return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); 5069 5070 return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, 5071 l3index, md5_location); 5072 } 5073 5074 void tcp_done(struct sock *sk) 5075 { 5076 struct request_sock *req; 5077 5078 /* We might be called with a new socket, after 5079 * inet_csk_prepare_forced_close() has been called 5080 * so we can not use lockdep_sock_is_held(sk) 5081 */ 5082 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 5083 5084 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 5085 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 5086 5087 tcp_set_state(sk, TCP_CLOSE); 5088 tcp_clear_xmit_timers(sk); 5089 if (req) 5090 reqsk_fastopen_remove(sk, req, false); 5091 5092 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 5093 5094 if (!sock_flag(sk, SOCK_DEAD)) 5095 sk->sk_state_change(sk); 5096 else 5097 inet_csk_destroy_sock(sk); 5098 } 5099 EXPORT_SYMBOL_GPL(tcp_done); 5100 5101 int tcp_abort(struct sock *sk, int err) 5102 { 5103 int state = inet_sk_state_load(sk); 5104 5105 if (state == TCP_NEW_SYN_RECV) { 5106 struct request_sock *req = inet_reqsk(sk); 5107 5108 local_bh_disable(); 5109 inet_csk_reqsk_queue_drop(req->rsk_listener, req); 5110 local_bh_enable(); 5111 return 0; 5112 } 5113 if (state == TCP_TIME_WAIT) { 5114 struct inet_timewait_sock *tw = inet_twsk(sk); 5115 5116 refcount_inc(&tw->tw_refcnt); 5117 local_bh_disable(); 5118 inet_twsk_deschedule_put(tw); 5119 local_bh_enable(); 5120 return 0; 5121 } 5122 5123 /* BPF context ensures sock locking. */ 5124 if (!has_current_bpf_ctx()) 5125 /* Don't race with userspace socket closes such as tcp_close. */ 5126 lock_sock(sk); 5127 5128 /* Avoid closing the same socket twice. */ 5129 if (sk->sk_state == TCP_CLOSE) { 5130 if (!has_current_bpf_ctx()) 5131 release_sock(sk); 5132 return -ENOENT; 5133 } 5134 5135 if (sk->sk_state == TCP_LISTEN) { 5136 tcp_set_state(sk, TCP_CLOSE); 5137 inet_csk_listen_stop(sk); 5138 } 5139 5140 /* Don't race with BH socket closes such as inet_csk_listen_stop. 
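 * bh_lock_sock() with BHs disabled serialises against softirq processing
 * of this socket before the reset is sent and the error is reported.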
*/ 5141 local_bh_disable(); 5142 bh_lock_sock(sk); 5143 5144 if (tcp_need_reset(sk->sk_state)) 5145 tcp_send_active_reset(sk, GFP_ATOMIC, 5146 SK_RST_REASON_TCP_STATE); 5147 tcp_done_with_error(sk, err); 5148 5149 bh_unlock_sock(sk); 5150 local_bh_enable(); 5151 if (!has_current_bpf_ctx()) 5152 release_sock(sk); 5153 return 0; 5154 } 5155 EXPORT_SYMBOL_GPL(tcp_abort); 5156 5157 extern struct tcp_congestion_ops tcp_reno; 5158 5159 static __initdata unsigned long thash_entries; 5160 static int __init set_thash_entries(char *str) 5161 { 5162 ssize_t ret; 5163 5164 if (!str) 5165 return 0; 5166 5167 ret = kstrtoul(str, 0, &thash_entries); 5168 if (ret) 5169 return 0; 5170 5171 return 1; 5172 } 5173 __setup("thash_entries=", set_thash_entries); 5174 5175 static void __init tcp_init_mem(void) 5176 { 5177 unsigned long limit = nr_free_buffer_pages() / 16; 5178 5179 limit = max(limit, 128UL); 5180 sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 5181 sysctl_tcp_mem[1] = limit; /* 6.25 % */ 5182 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 5183 } 5184 5185 static void __init tcp_struct_check(void) 5186 { 5187 /* TX read-mostly hotpath cache lines */ 5188 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window); 5189 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh); 5190 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering); 5191 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat); 5192 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs); 5193 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint); 5194 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5195 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, tcp_clean_acked); 5196 #endif 5197 5198 /* TXRX read-mostly hotpath cache lines */ 5199 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset); 5200 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd); 5201 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache); 5202 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd); 5203 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out); 5204 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out); 5205 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out); 5206 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio); 5207 5208 /* RX read-mostly hotpath cache lines */ 5209 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq); 5210 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1); 5211 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq); 5212 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us); 5213 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out); 5214 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss); 5215 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data); 5216 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost); 5217 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min); 5218 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue); 5219 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh); 5220 5221 /* TX read-write hotpath cache lines */ 5222 
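/* These fields are dirtied by the transmit fast path; keeping them in one
 * cache line group limits the number of cache lines the TX path dirties,
 * and the asserts catch layout regressions at build time.
 */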
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out); 5223 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out); 5224 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent); 5225 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml); 5226 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start); 5227 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat); 5228 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq); 5229 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq); 5230 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); 5231 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); 5232 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); 5233 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, accecn_opt_tstamp); 5234 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); 5235 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); 5236 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); 5237 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); 5238 5239 /* TXRX read-write hotpath cache lines */ 5240 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); 5241 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache); 5242 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp); 5243 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); 5244 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); 5245 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); 5246 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp); 5247 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us); 5248 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out); 5249 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up); 5250 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered); 5251 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce); 5252 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, received_ce); 5253 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, received_ecn_bytes); 5254 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); 5255 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); 5256 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_mwnd_seq); 5257 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_tstamp); 5258 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); 5259 5260 /* RX read-write hotpath cache lines */ 5261 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); 5262 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in); 5263 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in); 5264 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup); 5265 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out); 5266 CACHELINE_ASSERT_GROUP_MEMBER(struct 
tcp_sock, tcp_sock_write_rx, cwnd_usage_seq); 5267 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered); 5268 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us); 5269 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr); 5270 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_ecn_bytes); 5271 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, pkts_acked_ewma); 5272 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp); 5273 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp); 5274 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked); 5275 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est); 5276 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space); 5277 } 5278 5279 void __init tcp_init(void) 5280 { 5281 int max_rshare, max_wshare, cnt; 5282 unsigned long limit; 5283 unsigned int i; 5284 5285 BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 5286 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 5287 sizeof_field(struct sk_buff, cb)); 5288 5289 tcp_struct_check(); 5290 5291 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 5292 5293 timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); 5294 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 5295 5296 inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", 5297 thash_entries, 21, /* one slot per 2 MB*/ 5298 0, 64 * 1024); 5299 tcp_hashinfo.bind_bucket_cachep = 5300 kmem_cache_create("tcp_bind_bucket", 5301 sizeof(struct inet_bind_bucket), 0, 5302 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 5303 SLAB_ACCOUNT, 5304 NULL); 5305 tcp_hashinfo.bind2_bucket_cachep = 5306 kmem_cache_create("tcp_bind2_bucket", 5307 sizeof(struct inet_bind2_bucket), 0, 5308 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 5309 SLAB_ACCOUNT, 5310 NULL); 5311 5312 /* Size and allocate the main established and bind bucket 5313 * hash tables. 5314 * 5315 * The methodology is similar to that of the buffer cache. 5316 */ 5317 tcp_hashinfo.ehash = 5318 alloc_large_system_hash("TCP established", 5319 sizeof(struct inet_ehash_bucket), 5320 thash_entries, 5321 17, /* one slot per 128 KB of memory */ 5322 0, 5323 NULL, 5324 &tcp_hashinfo.ehash_mask, 5325 0, 5326 thash_entries ? 
0 : 512 * 1024); 5327 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) 5328 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 5329 5330 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 5331 panic("TCP: failed to alloc ehash_locks"); 5332 tcp_hashinfo.bhash = 5333 alloc_large_system_hash("TCP bind", 5334 2 * sizeof(struct inet_bind_hashbucket), 5335 tcp_hashinfo.ehash_mask + 1, 5336 17, /* one slot per 128 KB of memory */ 5337 0, 5338 &tcp_hashinfo.bhash_size, 5339 NULL, 5340 0, 5341 64 * 1024); 5342 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 5343 tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; 5344 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 5345 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 5346 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 5347 spin_lock_init(&tcp_hashinfo.bhash2[i].lock); 5348 INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); 5349 } 5350 5351 tcp_hashinfo.pernet = false; 5352 5353 cnt = tcp_hashinfo.ehash_mask + 1; 5354 sysctl_tcp_max_orphans = cnt / 2; 5355 5356 tcp_init_mem(); 5357 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 5358 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 5359 max_wshare = min(4UL*1024*1024, limit); 5360 max_rshare = min(32UL*1024*1024, limit); 5361 5362 init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE; 5363 init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; 5364 init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 5365 5366 init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE; 5367 init_net.ipv4.sysctl_tcp_rmem[1] = 131072; 5368 init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); 5369 5370 pr_info("Hash tables configured (established %u bind %u)\n", 5371 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 5372 5373 tcp_v4_init(); 5374 tcp_metrics_init(); 5375 BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); 5376 tcp_tsq_work_init(); 5377 mptcp_init(); 5378 } 5379
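/* Illustrative userspace usage of the socket option paths implemented in
 * this file; a sketch only, not part of the kernel sources (the chosen
 * congestion control name and the lack of error handling are assumptions):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	char ca[] = "cubic";
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len);
 */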