1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Implementation of the Transmission Control Protocol(TCP).
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Mark Evans, <evansmp@uhura.aston.ac.uk>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
15 * Linus Torvalds, <torvalds@cs.helsinki.fi>
16 * Alan Cox, <gw4pts@gw4pts.ampr.org>
17 * Matthew Dillon, <dillon@apollo.west.oic.com>
18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
19 * Jorge Cwik, <jorge@laser.satlink.net>
20 */
21
22 /*
23 * Changes: Pedro Roque : Retransmit queue handled by TCP.
24 * : Fragmentation on mtu decrease
25 * : Segment collapse on retransmit
26 * : AF independence
27 *
28 * Linus Torvalds : send_delayed_ack
29 * David S. Miller : Charge memory using the right skb
30 * during syn/ack processing.
31 * David S. Miller : Output engine completely rewritten.
32 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr.
33 * Cacophonix Gaul : draft-minshall-nagle-01
34 * J Hadi Salim : ECN support
35 *
36 */
37
38 #define pr_fmt(fmt) "TCP: " fmt
39
40 #include <net/tcp.h>
41 #include <net/tcp_ecn.h>
42 #include <net/mptcp.h>
43 #include <net/smc.h>
44 #include <net/proto_memory.h>
45 #include <net/psp.h>
46
47 #include <linux/compiler.h>
48 #include <linux/gfp.h>
49 #include <linux/module.h>
50 #include <linux/static_key.h>
51 #include <linux/skbuff_ref.h>
52
53 #include <trace/events/tcp.h>
54
55 /* Refresh clocks of a TCP socket,
56 * ensuring monotonically increasing values.
57 */
58 void tcp_mstamp_refresh(struct tcp_sock *tp)
59 {
60 u64 val = tcp_clock_ns();
61
62 tp->tcp_clock_cache = val;
63 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
64 }
65
66 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
67 int push_one, gfp_t gfp);
68
69 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
70 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
71 {
72 struct rb_node **p = &root->rb_node;
73 struct rb_node *parent = NULL;
74 struct sk_buff *skb1;
75
76 while (*p) {
77 parent = *p;
78 skb1 = rb_to_skb(parent);
79 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
80 p = &parent->rb_left;
81 else
82 p = &parent->rb_right;
83 }
84 rb_link_node(&skb->rbnode, parent, p);
85 rb_insert_color(&skb->rbnode, root);
86 }
87
88 /* Account for new data that has been sent to the network. */
89 static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
90 {
91 struct inet_connection_sock *icsk = inet_csk(sk);
92 struct tcp_sock *tp = tcp_sk(sk);
93 unsigned int prior_packets = tp->packets_out;
94
95 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
96
97 __skb_unlink(skb, &sk->sk_write_queue);
98 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
99
100 if (tp->highest_sack == NULL)
101 tp->highest_sack = skb;
102
103 tp->packets_out += tcp_skb_pcount(skb);
104 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
105 tcp_rearm_rto(sk);
106
107 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
108 tcp_skb_pcount(skb));
109 tcp_check_space(sk);
110 }
111
112 /* Return SND.NXT if the window was not shrunk, or if the amount of shrinkage
113 * was less than one window scaling factor (i.e. lost to precision).
114 * If the window has been shrunk, what should we send? It is not clear at all.
115 * Using SND.UNA we will fail to open the window, and SND.NXT is out of window. :-(
116 * Anything in between SND.UNA...SND.UNA+SND.WND can already be
117 * invalid as well. OK, let's settle on this for now:
118 */
119 static inline __u32 tcp_acceptable_seq(const struct sock *sk)
120 {
121 const struct tcp_sock *tp = tcp_sk(sk);
122
123 if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
124 (tp->rx_opt.wscale_ok &&
125 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
126 return tp->snd_nxt;
127 else
128 return tcp_wnd_end(tp);
129 }
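/* Illustrative example (editorial note, not from the original source): with a
 * window scale of 7, window advertisements have a granularity of 128 bytes,
 * so overshooting the window end by less than 1 << 7 bytes may be nothing
 * more than scaling precision; in that case SND.NXT is still returned above.
 */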
130
131 /* Calculate mss to advertise in SYN segment.
132 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
133 *
134 * 1. It is independent of path mtu.
135 * 2. Ideally, it is the maximum possible segment size, i.e. 65535-40.
136 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
137 * attached devices, because some buggy hosts are confused by
138 * large MSS.
139 * 4. We do not implement 3; we advertise an MSS calculated from the first
140 * hop device mtu, but allow it to be raised to ip_rt_min_advmss.
141 * This may be overridden via information stored in the routing table.
142 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
143 * probably even Jumbo".
144 */
145 static __u16 tcp_advertise_mss(struct sock *sk)
146 {
147 struct tcp_sock *tp = tcp_sk(sk);
148 const struct dst_entry *dst = __sk_dst_get(sk);
149 int mss = tp->advmss;
150
151 if (dst) {
152 unsigned int metric = dst_metric_advmss(dst);
153
154 if (metric < mss) {
155 mss = metric;
156 tp->advmss = mss;
157 }
158 }
159
160 return (__u16)mss;
161 }
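/* Illustrative example (editorial note): on a plain Ethernet path
 * dst_metric_advmss() typically yields 1460 for IPv4 (1500 MTU minus 40 bytes
 * of IP+TCP headers); if tp->advmss was larger, it is lowered to that value,
 * which is what ends up in the SYN's MSS option.
 */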
162
163 /* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
164 * This is the first part of cwnd validation mechanism.
165 */
166 void tcp_cwnd_restart(struct sock *sk, s32 delta)
167 {
168 struct tcp_sock *tp = tcp_sk(sk);
169 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
170 u32 cwnd = tcp_snd_cwnd(tp);
171
172 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
173
174 tp->snd_ssthresh = tcp_current_ssthresh(sk);
175 restart_cwnd = min(restart_cwnd, cwnd);
176
177 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
178 cwnd >>= 1;
179 tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
180 tp->snd_cwnd_stamp = tcp_jiffies32;
181 tp->snd_cwnd_used = 0;
182 }
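/* Worked example (editorial note): with cwnd = 40 segments, RTO = 200 ms and
 * an idle period of ~700 ms, the loop above halves cwnd three times
 * (40 -> 20 -> 10 -> 5); the final tcp_snd_cwnd_set() then clamps the result
 * so it never drops below the restart window from tcp_init_cwnd().
 */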
183
184 /* Congestion state accounting after a packet has been sent. */
185 static void tcp_event_data_sent(struct tcp_sock *tp,
186 struct sock *sk)
187 {
188 struct inet_connection_sock *icsk = inet_csk(sk);
189 const u32 now = tcp_jiffies32;
190
191 if (tcp_packets_in_flight(tp) == 0)
192 tcp_ca_event(sk, CA_EVENT_TX_START);
193
194 tp->lsndtime = now;
195
196 /* If this is a reply within ato after the last received
197 * packet, increase the pingpong count.
198 */
199 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
200 inet_csk_inc_pingpong_cnt(sk);
201 }
202
203 /* Account for an ACK we sent. */
204 static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
205 {
206 struct tcp_sock *tp = tcp_sk(sk);
207
208 if (unlikely(tp->compressed_ack)) {
209 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
210 tp->compressed_ack);
211 tp->compressed_ack = 0;
212 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
213 __sock_put(sk);
214 }
215
216 if (unlikely(rcv_nxt != tp->rcv_nxt))
217 return; /* Special ACK sent by DCTCP to reflect ECN */
218 tcp_dec_quickack_mode(sk);
219 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
220 }
221
222 /* Determine a window scaling and initial window to offer.
223 * Based on the assumption that the given amount of space
224 * will be offered. Store the results in the tp structure.
225 * NOTE: for smooth operation initial space offering should
226 * be a multiple of mss if possible. We assume here that mss >= 1.
227 * This MUST be enforced by all callers.
228 */
229 void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
230 __u32 *rcv_wnd, __u32 *__window_clamp,
231 int wscale_ok, __u8 *rcv_wscale,
232 __u32 init_rcv_wnd)
233 {
234 unsigned int space = (__space < 0 ? 0 : __space);
235 u32 window_clamp = READ_ONCE(*__window_clamp);
236
237 /* If no clamp set the clamp to the max possible scaled window */
238 if (window_clamp == 0)
239 window_clamp = (U16_MAX << TCP_MAX_WSCALE);
240 space = min(window_clamp, space);
241
242 /* Quantize space offering to a multiple of mss if possible. */
243 if (space > mss)
244 space = rounddown(space, mss);
245
246 /* NOTE: offering an initial window larger than 32767
247 * will break some buggy TCP stacks. If the admin tells us
248 * it is likely we could be speaking with such a buggy stack
249 * we will truncate our initial window offering to 32K-1
250 * unless the remote has sent us a window scaling option,
251 * which we interpret as a sign the remote TCP is not
252 * misinterpreting the window field as a signed quantity.
253 */
254 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
255 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
256 else
257 (*rcv_wnd) = space;
258
259 if (init_rcv_wnd)
260 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
261
262 *rcv_wscale = 0;
263 if (wscale_ok) {
264 /* Set window scaling on max possible window */
265 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
266 space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
267 space = min_t(u32, space, window_clamp);
268 *rcv_wscale = clamp_t(int, ilog2(space) - 15,
269 0, TCP_MAX_WSCALE);
270 }
271 /* Set the clamp no higher than max representable value */
272 WRITE_ONCE(*__window_clamp,
273 min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp));
274 }
275 EXPORT_IPV6_MOD(tcp_select_initial_window);
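/* Worked example (editorial note, values are only an assumption): with
 * tcp_rmem[2] / rmem_max around 6 MB and no smaller window clamp, space is
 * roughly 6 MB, so clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE)
 * evaluates to 22 - 15 = 7, i.e. the receive window will be advertised in
 * 128-byte units.
 */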
276
277 /* Choose a new window to advertise, update state in tcp_sock for the
278 * socket, and return result with RFC1323 scaling applied. The return
279 * value can be stuffed directly into th->window for an outgoing
280 * frame.
281 */
282 static u16 tcp_select_window(struct sock *sk)
283 {
284 struct tcp_sock *tp = tcp_sk(sk);
285 struct net *net = sock_net(sk);
286 u32 old_win = tp->rcv_wnd;
287 u32 cur_win, new_win;
288
289 /* Make the window 0 if we failed to queue the data because we
290 * are out of memory.
291 */
292 if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) {
293 tp->pred_flags = 0;
294 tp->rcv_wnd = 0;
295 tp->rcv_wup = tp->rcv_nxt;
296 return 0;
297 }
298
299 cur_win = tcp_receive_window(tp);
300 new_win = __tcp_select_window(sk);
301 if (new_win < cur_win) {
302 /* Danger Will Robinson!
303 * Don't update rcv_wup/rcv_wnd here or else
304 * we will not be able to advertise a zero
305 * window in time. --DaveM
306 *
307 * Relax Will Robinson.
308 */
309 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
310 /* Never shrink the offered window */
311 if (new_win == 0)
312 NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
313 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
314 }
315 }
316
317 tp->rcv_wnd = new_win;
318 tp->rcv_wup = tp->rcv_nxt;
319
320 /* Make sure we do not exceed the maximum possible
321 * scaled window.
322 */
323 if (!tp->rx_opt.rcv_wscale &&
324 READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
325 new_win = min(new_win, MAX_TCP_WINDOW);
326 else
327 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
328
329 /* RFC1323 scaling applied */
330 new_win >>= tp->rx_opt.rcv_wscale;
331
332 /* If we advertise zero window, disable fast path. */
333 if (new_win == 0) {
334 tp->pred_flags = 0;
335 if (old_win)
336 NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
337 } else if (old_win == 0) {
338 NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
339 }
340
341 return new_win;
342 }
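/* Illustrative example (editorial note): with rcv_wscale = 7 and
 * tcp_shrink_window disabled, if __tcp_select_window() suggests less than the
 * currently advertised window, the branch above keeps advertising the current
 * window rounded up to the 128-byte scale granularity, so the peer never sees
 * the window move backwards.
 */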
343
344 /* Set up ECN state for a packet on an ESTABLISHED socket that is about to
345 * be sent.
346 */
347 static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
348 struct tcphdr *th, int tcp_header_len)
349 {
350 struct tcp_sock *tp = tcp_sk(sk);
351
352 if (!tcp_ecn_mode_any(tp))
353 return;
354
355 if (tcp_ecn_mode_accecn(tp)) {
356 if (!tcp_accecn_ace_fail_recv(tp) &&
357 !tcp_accecn_ace_fail_send(tp))
358 INET_ECN_xmit(sk);
359 else
360 INET_ECN_dontxmit(sk);
361 tcp_accecn_set_ace(tp, skb, th);
362 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ACCECN;
363 } else {
364 /* Not-retransmitted data segment: set ECT and inject CWR. */
365 if (skb->len != tcp_header_len &&
366 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
367 INET_ECN_xmit(sk);
368 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
369 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
370 th->cwr = 1;
371 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
372 }
373 } else if (!tcp_ca_needs_ecn(sk)) {
374 /* ACK or retransmitted segment: clear ECT|CE */
375 INET_ECN_dontxmit(sk);
376 }
377 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
378 th->ece = 1;
379 }
380 }
381
382 /* Constructs the common control bits of a non-data skb. If SYN/FIN is present,
383 * auto-increment the end seqno.
384 */
385 static void tcp_init_nondata_skb(struct sk_buff *skb, struct sock *sk,
386 u32 seq, u16 flags)
387 {
388 skb->ip_summed = CHECKSUM_PARTIAL;
389
390 TCP_SKB_CB(skb)->tcp_flags = flags;
391
392 tcp_skb_pcount_set(skb, 1);
393 psp_enqueue_set_decrypted(sk, skb);
394
395 TCP_SKB_CB(skb)->seq = seq;
396 if (flags & (TCPHDR_SYN | TCPHDR_FIN))
397 seq++;
398 TCP_SKB_CB(skb)->end_seq = seq;
399 }
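/* Editorial note: SYN and FIN each consume one unit of sequence space, which
 * is why end_seq is bumped above; e.g. a bare ACK built with seq = 1000 gets
 * end_seq = 1000, while a FIN with seq = 1000 gets end_seq = 1001.
 */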
400
401 static inline bool tcp_urg_mode(const struct tcp_sock *tp)
402 {
403 return tp->snd_una != tp->snd_up;
404 }
405
406 #define OPTION_SACK_ADVERTISE BIT(0)
407 #define OPTION_TS BIT(1)
408 #define OPTION_MD5 BIT(2)
409 #define OPTION_WSCALE BIT(3)
410 #define OPTION_FAST_OPEN_COOKIE BIT(8)
411 #define OPTION_SMC BIT(9)
412 #define OPTION_MPTCP BIT(10)
413 #define OPTION_AO BIT(11)
414 #define OPTION_ACCECN BIT(12)
415
416 static void smc_options_write(__be32 *ptr, u16 *options)
417 {
418 #if IS_ENABLED(CONFIG_SMC)
419 if (static_branch_unlikely(&tcp_have_smc)) {
420 if (unlikely(OPTION_SMC & *options)) {
421 *ptr++ = htonl((TCPOPT_NOP << 24) |
422 (TCPOPT_NOP << 16) |
423 (TCPOPT_EXP << 8) |
424 (TCPOLEN_EXP_SMC_BASE));
425 *ptr++ = htonl(TCPOPT_SMC_MAGIC);
426 }
427 }
428 #endif
429 }
430
431 struct tcp_out_options {
432 u16 options; /* bit field of OPTION_* */
433 u16 mss; /* 0 to disable */
434 u8 ws; /* window scale, 0 to disable */
435 u8 num_sack_blocks; /* number of SACK blocks to include */
436 u8 num_accecn_fields:7, /* number of AccECN fields needed */
437 use_synack_ecn_bytes:1; /* Use synack_ecn_bytes or not */
438 u8 hash_size; /* bytes in hash_location */
439 u8 bpf_opt_len; /* length of BPF hdr option */
440 __u8 *hash_location; /* temporary pointer, overloaded */
441 __u32 tsval, tsecr; /* need to include OPTION_TS */
442 struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
443 struct mptcp_out_options mptcp;
444 };
445
446 static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
447 struct tcp_sock *tp,
448 struct tcp_out_options *opts)
449 {
450 #if IS_ENABLED(CONFIG_MPTCP)
451 if (unlikely(OPTION_MPTCP & opts->options))
452 mptcp_write_options(th, ptr, tp, &opts->mptcp);
453 #endif
454 }
455
456 #ifdef CONFIG_CGROUP_BPF
457 static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
458 enum tcp_synack_type synack_type)
459 {
460 if (unlikely(!skb))
461 return BPF_WRITE_HDR_TCP_CURRENT_MSS;
462
463 if (unlikely(synack_type == TCP_SYNACK_COOKIE))
464 return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
465
466 return 0;
467 }
468
469 /* req, syn_skb and synack_type are used when writing synack */
470 static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
471 struct request_sock *req,
472 struct sk_buff *syn_skb,
473 enum tcp_synack_type synack_type,
474 struct tcp_out_options *opts,
475 unsigned int *remaining)
476 {
477 struct bpf_sock_ops_kern sock_ops;
478 int err;
479
480 if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
481 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
482 !*remaining)
483 return;
484
485 /* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
486
487 /* init sock_ops */
488 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
489
490 sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
491
492 if (req) {
493 /* The listen "sk" cannot be passed here because
494 * it is not locked. It would also not make much
495 * sense to do bpf_setsockopt(listen_sk) based
496 * on an individual connection request.
497 *
498 * Thus, "req" is passed here and the cgroup-bpf-progs
499 * of the listen "sk" will be run.
500 *
501 * "req" is also used here for fastopen even the "sk" here is
502 * a fullsock "child" sk. It is to keep the behavior
503 * consistent between fastopen and non-fastopen on
504 * the bpf programming side.
505 */
506 sock_ops.sk = (struct sock *)req;
507 sock_ops.syn_skb = syn_skb;
508 } else {
509 sock_owned_by_me(sk);
510
511 sock_ops.is_fullsock = 1;
512 sock_ops.is_locked_tcp_sock = 1;
513 sock_ops.sk = sk;
514 }
515
516 sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
517 sock_ops.remaining_opt_len = *remaining;
518 /* tcp_current_mss() does not pass a skb */
519 if (skb)
520 bpf_skops_init_skb(&sock_ops, skb, 0);
521
522 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
523
524 if (err || sock_ops.remaining_opt_len == *remaining)
525 return;
526
527 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
528 /* round up to 4 bytes */
529 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
530
531 *remaining -= opts->bpf_opt_len;
532 }
533
534 static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
535 struct request_sock *req,
536 struct sk_buff *syn_skb,
537 enum tcp_synack_type synack_type,
538 struct tcp_out_options *opts)
539 {
540 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
541 struct bpf_sock_ops_kern sock_ops;
542 int err;
543
544 if (likely(!max_opt_len))
545 return;
546
547 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
548
549 sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
550
551 if (req) {
552 sock_ops.sk = (struct sock *)req;
553 sock_ops.syn_skb = syn_skb;
554 } else {
555 sock_owned_by_me(sk);
556
557 sock_ops.is_fullsock = 1;
558 sock_ops.is_locked_tcp_sock = 1;
559 sock_ops.sk = sk;
560 }
561
562 sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
563 sock_ops.remaining_opt_len = max_opt_len;
564 first_opt_off = tcp_hdrlen(skb) - max_opt_len;
565 bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
566
567 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
568
569 if (err)
570 nr_written = 0;
571 else
572 nr_written = max_opt_len - sock_ops.remaining_opt_len;
573
574 if (nr_written < max_opt_len)
575 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
576 max_opt_len - nr_written);
577 }
578 #else
579 static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
580 struct request_sock *req,
581 struct sk_buff *syn_skb,
582 enum tcp_synack_type synack_type,
583 struct tcp_out_options *opts,
584 unsigned int *remaining)
585 {
586 }
587
588 static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
589 struct request_sock *req,
590 struct sk_buff *syn_skb,
591 enum tcp_synack_type synack_type,
592 struct tcp_out_options *opts)
593 {
594 }
595 #endif
596
597 static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
598 const struct tcp_request_sock *tcprsk,
599 struct tcp_out_options *opts,
600 struct tcp_key *key, __be32 *ptr)
601 {
602 #ifdef CONFIG_TCP_AO
603 u8 maclen = tcp_ao_maclen(key->ao_key);
604
605 if (tcprsk) {
606 u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
607
608 *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
609 (tcprsk->ao_keyid << 8) |
610 (tcprsk->ao_rcv_next));
611 } else {
612 struct tcp_ao_key *rnext_key;
613 struct tcp_ao_info *ao_info;
614
615 ao_info = rcu_dereference_check(tp->ao_info,
616 lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
617 rnext_key = READ_ONCE(ao_info->rnext_key);
618 if (WARN_ON_ONCE(!rnext_key))
619 return ptr;
620 *ptr++ = htonl((TCPOPT_AO << 24) |
621 (tcp_ao_len(key->ao_key) << 16) |
622 (key->ao_key->sndid << 8) |
623 (rnext_key->rcvid));
624 }
625 opts->hash_location = (__u8 *)ptr;
626 ptr += maclen / sizeof(*ptr);
627 if (unlikely(maclen % sizeof(*ptr))) {
628 memset(ptr, TCPOPT_NOP, sizeof(*ptr));
629 ptr++;
630 }
631 #endif
632 return ptr;
633 }
634
635 /* Initial values for the AccECN option; the ordering is based on the ECN field
636 * bits, similar to received_ecn_bytes. Used for the SYN/ACK AccECN option.
637 */
638 static const u32 synack_ecn_bytes[3] = { 0, 0, 0 };
639
640 /* Write previously computed TCP options to the packet.
641 *
642 * Beware: Something in the Internet is very sensitive to the ordering of
643 * TCP options; we learned this the hard way, so be careful here.
644 * Luckily we can at least blame others for their non-compliance, but from an
645 * inter-operability perspective it seems that we're somewhat stuck with
646 * the ordering which we have been using if we want to keep working with
647 * those broken things (not that it currently hurts anybody as there isn't a
648 * particular reason why the ordering would need to be changed).
649 *
650 * At least SACK_PERM as the first option is known to lead to a disaster
651 * (but it may well be that other scenarios fail similarly).
652 */
653 static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
654 const struct tcp_request_sock *tcprsk,
655 struct tcp_out_options *opts,
656 struct tcp_key *key)
657 {
658 u8 leftover_highbyte = TCPOPT_NOP; /* replace 1st NOP if avail */
659 u8 leftover_lowbyte = TCPOPT_NOP; /* replace 2nd NOP in succession */
660 __be32 *ptr = (__be32 *)(th + 1);
661 u16 options = opts->options; /* mungable copy */
662
663 if (tcp_key_is_md5(key)) {
664 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
665 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
666 /* overload cookie hash location */
667 opts->hash_location = (__u8 *)ptr;
668 ptr += 4;
669 } else if (tcp_key_is_ao(key)) {
670 ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr);
671 }
672 if (unlikely(opts->mss)) {
673 *ptr++ = htonl((TCPOPT_MSS << 24) |
674 (TCPOLEN_MSS << 16) |
675 opts->mss);
676 }
677
678 if (likely(OPTION_TS & options)) {
679 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
680 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
681 (TCPOLEN_SACK_PERM << 16) |
682 (TCPOPT_TIMESTAMP << 8) |
683 TCPOLEN_TIMESTAMP);
684 options &= ~OPTION_SACK_ADVERTISE;
685 } else {
686 *ptr++ = htonl((TCPOPT_NOP << 24) |
687 (TCPOPT_NOP << 16) |
688 (TCPOPT_TIMESTAMP << 8) |
689 TCPOLEN_TIMESTAMP);
690 }
691 *ptr++ = htonl(opts->tsval);
692 *ptr++ = htonl(opts->tsecr);
693 }
694
695 if (OPTION_ACCECN & options) {
696 const u32 *ecn_bytes = opts->use_synack_ecn_bytes ?
697 synack_ecn_bytes :
698 tp->received_ecn_bytes;
699 const u8 ect0_idx = INET_ECN_ECT_0 - 1;
700 const u8 ect1_idx = INET_ECN_ECT_1 - 1;
701 const u8 ce_idx = INET_ECN_CE - 1;
702 u32 e0b;
703 u32 e1b;
704 u32 ceb;
705 u8 len;
706
707 e0b = ecn_bytes[ect0_idx] + TCP_ACCECN_E0B_INIT_OFFSET;
708 e1b = ecn_bytes[ect1_idx] + TCP_ACCECN_E1B_INIT_OFFSET;
709 ceb = ecn_bytes[ce_idx] + TCP_ACCECN_CEB_INIT_OFFSET;
710 len = TCPOLEN_ACCECN_BASE +
711 opts->num_accecn_fields * TCPOLEN_ACCECN_PERFIELD;
712
713 if (opts->num_accecn_fields == 2) {
714 *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
715 ((e1b >> 8) & 0xffff));
716 *ptr++ = htonl(((e1b & 0xff) << 24) |
717 (ceb & 0xffffff));
718 } else if (opts->num_accecn_fields == 1) {
719 *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
720 ((e1b >> 8) & 0xffff));
721 leftover_highbyte = e1b & 0xff;
722 leftover_lowbyte = TCPOPT_NOP;
723 } else if (opts->num_accecn_fields == 0) {
724 leftover_highbyte = TCPOPT_ACCECN1;
725 leftover_lowbyte = len;
726 } else if (opts->num_accecn_fields == 3) {
727 *ptr++ = htonl((TCPOPT_ACCECN1 << 24) | (len << 16) |
728 ((e1b >> 8) & 0xffff));
729 *ptr++ = htonl(((e1b & 0xff) << 24) |
730 (ceb & 0xffffff));
731 *ptr++ = htonl(((e0b & 0xffffff) << 8) |
732 TCPOPT_NOP);
733 }
734 if (tp) {
735 tp->accecn_minlen = 0;
736 tp->accecn_opt_tstamp = tp->tcp_mstamp;
737 tp->accecn_opt_sent_w_dsack = tp->rx_opt.dsack;
738 if (tp->accecn_opt_demand)
739 tp->accecn_opt_demand--;
740 }
741 } else if (tp) {
742 tp->accecn_opt_sent_w_dsack = 0;
743 }
744
745 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
746 *ptr++ = htonl((leftover_highbyte << 24) |
747 (leftover_lowbyte << 16) |
748 (TCPOPT_SACK_PERM << 8) |
749 TCPOLEN_SACK_PERM);
750 leftover_highbyte = TCPOPT_NOP;
751 leftover_lowbyte = TCPOPT_NOP;
752 }
753
754 if (unlikely(OPTION_WSCALE & options)) {
755 u8 highbyte = TCPOPT_NOP;
756
757 /* Do not split a 2-byte leftover across a single
758 * NOP, i.e. replace this NOP only when exactly 1 byte is left
759 * over (in leftover_highbyte).
760 */
761 if (unlikely(leftover_highbyte != TCPOPT_NOP &&
762 leftover_lowbyte == TCPOPT_NOP)) {
763 highbyte = leftover_highbyte;
764 leftover_highbyte = TCPOPT_NOP;
765 }
766 *ptr++ = htonl((highbyte << 24) |
767 (TCPOPT_WINDOW << 16) |
768 (TCPOLEN_WINDOW << 8) |
769 opts->ws);
770 }
771
772 if (unlikely(opts->num_sack_blocks)) {
773 struct tcp_sack_block *sp = tp->rx_opt.dsack ?
774 tp->duplicate_sack : tp->selective_acks;
775 int this_sack;
776
777 *ptr++ = htonl((leftover_highbyte << 24) |
778 (leftover_lowbyte << 16) |
779 (TCPOPT_SACK << 8) |
780 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
781 TCPOLEN_SACK_PERBLOCK)));
782 leftover_highbyte = TCPOPT_NOP;
783 leftover_lowbyte = TCPOPT_NOP;
784
785 for (this_sack = 0; this_sack < opts->num_sack_blocks;
786 ++this_sack) {
787 *ptr++ = htonl(sp[this_sack].start_seq);
788 *ptr++ = htonl(sp[this_sack].end_seq);
789 }
790
791 tp->rx_opt.dsack = 0;
792 } else if (unlikely(leftover_highbyte != TCPOPT_NOP ||
793 leftover_lowbyte != TCPOPT_NOP)) {
794 *ptr++ = htonl((leftover_highbyte << 24) |
795 (leftover_lowbyte << 16) |
796 (TCPOPT_NOP << 8) |
797 TCPOPT_NOP);
798 leftover_highbyte = TCPOPT_NOP;
799 leftover_lowbyte = TCPOPT_NOP;
800 }
801
802 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
803 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
804 u8 *p = (u8 *)ptr;
805 u32 len; /* Fast Open option length */
806
807 if (foc->exp) {
808 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
809 *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
810 TCPOPT_FASTOPEN_MAGIC);
811 p += TCPOLEN_EXP_FASTOPEN_BASE;
812 } else {
813 len = TCPOLEN_FASTOPEN_BASE + foc->len;
814 *p++ = TCPOPT_FASTOPEN;
815 *p++ = len;
816 }
817
818 memcpy(p, foc->val, foc->len);
819 if ((len & 3) == 2) {
820 p[foc->len] = TCPOPT_NOP;
821 p[foc->len + 1] = TCPOPT_NOP;
822 }
823 ptr += (len + 3) >> 2;
824 }
825
826 smc_options_write(ptr, &options);
827
828 mptcp_options_write(th, ptr, tp, opts);
829 }
830
831 static void smc_set_option(struct tcp_sock *tp,
832 struct tcp_out_options *opts,
833 unsigned int *remaining)
834 {
835 #if IS_ENABLED(CONFIG_SMC)
836 if (static_branch_unlikely(&tcp_have_smc) && tp->syn_smc) {
837 tp->syn_smc = !!smc_call_hsbpf(1, tp, syn_option);
838 /* re-check syn_smc */
839 if (tp->syn_smc &&
840 *remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
841 opts->options |= OPTION_SMC;
842 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
843 }
844 }
845 #endif
846 }
847
848 static void smc_set_option_cond(const struct tcp_sock *tp,
849 struct inet_request_sock *ireq,
850 struct tcp_out_options *opts,
851 unsigned int *remaining)
852 {
853 #if IS_ENABLED(CONFIG_SMC)
854 if (static_branch_unlikely(&tcp_have_smc) && tp->syn_smc && ireq->smc_ok) {
855 ireq->smc_ok = !!smc_call_hsbpf(1, tp, synack_option, ireq);
856 /* re-check smc_ok */
857 if (ireq->smc_ok &&
858 *remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
859 opts->options |= OPTION_SMC;
860 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
861 }
862 }
863 #endif
864 }
865
866 static void mptcp_set_option_cond(const struct request_sock *req,
867 struct tcp_out_options *opts,
868 unsigned int *remaining)
869 {
870 if (rsk_is_mptcp(req)) {
871 unsigned int size;
872
873 if (mptcp_synack_options(req, &size, &opts->mptcp)) {
874 if (*remaining >= size) {
875 opts->options |= OPTION_MPTCP;
876 *remaining -= size;
877 }
878 }
879 }
880 }
881
882 static u32 tcp_synack_options_combine_saving(struct tcp_out_options *opts)
883 {
884 /* How much room is there for combining with the alignment padding? */
885 if ((opts->options & (OPTION_SACK_ADVERTISE | OPTION_TS)) ==
886 OPTION_SACK_ADVERTISE)
887 return 2;
888 else if (opts->options & OPTION_WSCALE)
889 return 1;
890 return 0;
891 }
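/* Illustrative example (editorial note): a SYN/ACK that advertises SACK but
 * not timestamps writes SACK_PERM behind two alignment NOPs, so up to 2 bytes
 * of an AccECN option can ride in that padding; with only window scaling
 * (a 3-byte option) a single padding byte is reusable.
 */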
892
893 /* Calculates how large an AccECN option will fit into the @remaining option space.
894 *
895 * AccECN option can sometimes replace NOPs used for alignment of other
896 * TCP options (up to @max_combine_saving available).
897 *
898 * Only solutions with at least @required AccECN fields are accepted.
899 *
900 * Returns: The size of the AccECN option excluding space repurposed from
901 * the alignment of the other options.
902 */
903 static int tcp_options_fit_accecn(struct tcp_out_options *opts, int required,
904 int remaining)
905 {
906 int size = TCP_ACCECN_MAXSIZE;
907 int sack_blocks_reduce = 0;
908 int max_combine_saving;
909 int rem = remaining;
910 int align_size;
911
912 if (opts->use_synack_ecn_bytes)
913 max_combine_saving = tcp_synack_options_combine_saving(opts);
914 else
915 max_combine_saving = opts->num_sack_blocks > 0 ? 2 : 0;
916 opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS;
917 while (opts->num_accecn_fields >= required) {
918 /* Pad to dword if cannot combine */
919 if ((size & 0x3) > max_combine_saving)
920 align_size = ALIGN(size, 4);
921 else
922 align_size = ALIGN_DOWN(size, 4);
923
924 if (rem >= align_size) {
925 size = align_size;
926 break;
927 } else if (opts->num_accecn_fields == required &&
928 opts->num_sack_blocks > 2 &&
929 required > 0) {
930 /* Try to fit the option by removing one SACK block */
931 opts->num_sack_blocks--;
932 sack_blocks_reduce++;
933 rem = rem + TCPOLEN_SACK_PERBLOCK;
934
935 opts->num_accecn_fields = TCP_ACCECN_NUMFIELDS;
936 size = TCP_ACCECN_MAXSIZE;
937 continue;
938 }
939
940 opts->num_accecn_fields--;
941 size -= TCPOLEN_ACCECN_PERFIELD;
942 }
943 if (sack_blocks_reduce > 0) {
944 if (opts->num_accecn_fields >= required)
945 size -= sack_blocks_reduce * TCPOLEN_SACK_PERBLOCK;
946 else
947 opts->num_sack_blocks += sack_blocks_reduce;
948 }
949 if (opts->num_accecn_fields < required)
950 return 0;
951
952 opts->options |= OPTION_ACCECN;
953 return size;
954 }
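/* Worked example (editorial note; assumes the usual 2-byte AccECN option
 * header plus 3-byte counter fields): the full three-counter option is
 * 11 bytes and, with no padding to reuse, is rounded up to 12 bytes of
 * @remaining; a one-counter option is 5 bytes and, if 1-2 bytes of other
 * options' padding are reusable, consumes only ALIGN_DOWN(5, 4) = 4 extra
 * bytes. If even @required fields cannot be fitted (possibly after dropping
 * a SACK block), 0 is returned and no AccECN option is sent in this packet.
 */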
955
956 /* Compute TCP options for SYN packets. This is not the final
957 * network wire format yet.
958 */
959 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
960 struct tcp_out_options *opts,
961 struct tcp_key *key)
962 {
963 struct tcp_sock *tp = tcp_sk(sk);
964 unsigned int remaining = MAX_TCP_OPTION_SPACE;
965 struct tcp_fastopen_request *fastopen = tp->fastopen_req;
966 bool timestamps;
967
968 /* Better than switch (key.type) as it has static branches */
969 if (tcp_key_is_md5(key)) {
970 timestamps = false;
971 opts->options |= OPTION_MD5;
972 remaining -= TCPOLEN_MD5SIG_ALIGNED;
973 } else {
974 timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
975 if (tcp_key_is_ao(key)) {
976 opts->options |= OPTION_AO;
977 remaining -= tcp_ao_len_aligned(key->ao_key);
978 }
979 }
980
981 /* We always get an MSS option. If timestamps are used, the option bytes
982 * that will be seen in normal data packets must be accounted for in the
983 * advertised MSS. But we subtract them from tp->mss_cache so that
984 * calculations in tcp_sendmsg are simpler etc. So account for this
985 * fact here if necessary. If we don't do this correctly, as a
986 * receiver we won't recognize data packets as being full sized when we
987 * should, and thus we won't abide by the delayed ACK rules correctly.
988 * SACKs don't matter, we never delay an ACK when we have any of those
989 * going out. */
990 opts->mss = tcp_advertise_mss(sk);
991 remaining -= TCPOLEN_MSS_ALIGNED;
992
993 if (likely(timestamps)) {
994 opts->options |= OPTION_TS;
995 opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
996 opts->tsecr = tp->rx_opt.ts_recent;
997 remaining -= TCPOLEN_TSTAMP_ALIGNED;
998 }
999 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
1000 opts->ws = tp->rx_opt.rcv_wscale;
1001 opts->options |= OPTION_WSCALE;
1002 remaining -= TCPOLEN_WSCALE_ALIGNED;
1003 }
1004 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
1005 opts->options |= OPTION_SACK_ADVERTISE;
1006 if (unlikely(!(OPTION_TS & opts->options)))
1007 remaining -= TCPOLEN_SACKPERM_ALIGNED;
1008 }
1009
1010 if (fastopen && fastopen->cookie.len >= 0) {
1011 u32 need = fastopen->cookie.len;
1012
1013 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
1014 TCPOLEN_FASTOPEN_BASE;
1015 need = (need + 3) & ~3U; /* Align to 32 bits */
1016 if (remaining >= need) {
1017 opts->options |= OPTION_FAST_OPEN_COOKIE;
1018 opts->fastopen_cookie = &fastopen->cookie;
1019 remaining -= need;
1020 tp->syn_fastopen = 1;
1021 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
1022 }
1023 }
1024
1025 smc_set_option(tp, opts, &remaining);
1026
1027 if (sk_is_mptcp(sk)) {
1028 unsigned int size;
1029
1030 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
1031 if (remaining >= size) {
1032 opts->options |= OPTION_MPTCP;
1033 remaining -= size;
1034 }
1035 }
1036 }
1037
1038 /* A SYN/ACK for a simultaneous open needs the AccECN option, but a plain SYN does not.
1039 * We also attempt to negotiate the use of AccECN on the first
1040 * retransmitted SYN, as described in "3.1.4.1. Retransmitted SYNs"
1041 * of the AccECN draft.
1042 */
1043 if (unlikely((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK) &&
1044 tcp_ecn_mode_accecn(tp) &&
1045 inet_csk(sk)->icsk_retransmits < 2 &&
1046 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) &&
1047 remaining >= TCPOLEN_ACCECN_BASE)) {
1048 opts->use_synack_ecn_bytes = 1;
1049 remaining -= tcp_options_fit_accecn(opts, 0, remaining);
1050 }
1051
1052 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
1053
1054 return MAX_TCP_OPTION_SPACE - remaining;
1055 }
1056
1057 /* Set up TCP options for SYN-ACKs. */
1058 static unsigned int tcp_synack_options(const struct sock *sk,
1059 struct request_sock *req,
1060 unsigned int mss, struct sk_buff *skb,
1061 struct tcp_out_options *opts,
1062 const struct tcp_key *key,
1063 struct tcp_fastopen_cookie *foc,
1064 enum tcp_synack_type synack_type,
1065 struct sk_buff *syn_skb)
1066 {
1067 struct inet_request_sock *ireq = inet_rsk(req);
1068 unsigned int remaining = MAX_TCP_OPTION_SPACE;
1069 struct tcp_request_sock *treq = tcp_rsk(req);
1070
1071 if (tcp_key_is_md5(key)) {
1072 opts->options |= OPTION_MD5;
1073 remaining -= TCPOLEN_MD5SIG_ALIGNED;
1074
1075 /* We can't fit any SACK blocks in a packet with MD5 + TS
1076 * options. There was discussion about disabling SACK
1077 * rather than TS in order to fit in better with old,
1078 * buggy kernels, but that was deemed to be unnecessary.
1079 */
1080 if (synack_type != TCP_SYNACK_COOKIE)
1081 ireq->tstamp_ok &= !ireq->sack_ok;
1082 } else if (tcp_key_is_ao(key)) {
1083 opts->options |= OPTION_AO;
1084 remaining -= tcp_ao_len_aligned(key->ao_key);
1085 ireq->tstamp_ok &= !ireq->sack_ok;
1086 }
1087
1088 /* We always send an MSS option. */
1089 opts->mss = mss;
1090 remaining -= TCPOLEN_MSS_ALIGNED;
1091
1092 if (likely(ireq->wscale_ok)) {
1093 opts->ws = ireq->rcv_wscale;
1094 opts->options |= OPTION_WSCALE;
1095 remaining -= TCPOLEN_WSCALE_ALIGNED;
1096 }
1097 if (likely(ireq->tstamp_ok)) {
1098 opts->options |= OPTION_TS;
1099 opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
1100 tcp_rsk(req)->ts_off;
1101 if (!tcp_rsk(req)->snt_tsval_first) {
1102 if (!opts->tsval)
1103 opts->tsval = ~0U;
1104 tcp_rsk(req)->snt_tsval_first = opts->tsval;
1105 }
1106 WRITE_ONCE(tcp_rsk(req)->snt_tsval_last, opts->tsval);
1107 opts->tsecr = req->ts_recent;
1108 remaining -= TCPOLEN_TSTAMP_ALIGNED;
1109 }
1110 if (likely(ireq->sack_ok)) {
1111 opts->options |= OPTION_SACK_ADVERTISE;
1112 if (unlikely(!ireq->tstamp_ok))
1113 remaining -= TCPOLEN_SACKPERM_ALIGNED;
1114 }
1115 if (foc != NULL && foc->len >= 0) {
1116 u32 need = foc->len;
1117
1118 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
1119 TCPOLEN_FASTOPEN_BASE;
1120 need = (need + 3) & ~3U; /* Align to 32 bits */
1121 if (remaining >= need) {
1122 opts->options |= OPTION_FAST_OPEN_COOKIE;
1123 opts->fastopen_cookie = foc;
1124 remaining -= need;
1125 }
1126 }
1127
1128 mptcp_set_option_cond(req, opts, &remaining);
1129
1130 smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
1131
1132 if (treq->accecn_ok &&
1133 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option) &&
1134 synack_type != TCP_SYNACK_RETRANS && remaining >= TCPOLEN_ACCECN_BASE) {
1135 opts->use_synack_ecn_bytes = 1;
1136 remaining -= tcp_options_fit_accecn(opts, 0, remaining);
1137 }
1138
1139 bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
1140 synack_type, opts, &remaining);
1141
1142 return MAX_TCP_OPTION_SPACE - remaining;
1143 }
1144
1145 /* Compute TCP options for ESTABLISHED sockets. This is not the
1146 * final wire format yet.
1147 */
1148 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
1149 struct tcp_out_options *opts,
1150 struct tcp_key *key)
1151 {
1152 struct tcp_sock *tp = tcp_sk(sk);
1153 unsigned int size = 0;
1154 unsigned int eff_sacks;
1155
1156 opts->options = 0;
1157
1158 /* Better than switch (key.type) as it has static branches */
1159 if (tcp_key_is_md5(key)) {
1160 opts->options |= OPTION_MD5;
1161 size += TCPOLEN_MD5SIG_ALIGNED;
1162 } else if (tcp_key_is_ao(key)) {
1163 opts->options |= OPTION_AO;
1164 size += tcp_ao_len_aligned(key->ao_key);
1165 }
1166
1167 if (likely(tp->rx_opt.tstamp_ok)) {
1168 opts->options |= OPTION_TS;
1169 opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
1170 tp->tsoffset : 0;
1171 opts->tsecr = tp->rx_opt.ts_recent;
1172 size += TCPOLEN_TSTAMP_ALIGNED;
1173 }
1174
1175 /* MPTCP options have precedence over SACK for the limited TCP
1176 * option space because an MPTCP connection would be forced to
1177 * fall back to regular TCP if a required multipath option is
1178 * missing. SACK still gets a chance to use whatever space is
1179 * left.
1180 */
1181 if (sk_is_mptcp(sk)) {
1182 unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1183 unsigned int opt_size = 0;
1184
1185 if (mptcp_established_options(sk, skb, &opt_size, remaining,
1186 &opts->mptcp)) {
1187 opts->options |= OPTION_MPTCP;
1188 size += opt_size;
1189 }
1190 }
1191
1192 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
1193 if (unlikely(eff_sacks)) {
1194 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1195 if (likely(remaining >= TCPOLEN_SACK_BASE_ALIGNED +
1196 TCPOLEN_SACK_PERBLOCK)) {
1197 opts->num_sack_blocks =
1198 min_t(unsigned int, eff_sacks,
1199 (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
1200 TCPOLEN_SACK_PERBLOCK);
1201
1202 size += TCPOLEN_SACK_BASE_ALIGNED +
1203 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
1204 } else {
1205 opts->num_sack_blocks = 0;
1206 }
1207 } else {
1208 opts->num_sack_blocks = 0;
1209 }
1210
1211 if (tcp_ecn_mode_accecn(tp)) {
1212 int ecn_opt = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option);
1213
1214 if (ecn_opt && tp->saw_accecn_opt &&
1215 (ecn_opt >= TCP_ACCECN_OPTION_PERSIST ||
1216 !tcp_accecn_opt_fail_send(tp)) &&
1217 (ecn_opt >= TCP_ACCECN_OPTION_FULL || tp->accecn_opt_demand ||
1218 tcp_accecn_option_beacon_check(sk))) {
1219 opts->use_synack_ecn_bytes = 0;
1220 size += tcp_options_fit_accecn(opts, tp->accecn_minlen,
1221 MAX_TCP_OPTION_SPACE - size);
1222 }
1223 }
1224
1225 if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
1226 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
1227 unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1228
1229 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
1230
1231 size = MAX_TCP_OPTION_SPACE - remaining;
1232 }
1233
1234 return size;
1235 }
1236
1237
1238 /* TCP SMALL QUEUES (TSQ)
1239 *
1240 * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev)
1241 * to reduce RTT and bufferbloat.
1242 * We do this using a special skb destructor (tcp_wfree).
1243 *
1244 * It's important that tcp_wfree() can be replaced by sock_wfree() in the event an skb
1245 * needs to be reallocated in a driver.
1246 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
1247 *
1248 * Since transmit from skb destructor is forbidden, we use a BH work item
1249 * to process all sockets that eventually need to send more skbs.
1250 * We use one work item per cpu, with its own queue of sockets.
1251 */
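/* Editorial sketch of the round trip (not part of the original comment): when
 * too many bytes of this flow are still queued in qdisc/device queues, further
 * transmission is throttled (TSQF_THROTTLED). Once a driver frees an skb,
 * tcp_wfree() below queues the socket on the per-cpu tsq_work list, and
 * tcp_tsq_workfn() -> tcp_tsq_handler() later resumes sending from BH context,
 * or defers the work to tcp_release_cb() if the socket is owned by the user.
 */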
1252 struct tsq_work {
1253 struct work_struct work;
1254 struct list_head head; /* queue of tcp sockets */
1255 };
1256 static DEFINE_PER_CPU(struct tsq_work, tsq_work);
1257
1258 static void tcp_tsq_write(struct sock *sk)
1259 {
1260 if ((1 << sk->sk_state) &
1261 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1262 TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
1263 struct tcp_sock *tp = tcp_sk(sk);
1264
1265 if (tp->lost_out > tp->retrans_out &&
1266 tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
1267 tcp_mstamp_refresh(tp);
1268 tcp_xmit_retransmit_queue(sk);
1269 }
1270
1271 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1272 0, GFP_ATOMIC);
1273 }
1274 }
1275
1276 static void tcp_tsq_handler(struct sock *sk)
1277 {
1278 bh_lock_sock(sk);
1279 if (!sock_owned_by_user(sk))
1280 tcp_tsq_write(sk);
1281 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
1282 sock_hold(sk);
1283 bh_unlock_sock(sk);
1284 }
1285 /*
1286 * One work item per cpu tries to send more skbs.
1287 * We run in BH context but need to disable irqs when
1288 * transferring tsq->head, because tcp_wfree() might
1289 * interrupt us (non-NAPI drivers).
1290 */
1291 static void tcp_tsq_workfn(struct work_struct *work)
1292 {
1293 struct tsq_work *tsq = container_of(work, struct tsq_work, work);
1294 LIST_HEAD(list);
1295 unsigned long flags;
1296 struct list_head *q, *n;
1297 struct tcp_sock *tp;
1298 struct sock *sk;
1299
1300 local_irq_save(flags);
1301 list_splice_init(&tsq->head, &list);
1302 local_irq_restore(flags);
1303
1304 list_for_each_safe(q, n, &list) {
1305 tp = list_entry(q, struct tcp_sock, tsq_node);
1306 list_del(&tp->tsq_node);
1307
1308 sk = (struct sock *)tp;
1309 smp_mb__before_atomic();
1310 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
1311
1312 tcp_tsq_handler(sk);
1313 sk_free(sk);
1314 }
1315 }
1316
1317 #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \
1318 TCPF_WRITE_TIMER_DEFERRED | \
1319 TCPF_DELACK_TIMER_DEFERRED | \
1320 TCPF_MTU_REDUCED_DEFERRED | \
1321 TCPF_ACK_DEFERRED)
1322 /**
1323 * tcp_release_cb - tcp release_sock() callback
1324 * @sk: socket
1325 *
1326 * called from release_sock() to perform protocol dependent
1327 * actions before socket release.
1328 */
1329 void tcp_release_cb(struct sock *sk)
1330 {
1331 unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags);
1332 unsigned long nflags;
1333
1334 /* perform an atomic operation only if at least one flag is set */
1335 do {
1336 if (!(flags & TCP_DEFERRED_ALL))
1337 return;
1338 nflags = flags & ~TCP_DEFERRED_ALL;
1339 } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags));
1340
1341 if (flags & TCPF_TSQ_DEFERRED) {
1342 tcp_tsq_write(sk);
1343 __sock_put(sk);
1344 }
1345
1346 if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1347 tcp_write_timer_handler(sk);
1348 __sock_put(sk);
1349 }
1350 if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1351 tcp_delack_timer_handler(sk);
1352 __sock_put(sk);
1353 }
1354 if (flags & TCPF_MTU_REDUCED_DEFERRED) {
1355 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1356 __sock_put(sk);
1357 }
1358 if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
1359 tcp_send_ack(sk);
1360 }
1361 EXPORT_IPV6_MOD(tcp_release_cb);
1362
1363 void __init tcp_tsq_work_init(void)
1364 {
1365 int i;
1366
1367 for_each_possible_cpu(i) {
1368 struct tsq_work *tsq = &per_cpu(tsq_work, i);
1369
1370 INIT_LIST_HEAD(&tsq->head);
1371 INIT_WORK(&tsq->work, tcp_tsq_workfn);
1372 }
1373 }
1374
1375 /*
1376 * Write buffer destructor automatically called from kfree_skb.
1377 * We can't xmit new skbs from this context, as we might already
1378 * hold qdisc lock.
1379 */
1380 void tcp_wfree(struct sk_buff *skb)
1381 {
1382 struct sock *sk = skb->sk;
1383 struct tcp_sock *tp = tcp_sk(sk);
1384 unsigned long flags, nval, oval;
1385 struct tsq_work *tsq;
1386 bool empty;
1387
1388 /* Keep one reference on sk_wmem_alloc.
1389 * Will be released by sk_free() from here or tcp_tsq_workfn()
1390 */
1391 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
1392
1393 /* If this softirq is serviced by ksoftirqd, we are likely under stress.
1394 * Wait until our queues (qdisc + devices) are drained.
1395 * This gives :
1396 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
1397 * - chance for incoming ACK (processed by another cpu maybe)
1398 * to migrate this flow (skb->ooo_okay will be eventually set)
1399 */
1400 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
1401 goto out;
1402
1403 oval = smp_load_acquire(&sk->sk_tsq_flags);
1404 do {
1405 if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1406 goto out;
1407
1408 nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1409 } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
1410
1411 /* queue this socket to BH workqueue */
1412 local_irq_save(flags);
1413 tsq = this_cpu_ptr(&tsq_work);
1414 empty = list_empty(&tsq->head);
1415 list_add(&tp->tsq_node, &tsq->head);
1416 if (empty)
1417 queue_work(system_bh_wq, &tsq->work);
1418 local_irq_restore(flags);
1419 return;
1420 out:
1421 sk_free(sk);
1422 }
1423
1424 /* Note: Called under soft irq.
1425 * We can call TCP stack right away, unless socket is owned by user.
1426 */
1427 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1428 {
1429 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1430 struct sock *sk = (struct sock *)tp;
1431
1432 tcp_tsq_handler(sk);
1433 sock_put(sk);
1434
1435 return HRTIMER_NORESTART;
1436 }
1437
1438 static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1439 u64 prior_wstamp)
1440 {
1441 struct tcp_sock *tp = tcp_sk(sk);
1442
1443 if (sk->sk_pacing_status != SK_PACING_NONE) {
1444 unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
1445
1446 /* Original sch_fq does not pace first 10 MSS
1447 * Note that tp->data_segs_out overflows after 2^32 packets,
1448 * this is a minor annoyance.
1449 */
1450 if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1451 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1452 u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1453
1454 /* take into account OS jitter */
1455 len_ns -= min_t(u64, len_ns / 2, credit);
1456 tp->tcp_wstamp_ns += len_ns;
1457 }
1458 }
1459 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1460 }
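/* Worked example (editorial note): at a pacing rate of 12,500,000 B/s
 * (100 Mbit/s), a 1500-byte skb gives len_ns = 1500 * NSEC_PER_SEC / rate =
 * 120,000 ns. If 30,000 ns of credit accumulated since the previous send,
 * min(len_ns / 2, credit) = 30,000 ns is forgiven and tcp_wstamp_ns advances
 * by 90,000 ns, pushing out the earliest departure time of the next packet.
 */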
1461
1462 /* Snapshot the current delivery information in the skb, to generate
1463 * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
1464 */
1465 static void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
1466 {
1467 struct tcp_sock *tp = tcp_sk(sk);
1468
1469 /* In general we need to start delivery rate samples from the
1470 * time we received the most recent ACK, to ensure we include
1471 * the full time the network needs to deliver all in-flight
1472 * packets. If there are no packets in flight yet, then we
1473 * know that any ACKs after now indicate that the network was
1474 * able to deliver those packets completely in the sampling
1475 * interval between now and the next ACK.
1476 *
1477 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
1478 * because the latter is a guess based on RTO and loss-marking
1479 * heuristics. We don't want spurious RTOs or loss markings to cause
1480 * a spuriously small time interval, causing a spuriously high
1481 * bandwidth estimate.
1482 */
1483 if (!tp->packets_out) {
1484 u64 tstamp_us = tcp_skb_timestamp_us(skb);
1485
1486 tp->first_tx_mstamp = tstamp_us;
1487 tp->delivered_mstamp = tstamp_us;
1488 }
1489
1490 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
1491 TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
1492 TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
1493 TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
1494 TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
1495 }
1496
1497 INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1498 INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1499 INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1500
1501 /* This routine actually transmits TCP packets queued in by
1502 * tcp_do_sendmsg(). This is used by both the initial
1503 * transmission and possible later retransmissions.
1504 * All SKB's seen here are completely headerless. It is our
1505 * job to build the TCP header, and pass the packet down to
1506 * IP so it can do the same plus pass the packet off to the
1507 * device.
1508 *
1509 * We are working here with either a clone of the original
1510 * SKB, or a fresh unique copy made by the retransmit engine.
1511 */
1512 static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1513 int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1514 {
1515 const struct inet_connection_sock *icsk = inet_csk(sk);
1516 struct inet_sock *inet;
1517 struct tcp_sock *tp;
1518 struct tcp_skb_cb *tcb;
1519 struct tcp_out_options opts;
1520 unsigned int tcp_options_size, tcp_header_size;
1521 struct sk_buff *oskb = NULL;
1522 struct tcp_key key;
1523 struct tcphdr *th;
1524 u64 prior_wstamp;
1525 int err;
1526
1527 BUG_ON(!skb || !tcp_skb_pcount(skb));
1528 tp = tcp_sk(sk);
1529 prior_wstamp = tp->tcp_wstamp_ns;
1530 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1531 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
1532 if (clone_it) {
1533 oskb = skb;
1534
1535 tcp_skb_tsorted_save(oskb) {
1536 if (unlikely(skb_cloned(oskb)))
1537 skb = pskb_copy(oskb, gfp_mask);
1538 else
1539 skb = skb_clone(oskb, gfp_mask);
1540 } tcp_skb_tsorted_restore(oskb);
1541
1542 if (unlikely(!skb))
1543 return -ENOBUFS;
1544 /* retransmit skbs might have a non zero value in skb->dev
1545 * because skb->dev is aliased with skb->rbnode.rb_left
1546 */
1547 skb->dev = NULL;
1548 }
1549
1550 inet = inet_sk(sk);
1551 tcb = TCP_SKB_CB(skb);
1552 memset(&opts, 0, sizeof(opts));
1553
1554 tcp_get_current_key(sk, &key);
1555 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
1556 tcp_options_size = tcp_syn_options(sk, skb, &opts, &key);
1557 } else {
1558 tcp_options_size = tcp_established_options(sk, skb, &opts, &key);
1559 /* Force a PSH flag on all (GSO) packets to expedite GRO flush
1560 * at the receiver: this slightly improves GRO performance.
1561 * Note that we do not force the PSH flag for non GSO packets,
1562 * because they might be sent under high congestion events,
1563 * and in this case it is better to delay the delivery of 1-MSS
1564 * packets and thus the corresponding ACK packet that would
1565 * release the following packet.
1566 */
1567 if (tcp_skb_pcount(skb) > 1)
1568 tcb->tcp_flags |= TCPHDR_PSH;
1569 }
1570 tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
1571
1572 /* We set skb->ooo_okay to one if this packet can select
1573 * a different TX queue than prior packets of this flow,
1574 * to avoid self-inflicted reorders.
1575 * The 'other' queue decision is based on current cpu number
1576 * if XPS is enabled, or sk->sk_txhash otherwise.
1577 * We can switch to another (and better) queue if:
1578 * 1) No packet with payload is in qdisc/device queues.
1579 * Delays in TX completion can defeat the test
1580 * even if packets were already sent.
1581 * 2) Or rtx queue is empty.
1582 * This mitigates above case if ACK packets for
1583 * all prior packets were already processed.
1584 */
1585 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) ||
1586 tcp_rtx_queue_empty(sk);
1587
1588 /* If we had to use memory reserve to allocate this skb,
1589 * this might cause drops if packet is looped back :
1590 * Other socket might not have SOCK_MEMALLOC.
1591 * Packets not looped back do not care about pfmemalloc.
1592 */
1593 skb->pfmemalloc = 0;
1594
1595 __skb_push(skb, tcp_header_size);
1596 skb_reset_transport_header(skb);
1597
1598 skb_orphan(skb);
1599 skb->sk = sk;
1600 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1601 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1602
1603 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
1604
1605 /* Build TCP header and checksum it. */
1606 th = (struct tcphdr *)skb->data;
1607 th->source = inet->inet_sport;
1608 th->dest = inet->inet_dport;
1609 th->seq = htonl(tcb->seq);
1610 th->ack_seq = htonl(rcv_nxt);
1611 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
1612 (tcb->tcp_flags & TCPHDR_FLAGS_MASK));
1613
1614 th->check = 0;
1615 th->urg_ptr = 0;
1616
1617 /* The urg_mode check is necessary during a below snd_una win probe */
1618 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
1619 if (before(tp->snd_up, tcb->seq + 0x10000)) {
1620 th->urg_ptr = htons(tp->snd_up - tcb->seq);
1621 th->urg = 1;
1622 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
1623 th->urg_ptr = htons(0xFFFF);
1624 th->urg = 1;
1625 }
1626 }
1627
1628 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1629 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1630 th->window = htons(tcp_select_window(sk));
1631 tcp_ecn_send(sk, skb, th, tcp_header_size);
1632 } else {
1633 /* RFC1323: The window in SYN & SYN/ACK segments
1634 * is never scaled.
1635 */
1636 th->window = htons(min(tp->rcv_wnd, 65535U));
1637 }
1638
1639 tcp_options_write(th, tp, NULL, &opts, &key);
1640
1641 if (tcp_key_is_md5(&key)) {
1642 #ifdef CONFIG_TCP_MD5SIG
1643 /* Calculate the MD5 hash, as we have all we need now */
1644 sk_gso_disable(sk);
1645 tp->af_specific->calc_md5_hash(opts.hash_location,
1646 key.md5_key, sk, skb);
1647 #endif
1648 } else if (tcp_key_is_ao(&key)) {
1649 int err;
1650
1651 err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th,
1652 opts.hash_location);
1653 if (err) {
1654 sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_NOT_SPECIFIED);
1655 return -ENOMEM;
1656 }
1657 }
1658
1659 /* BPF prog is the last one writing header option */
1660 bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1661
1662 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1663 tcp_v6_send_check, tcp_v4_send_check,
1664 sk, skb);
1665
1666 if (likely(tcb->tcp_flags & TCPHDR_ACK))
1667 tcp_event_ack_sent(sk, rcv_nxt);
1668
1669 if (skb->len != tcp_header_size) {
1670 tcp_event_data_sent(tp, sk);
1671 tp->data_segs_out += tcp_skb_pcount(skb);
1672 tp->bytes_sent += skb->len - tcp_header_size;
1673 }
1674
1675 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1676 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1677 tcp_skb_pcount(skb));
1678
1679 tp->segs_out += tcp_skb_pcount(skb);
1680 skb_set_hash_from_sk(skb, sk);
1681 /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1682 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1683 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1684
1685 /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1686
1687 /* Cleanup our debris for IP stacks */
1688 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1689 sizeof(struct inet6_skb_parm)));
1690
1691 tcp_add_tx_delay(skb, tp);
1692
1693 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
1694 inet6_csk_xmit, ip_queue_xmit,
1695 sk, skb, &inet->cork.fl);
1696
1697 if (unlikely(err > 0)) {
1698 tcp_enter_cwr(sk);
1699 err = net_xmit_eval(err);
1700 }
1701 if (!err && oskb) {
1702 tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1703 tcp_rate_skb_sent(sk, oskb);
1704 }
1705 return err;
1706 }
1707
tcp_transmit_skb(struct sock * sk,struct sk_buff * skb,int clone_it,gfp_t gfp_mask)1708 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1709 gfp_t gfp_mask)
1710 {
1711 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1712 tcp_sk(sk)->rcv_nxt);
1713 }
1714
1715 /* This routine just queues the buffer for sending.
1716 *
1717 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
1718 * otherwise socket can stall.
1719 */
tcp_queue_skb(struct sock * sk,struct sk_buff * skb)1720 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1721 {
1722 struct tcp_sock *tp = tcp_sk(sk);
1723
1724 /* Advance write_seq and place onto the write_queue. */
1725 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1726 __skb_header_release(skb);
1727 psp_enqueue_set_decrypted(sk, skb);
1728 tcp_add_write_queue_tail(sk, skb);
1729 sk_wmem_queued_add(sk, skb->truesize);
1730 sk_mem_charge(sk, skb->truesize);
1731 }
1732
1733 /* Initialize TSO segments for a packet. */
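/* For example, a 4000 byte skb with mss_now == 1460 gets
 * tcp_gso_size = 1460 and pcount = DIV_ROUND_UP(4000, 1460) = 3,
 * while an skb at or below mss_now stays a single segment.
 */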
tcp_set_skb_tso_segs(struct sk_buff * skb,unsigned int mss_now)1734 static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1735 {
1736 int tso_segs;
1737
1738 if (skb->len <= mss_now) {
1739 /* Avoid the costly divide in the normal
1740 * non-TSO case.
1741 */
1742 TCP_SKB_CB(skb)->tcp_gso_size = 0;
1743 tcp_skb_pcount_set(skb, 1);
1744 return 1;
1745 }
1746 TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1747 tso_segs = DIV_ROUND_UP(skb->len, mss_now);
1748 tcp_skb_pcount_set(skb, tso_segs);
1749 return tso_segs;
1750 }
1751
1752 /* Pcount in the middle of the write queue got changed, we need to do various
1753 * tweaks to fix counters
1754 */
tcp_adjust_pcount(struct sock * sk,const struct sk_buff * skb,int decr)1755 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1756 {
1757 struct tcp_sock *tp = tcp_sk(sk);
1758
1759 tp->packets_out -= decr;
1760
1761 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1762 tp->sacked_out -= decr;
1763 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1764 tp->retrans_out -= decr;
1765 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1766 tp->lost_out -= decr;
1767
1768 /* Reno case is special. Sigh... */
1769 if (tcp_is_reno(tp) && decr > 0)
1770 tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1771
1772 tcp_verify_left_out(tp);
1773 }
1774
tcp_has_tx_tstamp(const struct sk_buff * skb)1775 static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
1776 {
1777 return TCP_SKB_CB(skb)->txstamp_ack ||
1778 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
1779 }
1780
tcp_fragment_tstamp(struct sk_buff * skb,struct sk_buff * skb2)1781 static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1782 {
1783 struct skb_shared_info *shinfo = skb_shinfo(skb);
1784
1785 if (unlikely(tcp_has_tx_tstamp(skb)) &&
1786 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1787 struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1788 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1789
1790 shinfo->tx_flags &= ~tsflags;
1791 shinfo2->tx_flags |= tsflags;
1792 swap(shinfo->tskey, shinfo2->tskey);
1793 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1794 TCP_SKB_CB(skb)->txstamp_ack = 0;
1795 }
1796 }
1797
tcp_skb_fragment_eor(struct sk_buff * skb,struct sk_buff * skb2)1798 static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1799 {
1800 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1801 TCP_SKB_CB(skb)->eor = 0;
1802 }
1803
1804 /* Insert buff after skb on the write or rtx queue of sk. */
tcp_insert_write_queue_after(struct sk_buff * skb,struct sk_buff * buff,struct sock * sk,enum tcp_queue tcp_queue)1805 static void tcp_insert_write_queue_after(struct sk_buff *skb,
1806 struct sk_buff *buff,
1807 struct sock *sk,
1808 enum tcp_queue tcp_queue)
1809 {
1810 if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
1811 __skb_queue_after(&sk->sk_write_queue, skb, buff);
1812 else
1813 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
1814 }
1815
1816 /* Function to create two new TCP segments. Shrinks the given segment
1817 * to the specified size and appends a new segment with the rest of the
1818 * packet to the list. This won't be called frequently, I hope.
1819 * Remember, these are still headerless SKBs at this point.
1820 */
tcp_fragment(struct sock * sk,enum tcp_queue tcp_queue,struct sk_buff * skb,u32 len,unsigned int mss_now,gfp_t gfp)1821 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1822 struct sk_buff *skb, u32 len,
1823 unsigned int mss_now, gfp_t gfp)
1824 {
1825 struct tcp_sock *tp = tcp_sk(sk);
1826 struct sk_buff *buff;
1827 int old_factor;
1828 long limit;
1829 u16 flags;
1830 int nlen;
1831
1832 if (WARN_ON(len > skb->len))
1833 return -EINVAL;
1834
1835 DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
1836
1837 /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1838 * We need some allowance to not penalize applications setting small
1839 * SO_SNDBUF values.
1840 * Also allow first and last skb in retransmit queue to be split.
1841 */
1842 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
1843 if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1844 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1845 skb != tcp_rtx_queue_head(sk) &&
1846 skb != tcp_rtx_queue_tail(sk))) {
1847 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1848 return -ENOMEM;
1849 }
1850
1851 if (skb_unclone_keeptruesize(skb, gfp))
1852 return -ENOMEM;
1853
1854 /* Get a new skb... force flag on. */
1855 buff = tcp_stream_alloc_skb(sk, gfp, true);
1856 if (!buff)
1857 return -ENOMEM; /* We'll just try again later. */
1858 skb_copy_decrypted(buff, skb);
1859 mptcp_skb_ext_copy(buff, skb);
1860
1861 sk_wmem_queued_add(sk, buff->truesize);
1862 sk_mem_charge(sk, buff->truesize);
1863 nlen = skb->len - len;
1864 buff->truesize += nlen;
1865 skb->truesize -= nlen;
1866
1867 /* Correct the sequence numbers. */
1868 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1869 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1870 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1871
1872 /* PSH and FIN should only be set in the second packet. */
1873 flags = TCP_SKB_CB(skb)->tcp_flags;
1874 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1875 TCP_SKB_CB(buff)->tcp_flags = flags;
1876 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1877 tcp_skb_fragment_eor(skb, buff);
1878
1879 skb_split(skb, buff, len);
1880
1881 skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC);
1882 tcp_fragment_tstamp(skb, buff);
1883
1884 old_factor = tcp_skb_pcount(skb);
1885
1886 /* Fix up tso_factor for both original and new SKB. */
1887 tcp_set_skb_tso_segs(skb, mss_now);
1888 tcp_set_skb_tso_segs(buff, mss_now);
1889
1890 /* Update delivered info for the new segment */
1891 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1892
1893 /* If this packet has been sent out already, we must
1894 * adjust the various packet counters.
1895 */
1896 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1897 int diff = old_factor - tcp_skb_pcount(skb) -
1898 tcp_skb_pcount(buff);
1899
1900 if (diff)
1901 tcp_adjust_pcount(sk, skb, diff);
1902 }
1903
1904 /* Link BUFF into the send queue. */
1905 __skb_header_release(buff);
1906 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1907 if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1908 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
1909
1910 return 0;
1911 }
1912
1913 /* This is similar to __pskb_pull_tail(). The difference is that pulled
1914 * data is not copied, but immediately discarded.
1915 */
__pskb_trim_head(struct sk_buff * skb,int len)1916 static int __pskb_trim_head(struct sk_buff *skb, int len)
1917 {
1918 struct skb_shared_info *shinfo;
1919 int i, k, eat;
1920
1921 DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
1922 eat = len;
1923 k = 0;
1924 shinfo = skb_shinfo(skb);
1925 for (i = 0; i < shinfo->nr_frags; i++) {
1926 int size = skb_frag_size(&shinfo->frags[i]);
1927
1928 if (size <= eat) {
1929 skb_frag_unref(skb, i);
1930 eat -= size;
1931 } else {
1932 shinfo->frags[k] = shinfo->frags[i];
1933 if (eat) {
1934 skb_frag_off_add(&shinfo->frags[k], eat);
1935 skb_frag_size_sub(&shinfo->frags[k], eat);
1936 eat = 0;
1937 }
1938 k++;
1939 }
1940 }
1941 shinfo->nr_frags = k;
1942
1943 skb->data_len -= len;
1944 skb->len = skb->data_len;
1945 return len;
1946 }
1947
1948 /* Remove acked data from a packet in the transmit queue. */
tcp_trim_head(struct sock * sk,struct sk_buff * skb,u32 len)1949 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1950 {
1951 u32 delta_truesize;
1952
1953 if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
1954 return -ENOMEM;
1955
1956 delta_truesize = __pskb_trim_head(skb, len);
1957
1958 TCP_SKB_CB(skb)->seq += len;
1959
1960 skb->truesize -= delta_truesize;
1961 sk_wmem_queued_add(sk, -delta_truesize);
1962 if (!skb_zcopy_pure(skb))
1963 sk_mem_uncharge(sk, delta_truesize);
1964
1965 /* Any change of skb->len requires recalculation of tso factor. */
1966 if (tcp_skb_pcount(skb) > 1)
1967 tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1968
1969 return 0;
1970 }
1971
1972 /* Calculate MSS not accounting any TCP options. */
__tcp_mtu_to_mss(struct sock * sk,int pmtu)1973 static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1974 {
1975 const struct tcp_sock *tp = tcp_sk(sk);
1976 const struct inet_connection_sock *icsk = inet_csk(sk);
1977 int mss_now;
1978
1979 /* Calculate base mss without TCP options:
1980 It is MMS_S - sizeof(tcphdr) of rfc1122
1981 */
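/* For example, an IPv4 path MTU of 1500 with no IP options or
 * extension headers yields 1500 - 20 - 20 = 1460, later clamped by
 * mss_clamp and floored at sysctl_tcp_min_snd_mss.
 */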
1982 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1983
1984 /* Clamp it (mss_clamp does not include tcp options) */
1985 if (mss_now > tp->rx_opt.mss_clamp)
1986 mss_now = tp->rx_opt.mss_clamp;
1987
1988 /* Now subtract optional transport overhead */
1989 mss_now -= icsk->icsk_ext_hdr_len;
1990
1991 /* Then reserve room for full set of TCP options and 8 bytes of data */
1992 mss_now = max(mss_now,
1993 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
1994 return mss_now;
1995 }
1996
1997 /* Calculate MSS. Not accounting for SACKs here. */
tcp_mtu_to_mss(struct sock * sk,int pmtu)1998 int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1999 {
2000 /* Subtract TCP options size, not including SACKs */
2001 return __tcp_mtu_to_mss(sk, pmtu) -
2002 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
2003 }
2004 EXPORT_IPV6_MOD(tcp_mtu_to_mss);
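/* For example, with timestamps enabled (tcp_header_len == 20 + 12) and
 * no IP options, an IPv4 path MTU of 1500 maps to an MSS of 1448 here,
 * and tcp_mss_to_mtu() below maps 1448 back to 1500.
 */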
2005
2006 /* Inverse of above */
tcp_mss_to_mtu(struct sock * sk,int mss)2007 int tcp_mss_to_mtu(struct sock *sk, int mss)
2008 {
2009 const struct tcp_sock *tp = tcp_sk(sk);
2010 const struct inet_connection_sock *icsk = inet_csk(sk);
2011
2012 return mss +
2013 tp->tcp_header_len +
2014 icsk->icsk_ext_hdr_len +
2015 icsk->icsk_af_ops->net_header_len;
2016 }
2017 EXPORT_SYMBOL(tcp_mss_to_mtu);
2018
2019 /* MTU probing init per socket */
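/* For example, on an IPv4 socket search_high starts at mss_clamp plus
 * 40 bytes of bare TCP/IP headers, while search_low is the MTU
 * equivalent (via tcp_mss_to_mtu) of the configured tcp_base_mss.
 */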
tcp_mtup_init(struct sock * sk)2020 void tcp_mtup_init(struct sock *sk)
2021 {
2022 struct tcp_sock *tp = tcp_sk(sk);
2023 struct inet_connection_sock *icsk = inet_csk(sk);
2024 struct net *net = sock_net(sk);
2025
2026 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
2027 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
2028 icsk->icsk_af_ops->net_header_len;
2029 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
2030 icsk->icsk_mtup.probe_size = 0;
2031 if (icsk->icsk_mtup.enabled)
2032 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2033 }
2034
2035 /* This function synchronizes snd mss to the current pmtu/exthdr set.
2036
2037 tp->rx_opt.user_mss is mss set by the user via TCP_MAXSEG. It does NOT count
2038 for TCP options, but includes only bare TCP header.
2039
2040 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
2041 It is minimum of user_mss and mss received with SYN.
2042 It also does not include TCP options.
2043
2044 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
2045
2046 tp->mss_cache is current effective sending mss, including
2047 all tcp options except for SACKs. It is evaluated,
2048 taking into account current pmtu, but never exceeds
2049 tp->rx_opt.mss_clamp.
2050
2051 NOTE1. rfc1122 clearly states that advertised MSS
2052 DOES NOT include either tcp or ip options.
2053
2054 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
2055 are READ ONLY outside this function. --ANK (980731)
2056 */
tcp_sync_mss(struct sock * sk,u32 pmtu)2057 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
2058 {
2059 struct tcp_sock *tp = tcp_sk(sk);
2060 struct inet_connection_sock *icsk = inet_csk(sk);
2061 int mss_now;
2062
2063 if (icsk->icsk_mtup.search_high > pmtu)
2064 icsk->icsk_mtup.search_high = pmtu;
2065
2066 mss_now = tcp_mtu_to_mss(sk, pmtu);
2067 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
2068
2069 /* And store cached results */
2070 icsk->icsk_pmtu_cookie = pmtu;
2071 if (icsk->icsk_mtup.enabled)
2072 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
2073 tp->mss_cache = mss_now;
2074
2075 return mss_now;
2076 }
2077 EXPORT_IPV6_MOD(tcp_sync_mss);
2078
2079 /* Compute the current effective MSS, taking SACKs and IP options,
2080 * and even PMTU discovery events into account.
2081 */
tcp_current_mss(struct sock * sk)2082 unsigned int tcp_current_mss(struct sock *sk)
2083 {
2084 const struct tcp_sock *tp = tcp_sk(sk);
2085 const struct dst_entry *dst = __sk_dst_get(sk);
2086 u32 mss_now;
2087 unsigned int header_len;
2088 struct tcp_out_options opts;
2089 struct tcp_key key;
2090
2091 mss_now = tp->mss_cache;
2092
2093 if (dst) {
2094 u32 mtu = dst_mtu(dst);
2095 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
2096 mss_now = tcp_sync_mss(sk, mtu);
2097 }
2098 tcp_get_current_key(sk, &key);
2099 header_len = tcp_established_options(sk, NULL, &opts, &key) +
2100 sizeof(struct tcphdr);
2101 /* The mss_cache is sized based on tp->tcp_header_len, which assumes
2102 * some common options. If this is an odd packet (because we have SACK
2103 * blocks etc) then our calculated header_len will be different, and
2104 * we have to adjust mss_now correspondingly */
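/* For example, if two SACK blocks are about to be sent, the option
 * space grows by 4 + 2 * 8 = 20 bytes relative to tcp_header_len,
 * and mss_now shrinks by the same 20 bytes.
 */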
2105 if (header_len != tp->tcp_header_len) {
2106 int delta = (int) header_len - tp->tcp_header_len;
2107 mss_now -= delta;
2108 }
2109
2110 return mss_now;
2111 }
2112
2113 /* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
2114 * As additional protection, we do not touch cwnd in retransmission phases,
2115 * or if the application hit its sndbuf limit recently.
2116 */
tcp_cwnd_application_limited(struct sock * sk)2117 static void tcp_cwnd_application_limited(struct sock *sk)
2118 {
2119 struct tcp_sock *tp = tcp_sk(sk);
2120
2121 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
2122 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
2123 /* Limited by application or receiver window. */
2124 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
2125 u32 win_used = max(tp->snd_cwnd_used, init_win);
2126 if (win_used < tcp_snd_cwnd(tp)) {
2127 tp->snd_ssthresh = tcp_current_ssthresh(sk);
2128 tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
2129 }
2130 tp->snd_cwnd_used = 0;
2131 }
2132 tp->snd_cwnd_stamp = tcp_jiffies32;
2133 }
2134
tcp_cwnd_validate(struct sock * sk,bool is_cwnd_limited)2135 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
2136 {
2137 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2138 struct tcp_sock *tp = tcp_sk(sk);
2139
2140 /* Track the strongest available signal of the degree to which the cwnd
2141 * is fully utilized. If cwnd-limited then remember that fact for the
2142 * current window. If not cwnd-limited then track the maximum number of
2143 * outstanding packets in the current window. (If cwnd-limited then we
2144 * chose to not update tp->max_packets_out to avoid an extra else
2145 * clause with no functional impact.)
2146 */
2147 if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
2148 is_cwnd_limited ||
2149 (!tp->is_cwnd_limited &&
2150 tp->packets_out > tp->max_packets_out)) {
2151 tp->is_cwnd_limited = is_cwnd_limited;
2152 tp->max_packets_out = tp->packets_out;
2153 tp->cwnd_usage_seq = tp->snd_nxt;
2154 }
2155
2156 if (tcp_is_cwnd_limited(sk)) {
2157 /* Network is fed fully. */
2158 tp->snd_cwnd_used = 0;
2159 tp->snd_cwnd_stamp = tcp_jiffies32;
2160 } else {
2161 /* Network starves. */
2162 if (tp->packets_out > tp->snd_cwnd_used)
2163 tp->snd_cwnd_used = tp->packets_out;
2164
2165 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
2166 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
2167 !ca_ops->cong_control)
2168 tcp_cwnd_application_limited(sk);
2169
2170 /* The following conditions together indicate the starvation
2171 * is caused by insufficient sender buffer:
2172 * 1) just sent some data (see tcp_write_xmit)
2173 * 2) not cwnd limited (this else condition)
2174 * 3) no more data to send (tcp_write_queue_empty())
2175 * 4) application is hitting buffer limit (SOCK_NOSPACE)
2176 */
2177 if (tcp_write_queue_empty(sk) && sk->sk_socket &&
2178 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
2179 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
2180 tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
2181 }
2182 }
2183
2184 /* Minshall's variant of the Nagle send check. */
tcp_minshall_check(const struct tcp_sock * tp)2185 static bool tcp_minshall_check(const struct tcp_sock *tp)
2186 {
2187 return after(tp->snd_sml, tp->snd_una) &&
2188 !after(tp->snd_sml, tp->snd_nxt);
2189 }
2190
2191 /* Update snd_sml if this skb is under mss
2192 * Note that a TSO packet might end with a sub-mss segment
2193 * The test is really :
2194 * if ((skb->len % mss) != 0)
2195 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
2196 * But we can avoid doing the divide again given we already have
2197 * skb_pcount = skb->len / mss_now
2198 */
tcp_minshall_update(struct tcp_sock * tp,unsigned int mss_now,const struct sk_buff * skb)2199 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
2200 const struct sk_buff *skb)
2201 {
2202 if (skb->len < tcp_skb_pcount(skb) * mss_now)
2203 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
2204 }
2205
2206 /* Return false if the packet can be sent now without violating Nagle's rules:
2207 * 1. It is full sized. (provided by caller in %partial bool)
2208 * 2. Or it contains FIN. (already checked by caller)
2209 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
2210 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
2211 * With Minshall's modification: all sent small packets are ACKed.
2212 */
tcp_nagle_check(bool partial,const struct tcp_sock * tp,int nonagle)2213 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
2214 int nonagle)
2215 {
2216 return partial &&
2217 ((nonagle & TCP_NAGLE_CORK) ||
2218 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
2219 }
2220
2221 /* Return how many segs we'd like on a TSO packet,
2222 * depending on current pacing rate, and how close the peer is.
2223 *
2224 * Rationale is:
2225 * - For close peers, we rather send bigger packets to reduce
2226 * cpu costs, because occasional losses will be repaired fast.
2227 * - For long distance/rtt flows, we would like to get ACK clocking
2228 * with 1 ACK per ms.
2229 *
2230 * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
2231 * in bigger TSO bursts. We cut the RTT-based allowance in half
2232 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
2233 * is below 1500 bytes after 6 * ~500 usec = 3ms.
2234 */
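/* For example, at a pacing rate of 125 MB/sec (~1 Gbit) with the usual
 * pacing shift of 10, the rate term is ~122 KB, roughly one ms worth of
 * data; a min_rtt of 512 usec (with the default tso_rtt_log of 9) then
 * adds only half of sk_gso_max_size before the cap and the divide by mss.
 */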
tcp_tso_autosize(const struct sock * sk,unsigned int mss_now,int min_tso_segs)2235 static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
2236 int min_tso_segs)
2237 {
2238 unsigned long bytes;
2239 u32 r;
2240
2241 bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
2242
2243 r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
2244 if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
2245 bytes += sk->sk_gso_max_size >> r;
2246
2247 bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
2248
2249 return max_t(u32, bytes / mss_now, min_tso_segs);
2250 }
2251
2252 /* Return the number of segments we want in the skb we are transmitting.
2253 * See if congestion control module wants to decide; otherwise, autosize.
2254 */
tcp_tso_segs(struct sock * sk,unsigned int mss_now)2255 static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
2256 {
2257 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2258 u32 min_tso, tso_segs;
2259
2260 min_tso = ca_ops->min_tso_segs ?
2261 ca_ops->min_tso_segs(sk) :
2262 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
2263
2264 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
2265 return min_t(u32, tso_segs, sk->sk_gso_max_segs);
2266 }
2267
2268 /* Returns the portion of skb which can be sent right away */
tcp_mss_split_point(const struct sock * sk,const struct sk_buff * skb,unsigned int mss_now,unsigned int max_segs,int nonagle)2269 static unsigned int tcp_mss_split_point(const struct sock *sk,
2270 const struct sk_buff *skb,
2271 unsigned int mss_now,
2272 unsigned int max_segs,
2273 int nonagle)
2274 {
2275 const struct tcp_sock *tp = tcp_sk(sk);
2276 u32 partial, needed, window, max_len;
2277
2278 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2279 max_len = mss_now * max_segs;
2280
2281 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
2282 return max_len;
2283
2284 needed = min(skb->len, window);
2285
2286 if (max_len <= needed)
2287 return max_len;
2288
2289 partial = needed % mss_now;
2290 /* If last segment is not a full MSS, check if Nagle rules allow us
2291 * to include this last segment in this skb.
2292 * Otherwise, we'll split the skb at last MSS boundary
2293 */
2294 if (tcp_nagle_check(partial != 0, tp, nonagle))
2295 return needed - partial;
2296
2297 return needed;
2298 }
2299
2300 /* Can at least one segment of SKB be sent right now, according to the
2301 * congestion window rules? If so, return how many segments are allowed.
2302 */
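/* For example, with cwnd == 10 and 7 packets in flight this returns
 * min(10 / 2, 10 - 7) = 3 segments.
 */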
tcp_cwnd_test(const struct tcp_sock * tp)2303 static u32 tcp_cwnd_test(const struct tcp_sock *tp)
2304 {
2305 u32 in_flight, cwnd, halfcwnd;
2306
2307 in_flight = tcp_packets_in_flight(tp);
2308 cwnd = tcp_snd_cwnd(tp);
2309 if (in_flight >= cwnd)
2310 return 0;
2311
2312 /* For better scheduling, ensure we have at least
2313 * 2 GSO packets in flight.
2314 */
2315 halfcwnd = max(cwnd >> 1, 1U);
2316 return min(halfcwnd, cwnd - in_flight);
2317 }
2318
2319 /* Initialize TSO state of a skb.
2320 * This must be invoked the first time we consider transmitting
2321 * SKB onto the wire.
2322 */
tcp_init_tso_segs(struct sk_buff * skb,unsigned int mss_now)2323 static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2324 {
2325 int tso_segs = tcp_skb_pcount(skb);
2326
2327 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now))
2328 return tcp_set_skb_tso_segs(skb, mss_now);
2329
2330 return tso_segs;
2331 }
2332
2333
2334 /* Return true if the Nagle test allows this packet to be
2335 * sent now.
2336 */
tcp_nagle_test(const struct tcp_sock * tp,const struct sk_buff * skb,unsigned int cur_mss,int nonagle)2337 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2338 unsigned int cur_mss, int nonagle)
2339 {
2340 /* The Nagle rule does not apply to frames that sit in the middle of the
2341 * write_queue (they have no chance to get new data).
2342 *
2343 * This is implemented in the callers, where they modify the 'nonagle'
2344 * argument based upon the location of SKB in the send queue.
2345 */
2346 if (nonagle & TCP_NAGLE_PUSH)
2347 return true;
2348
2349 /* Don't use the nagle rule for urgent data (or for the final FIN). */
2350 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2351 return true;
2352
2353 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2354 return true;
2355
2356 return false;
2357 }
2358
2359 /* Does at least the first segment of SKB fit into the send window? */
tcp_snd_wnd_test(const struct tcp_sock * tp,const struct sk_buff * skb,unsigned int cur_mss)2360 static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2361 const struct sk_buff *skb,
2362 unsigned int cur_mss)
2363 {
2364 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2365
2366 if (skb->len > cur_mss)
2367 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2368
2369 return !after(end_seq, tcp_wnd_end(tp));
2370 }
2371
2372 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2373 * which is put after SKB on the list. It is very much like
2374 * tcp_fragment() except that it may make several kinds of assumptions
2375 * in order to speed up the splitting operation. In particular, we
2376 * know that all the data is in scatter-gather pages, and that the
2377 * packet has never been sent out before (and thus is not cloned).
2378 */
tso_fragment(struct sock * sk,struct sk_buff * skb,unsigned int len,unsigned int mss_now,gfp_t gfp)2379 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2380 unsigned int mss_now, gfp_t gfp)
2381 {
2382 int nlen = skb->len - len;
2383 struct sk_buff *buff;
2384 u16 flags;
2385
2386 /* All of a TSO frame must be composed of paged data. */
2387 DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len);
2388
2389 buff = tcp_stream_alloc_skb(sk, gfp, true);
2390 if (unlikely(!buff))
2391 return -ENOMEM;
2392 skb_copy_decrypted(buff, skb);
2393 mptcp_skb_ext_copy(buff, skb);
2394
2395 sk_wmem_queued_add(sk, buff->truesize);
2396 sk_mem_charge(sk, buff->truesize);
2397 buff->truesize += nlen;
2398 skb->truesize -= nlen;
2399
2400 /* Correct the sequence numbers. */
2401 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2402 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2403 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2404
2405 /* PSH and FIN should only be set in the second packet. */
2406 flags = TCP_SKB_CB(skb)->tcp_flags;
2407 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
2408 TCP_SKB_CB(buff)->tcp_flags = flags;
2409
2410 tcp_skb_fragment_eor(skb, buff);
2411
2412 skb_split(skb, buff, len);
2413 tcp_fragment_tstamp(skb, buff);
2414
2415 /* Fix up tso_factor for both original and new SKB. */
2416 tcp_set_skb_tso_segs(skb, mss_now);
2417 tcp_set_skb_tso_segs(buff, mss_now);
2418
2419 /* Link BUFF into the send queue. */
2420 __skb_header_release(buff);
2421 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2422
2423 return 0;
2424 }
2425
2426 /* Try to defer sending, if possible, in order to minimize the amount
2427 * of TSO splitting we do. View it as a kind of TSO Nagle test.
2428 *
2429 * This algorithm is from John Heffner.
2430 */
tcp_tso_should_defer(struct sock * sk,struct sk_buff * skb,bool * is_cwnd_limited,bool * is_rwnd_limited,u32 max_segs)2431 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2432 bool *is_cwnd_limited,
2433 bool *is_rwnd_limited,
2434 u32 max_segs)
2435 {
2436 const struct inet_connection_sock *icsk = inet_csk(sk);
2437 u32 send_win, cong_win, limit, in_flight, threshold;
2438 u64 srtt_in_ns, expected_ack, how_far_is_the_ack;
2439 struct tcp_sock *tp = tcp_sk(sk);
2440 struct sk_buff *head;
2441 int win_divisor;
2442 s64 delta;
2443
2444 if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2445 goto send_now;
2446
2447 /* Avoid bursty behavior by allowing defer
2448 * only if the last write was recent (1 ms).
2449 * Note that tp->tcp_wstamp_ns can be in the future if we have
2450 * packets waiting in a qdisc or device for EDT delivery.
2451 */
2452 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2453 if (delta > 0)
2454 goto send_now;
2455
2456 in_flight = tcp_packets_in_flight(tp);
2457
2458 BUG_ON(tcp_skb_pcount(skb) <= 1);
2459 BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
2460
2461 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2462
2463 /* From in_flight test above, we know that cwnd > in_flight. */
2464 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
2465
2466 limit = min(send_win, cong_win);
2467
2468 /* If a full-sized TSO skb can be sent, do it. */
2469 if (limit >= max_segs * tp->mss_cache)
2470 goto send_now;
2471
2472 /* Middle in queue won't get any more data, full sendable already? */
2473 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
2474 goto send_now;
2475
2476 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2477 if (win_divisor) {
2478 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
2479
2480 /* If at least some fraction of a window is available,
2481 * just use it.
2482 */
2483 chunk /= win_divisor;
2484 if (limit >= chunk)
2485 goto send_now;
2486 } else {
2487 /* Different approach, try not to defer past a single
2488 * ACK. Receiver should ACK every other full sized
2489 * frame, so if we have space for more than 3 frames
2490 * then send now.
2491 */
2492 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2493 goto send_now;
2494 }
2495
2496 /* TODO : use tsorted_sent_queue ? */
2497 head = tcp_rtx_queue_head(sk);
2498 if (!head)
2499 goto send_now;
2500
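/* tp->srtt_us stores the smoothed RTT left-shifted by 3, so multiplying
 * by NSEC_PER_USEC >> 3 converts it to nanoseconds.
 */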
2501 srtt_in_ns = (u64)(NSEC_PER_USEC >> 3) * tp->srtt_us;
2502 /* When is the ACK expected ? */
2503 expected_ack = head->tstamp + srtt_in_ns;
2504 /* How far from now is the ACK expected ? */
2505 how_far_is_the_ack = expected_ack - tp->tcp_clock_cache;
2506
2507 /* If next ACK is likely to come too late,
2508 * i.e. in more than min(1ms, half srtt), do not defer.
2509 */
2510 threshold = min(srtt_in_ns >> 1, NSEC_PER_MSEC);
2511
2512 if ((s64)(how_far_is_the_ack - threshold) > 0)
2513 goto send_now;
2514
2515 /* Ok, it looks like it is advisable to defer.
2516 * Three cases are tracked :
2517 * 1) We are cwnd-limited
2518 * 2) We are rwnd-limited
2519 * 3) We are application limited.
2520 */
2521 if (cong_win < send_win) {
2522 if (cong_win <= skb->len) {
2523 *is_cwnd_limited = true;
2524 return true;
2525 }
2526 } else {
2527 if (send_win <= skb->len) {
2528 *is_rwnd_limited = true;
2529 return true;
2530 }
2531 }
2532
2533 /* If this packet won't get more data, do not wait. */
2534 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2535 TCP_SKB_CB(skb)->eor)
2536 goto send_now;
2537
2538 return true;
2539
2540 send_now:
2541 return false;
2542 }
2543
tcp_mtu_check_reprobe(struct sock * sk)2544 static inline void tcp_mtu_check_reprobe(struct sock *sk)
2545 {
2546 struct inet_connection_sock *icsk = inet_csk(sk);
2547 struct tcp_sock *tp = tcp_sk(sk);
2548 struct net *net = sock_net(sk);
2549 u32 interval;
2550 s32 delta;
2551
2552 interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
2553 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
2554 if (unlikely(delta >= interval * HZ)) {
2555 int mss = tcp_current_mss(sk);
2556
2557 /* Update current search range */
2558 icsk->icsk_mtup.probe_size = 0;
2559 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
2560 sizeof(struct tcphdr) +
2561 icsk->icsk_af_ops->net_header_len;
2562 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
2563
2564 /* Update probe time stamp */
2565 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2566 }
2567 }
2568
tcp_can_coalesce_send_queue_head(struct sock * sk,int len)2569 static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2570 {
2571 struct sk_buff *skb, *next;
2572
2573 skb = tcp_send_head(sk);
2574 tcp_for_write_queue_from_safe(skb, next, sk) {
2575 if (len <= skb->len)
2576 break;
2577
2578 if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next))
2579 return false;
2580
2581 len -= skb->len;
2582 }
2583
2584 return true;
2585 }
2586
tcp_clone_payload(struct sock * sk,struct sk_buff * to,int probe_size)2587 static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
2588 int probe_size)
2589 {
2590 skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
2591 int i, todo, len = 0, nr_frags = 0;
2592 const struct sk_buff *skb;
2593
2594 if (!sk_wmem_schedule(sk, to->truesize + probe_size))
2595 return -ENOMEM;
2596
2597 skb_queue_walk(&sk->sk_write_queue, skb) {
2598 const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;
2599
2600 if (skb_headlen(skb))
2601 return -EINVAL;
2602
2603 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
2604 if (len >= probe_size)
2605 goto commit;
2606 todo = min_t(int, skb_frag_size(fragfrom),
2607 probe_size - len);
2608 len += todo;
2609 if (lastfrag &&
2610 skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
2611 skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
2612 skb_frag_size(lastfrag)) {
2613 skb_frag_size_add(lastfrag, todo);
2614 continue;
2615 }
2616 if (unlikely(nr_frags == MAX_SKB_FRAGS))
2617 return -E2BIG;
2618 skb_frag_page_copy(fragto, fragfrom);
2619 skb_frag_off_copy(fragto, fragfrom);
2620 skb_frag_size_set(fragto, todo);
2621 nr_frags++;
2622 lastfrag = fragto++;
2623 }
2624 }
2625 commit:
2626 WARN_ON_ONCE(len != probe_size);
2627 for (i = 0; i < nr_frags; i++)
2628 skb_frag_ref(to, i);
2629
2630 skb_shinfo(to)->nr_frags = nr_frags;
2631 to->truesize += probe_size;
2632 to->len += probe_size;
2633 to->data_len += probe_size;
2634 __skb_header_release(to);
2635 return 0;
2636 }
2637
2638 /* tcp_mtu_probe() and tcp_grow_skb() can both eat an skb (src) if
2639 * all its payload was moved to another one (dst).
2640 * Make sure to transfer tcp_flags, eor, and tstamp.
2641 */
tcp_eat_one_skb(struct sock * sk,struct sk_buff * dst,struct sk_buff * src)2642 static void tcp_eat_one_skb(struct sock *sk,
2643 struct sk_buff *dst,
2644 struct sk_buff *src)
2645 {
2646 TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags;
2647 TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor;
2648 tcp_skb_collapse_tstamp(dst, src);
2649 tcp_unlink_write_queue(src, sk);
2650 tcp_wmem_free_skb(sk, src);
2651 }
2652
2653 /* Create a new MTU probe if we are ready.
2654 * MTU probing regularly attempts to increase the path MTU by
2655 * deliberately sending larger packets. This discovers routing
2656 * changes resulting in larger path MTUs.
2657 *
2658 * Returns 0 if we should wait to probe (no cwnd available),
2659 * 1 if a probe was sent,
2660 * -1 otherwise
2661 */
tcp_mtu_probe(struct sock * sk)2662 static int tcp_mtu_probe(struct sock *sk)
2663 {
2664 struct inet_connection_sock *icsk = inet_csk(sk);
2665 struct tcp_sock *tp = tcp_sk(sk);
2666 struct sk_buff *skb, *nskb, *next;
2667 struct net *net = sock_net(sk);
2668 int probe_size;
2669 int size_needed;
2670 int copy, len;
2671 int mss_now;
2672 int interval;
2673
2674 /* Not currently probing/verifying,
2675 * not in recovery,
2676 * have enough cwnd, and
2677 * not SACKing (the variable headers throw things off)
2678 */
2679 if (likely(!icsk->icsk_mtup.enabled ||
2680 icsk->icsk_mtup.probe_size ||
2681 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
2682 tcp_snd_cwnd(tp) < 11 ||
2683 tp->rx_opt.num_sacks || tp->rx_opt.dsack))
2684 return -1;
2685
2686 /* Use binary search for probe_size, between tcp_mss_base
2687 * and the current mss_clamp. If (search_high - search_low) is
2688 * smaller than a threshold, back off from probing.
2689 */
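/* For example, with search_low at 1064 and search_high at 1500, the probe
 * targets an MTU of (1064 + 1500) / 2 = 1282; size_needed additionally
 * reserves (reordering + 1) full-sized segments of trailing data so that
 * a lost probe can still be detected.
 */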
2690 mss_now = tcp_current_mss(sk);
2691 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
2692 icsk->icsk_mtup.search_low) >> 1);
2693 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
2694 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
2695 /* When misfortune happens, we are reprobing actively,
2696 * and then the reprobe timer has expired. We stick with the current
2697 * probing process by not resetting the search range to its original value.
2698 */
2699 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
2700 interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
2701 /* Check whether enough time has elapsed for
2702 * another round of probing.
2703 */
2704 tcp_mtu_check_reprobe(sk);
2705 return -1;
2706 }
2707
2708 /* Have enough data in the send queue to probe? */
2709 if (tp->write_seq - tp->snd_nxt < size_needed)
2710 return -1;
2711
2712 if (tp->snd_wnd < size_needed)
2713 return -1;
2714 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
2715 return 0;
2716
2717 /* Do we need to wait to drain cwnd? With none in flight, don't stall */
2718 if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2719 if (!tcp_packets_in_flight(tp))
2720 return -1;
2721 else
2722 return 0;
2723 }
2724
2725 if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2726 return -1;
2727
2728 /* We're allowed to probe. Build it now. */
2729 nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false);
2730 if (!nskb)
2731 return -1;
2732
2733 /* build the payload, and be prepared to abort if this fails. */
2734 if (tcp_clone_payload(sk, nskb, probe_size)) {
2735 tcp_skb_tsorted_anchor_cleanup(nskb);
2736 consume_skb(nskb);
2737 return -1;
2738 }
2739 sk_wmem_queued_add(sk, nskb->truesize);
2740 sk_mem_charge(sk, nskb->truesize);
2741
2742 skb = tcp_send_head(sk);
2743 skb_copy_decrypted(nskb, skb);
2744 mptcp_skb_ext_copy(nskb, skb);
2745
2746 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2747 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2748 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2749
2750 tcp_insert_write_queue_before(nskb, skb, sk);
2751 tcp_highest_sack_replace(sk, skb, nskb);
2752
2753 len = 0;
2754 tcp_for_write_queue_from_safe(skb, next, sk) {
2755 copy = min_t(int, skb->len, probe_size - len);
2756
2757 if (skb->len <= copy) {
2758 tcp_eat_one_skb(sk, nskb, skb);
2759 } else {
2760 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2761 ~(TCPHDR_FIN|TCPHDR_PSH);
2762 __pskb_trim_head(skb, copy);
2763 tcp_set_skb_tso_segs(skb, mss_now);
2764 TCP_SKB_CB(skb)->seq += copy;
2765 }
2766
2767 len += copy;
2768
2769 if (len >= probe_size)
2770 break;
2771 }
2772 tcp_init_tso_segs(nskb, nskb->len);
2773
2774 /* We're ready to send. If this fails, the probe will
2775 * be resegmented into mss-sized pieces by tcp_write_xmit().
2776 */
2777 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
2778 /* Decrement cwnd here because we are sending
2779 * effectively two packets. */
2780 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
2781 tcp_event_new_data_sent(sk, nskb);
2782
2783 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2784 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2785 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2786
2787 return 1;
2788 }
2789
2790 return -1;
2791 }
2792
tcp_pacing_check(struct sock * sk)2793 static bool tcp_pacing_check(struct sock *sk)
2794 {
2795 struct tcp_sock *tp = tcp_sk(sk);
2796
2797 if (!tcp_needs_internal_pacing(sk))
2798 return false;
2799
2800 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2801 return false;
2802
2803 if (!hrtimer_is_queued(&tp->pacing_timer)) {
2804 hrtimer_start(&tp->pacing_timer,
2805 ns_to_ktime(tp->tcp_wstamp_ns),
2806 HRTIMER_MODE_ABS_PINNED_SOFT);
2807 sock_hold(sk);
2808 }
2809 return true;
2810 }
2811
tcp_rtx_queue_empty_or_single_skb(const struct sock * sk)2812 static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
2813 {
2814 const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
2815
2816 /* No skb in the rtx queue. */
2817 if (!node)
2818 return true;
2819
2820 /* Only one skb in rtx queue. */
2821 return !node->rb_left && !node->rb_right;
2822 }
2823
2824 /* TCP Small Queues:
2825 * Control the number of packets in qdisc/device queues to two packets, or ~1 ms worth of data.
2826 * (These limits are doubled for retransmits)
2827 * This allows for :
2828 * - better RTT estimation and ACK scheduling
2829 * - faster recovery
2830 * - high rates
2831 * Alas, some drivers / subsystems require a fair amount
2832 * of queued bytes to ensure line rate.
2833 * One example is wifi aggregation (802.11 AMPDU)
2834 */
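/* For example, at a pacing rate of 125 MB/sec with a pacing shift of 10
 * the rate term is ~122 KB of skb truesize; the smaller of that (but at
 * least two skbs) and tcp_limit_output_bytes, shifted left by 'factor',
 * bounds what this socket may keep in qdisc/device queues.
 */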
tcp_small_queue_check(struct sock * sk,const struct sk_buff * skb,unsigned int factor)2835 static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2836 unsigned int factor)
2837 {
2838 unsigned long limit;
2839
2840 limit = max_t(unsigned long,
2841 2 * skb->truesize,
2842 READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
2843 limit = min_t(unsigned long, limit,
2844 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
2845 limit <<= factor;
2846
2847 if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2848 tcp_sk(sk)->tcp_tx_delay) {
2849 u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
2850 tcp_sk(sk)->tcp_tx_delay;
2851
2852 /* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2853 * approximate our needs assuming an ~100% skb->truesize overhead.
2854 * USEC_PER_SEC is approximated by 2^20.
2855 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2856 */
2857 extra_bytes >>= (20 - 1);
2858 limit += extra_bytes;
2859 }
2860 if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2861 /* Always send skb if rtx queue is empty or has one skb.
2862 * No need to wait for TX completion to call us back,
2863 * after softirq schedule.
2864 * This helps when TX completions are delayed too much.
2865 */
2866 if (tcp_rtx_queue_empty_or_single_skb(sk))
2867 return false;
2868
2869 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2870 /* It is possible TX completion already happened
2871 * before we set TSQ_THROTTLED, so we must
2872 * test again the condition.
2873 */
2874 smp_mb__after_atomic();
2875 if (refcount_read(&sk->sk_wmem_alloc) > limit)
2876 return true;
2877 }
2878 return false;
2879 }
2880
tcp_chrono_set(struct tcp_sock * tp,const enum tcp_chrono new)2881 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2882 {
2883 const u32 now = tcp_jiffies32;
2884 enum tcp_chrono old = tp->chrono_type;
2885
2886 if (old > TCP_CHRONO_UNSPEC)
2887 tp->chrono_stat[old - 1] += now - tp->chrono_start;
2888 tp->chrono_start = now;
2889 tp->chrono_type = new;
2890 }
2891
tcp_chrono_start(struct sock * sk,const enum tcp_chrono type)2892 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2893 {
2894 struct tcp_sock *tp = tcp_sk(sk);
2895
2896 /* If there are multiple conditions worthy of tracking in a
2897 * chronograph, then the highest priority enum takes precedence
2898 * over the other conditions, so if something "more interesting"
2899 * starts happening, stop the previous chrono and start a new one.
2900 */
2901 if (type > tp->chrono_type)
2902 tcp_chrono_set(tp, type);
2903 }
2904
tcp_chrono_stop(struct sock * sk,const enum tcp_chrono type)2905 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2906 {
2907 struct tcp_sock *tp = tcp_sk(sk);
2908
2909
2910 /* There are multiple conditions worthy of tracking in a
2911 * chronograph, so the highest priority enum takes
2912 * precedence over the other conditions (see tcp_chrono_start).
2913 * If a condition stops, we only stop chrono tracking if
2914 * it is the current ("most interesting") chrono we are
2915 * tracking, and we start the busy chrono if there is still pending data.
2916 */
2917 if (tcp_rtx_and_write_queues_empty(sk))
2918 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2919 else if (type == tp->chrono_type)
2920 tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2921 }
2922
2923 /* First skb in the write queue is smaller than ideal packet size.
2924 * Check if we can move payload from the second skb in the queue.
2925 */
tcp_grow_skb(struct sock * sk,struct sk_buff * skb,int amount)2926 static void tcp_grow_skb(struct sock *sk, struct sk_buff *skb, int amount)
2927 {
2928 struct sk_buff *next_skb = skb->next;
2929 unsigned int nlen;
2930
2931 if (tcp_skb_is_last(sk, skb))
2932 return;
2933
2934 if (!tcp_skb_can_collapse(skb, next_skb))
2935 return;
2936
2937 nlen = min_t(u32, amount, next_skb->len);
2938 if (!nlen || !skb_shift(skb, next_skb, nlen))
2939 return;
2940
2941 TCP_SKB_CB(skb)->end_seq += nlen;
2942 TCP_SKB_CB(next_skb)->seq += nlen;
2943
2944 if (!next_skb->len) {
2945 /* In case FIN is set, we need to update end_seq */
2946 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2947
2948 tcp_eat_one_skb(sk, skb, next_skb);
2949 }
2950 }
2951
2952 /* This routine writes packets to the network. It advances the
2953 * send_head. This happens as incoming acks open up the remote
2954 * window for us.
2955 *
2956 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2957 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2958 * account rare use of URG, this is not a big flaw.
2959 *
2960 * Send at most one packet when push_one > 0. Temporarily ignore
2961 * cwnd limit to force at most one packet out when push_one == 2.
2962 *
2963 * Returns true if no segments are in flight and we have queued segments,
2964 * but cannot send anything now because of SWS or another problem.
2965 */
tcp_write_xmit(struct sock * sk,unsigned int mss_now,int nonagle,int push_one,gfp_t gfp)2966 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2967 int push_one, gfp_t gfp)
2968 {
2969 struct tcp_sock *tp = tcp_sk(sk);
2970 struct sk_buff *skb;
2971 unsigned int tso_segs, sent_pkts;
2972 u32 cwnd_quota, max_segs;
2973 int result;
2974 bool is_cwnd_limited = false, is_rwnd_limited = false;
2975
2976 sent_pkts = 0;
2977
2978 tcp_mstamp_refresh(tp);
2979
2980 /* AccECN option beacon depends on mstamp, it may change mss */
2981 if (tcp_ecn_mode_accecn(tp) && tcp_accecn_option_beacon_check(sk))
2982 mss_now = tcp_current_mss(sk);
2983
2984 if (!push_one) {
2985 /* Do MTU probing. */
2986 result = tcp_mtu_probe(sk);
2987 if (!result) {
2988 return false;
2989 } else if (result > 0) {
2990 sent_pkts = 1;
2991 }
2992 }
2993
2994 max_segs = tcp_tso_segs(sk, mss_now);
2995 while ((skb = tcp_send_head(sk))) {
2996 unsigned int limit;
2997 int missing_bytes;
2998
2999 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
3000 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
3001 tp->tcp_wstamp_ns = tp->tcp_clock_cache;
3002 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
3003 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
3004 tcp_init_tso_segs(skb, mss_now);
3005 goto repair; /* Skip network transmission */
3006 }
3007
3008 if (tcp_pacing_check(sk))
3009 break;
3010
3011 cwnd_quota = tcp_cwnd_test(tp);
3012 if (!cwnd_quota) {
3013 if (push_one == 2)
3014 /* Force out a loss probe pkt. */
3015 cwnd_quota = 1;
3016 else
3017 break;
3018 }
3019 cwnd_quota = min(cwnd_quota, max_segs);
3020 missing_bytes = cwnd_quota * mss_now - skb->len;
3021 if (missing_bytes > 0)
3022 tcp_grow_skb(sk, skb, missing_bytes);
3023
3024 tso_segs = tcp_set_skb_tso_segs(skb, mss_now);
3025
3026 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
3027 is_rwnd_limited = true;
3028 break;
3029 }
3030
3031 if (tso_segs == 1) {
3032 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
3033 (tcp_skb_is_last(sk, skb) ?
3034 nonagle : TCP_NAGLE_PUSH))))
3035 break;
3036 } else {
3037 if (!push_one &&
3038 tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
3039 &is_rwnd_limited, max_segs))
3040 break;
3041 }
3042
3043 limit = mss_now;
3044 if (tso_segs > 1 && !tcp_urg_mode(tp))
3045 limit = tcp_mss_split_point(sk, skb, mss_now,
3046 cwnd_quota,
3047 nonagle);
3048
3049 if (skb->len > limit &&
3050 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
3051 break;
3052
3053 if (tcp_small_queue_check(sk, skb, 0))
3054 break;
3055
3056 /* Argh, we hit an empty skb(), presumably a thread
3057 * is sleeping in sendmsg()/sk_stream_wait_memory().
3058 * We do not want to send a pure-ack packet and have
3059 * a strange looking rtx queue with empty packet(s).
3060 */
3061 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
3062 break;
3063
3064 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
3065 break;
3066
3067 repair:
3068 /* Advance the send_head. This one is sent out.
3069 * This call will increment packets_out.
3070 */
3071 tcp_event_new_data_sent(sk, skb);
3072
3073 tcp_minshall_update(tp, mss_now, skb);
3074 sent_pkts += tcp_skb_pcount(skb);
3075
3076 if (push_one)
3077 break;
3078 }
3079
3080 if (is_rwnd_limited)
3081 tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
3082 else
3083 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
3084
3085 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
3086 if (likely(sent_pkts || is_cwnd_limited))
3087 tcp_cwnd_validate(sk, is_cwnd_limited);
3088
3089 if (likely(sent_pkts)) {
3090 if (tcp_in_cwnd_reduction(sk))
3091 tp->prr_out += sent_pkts;
3092
3093 /* Send one loss probe per tail loss episode. */
3094 if (push_one != 2)
3095 tcp_schedule_loss_probe(sk, false);
3096 return false;
3097 }
3098 return !tp->packets_out && !tcp_write_queue_empty(sk);
3099 }
3100
tcp_schedule_loss_probe(struct sock * sk,bool advancing_rto)3101 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
3102 {
3103 struct inet_connection_sock *icsk = inet_csk(sk);
3104 struct tcp_sock *tp = tcp_sk(sk);
3105 u32 timeout, timeout_us, rto_delta_us;
3106 int early_retrans;
3107
3108 /* Don't do any loss probe on a Fast Open connection before 3WHS
3109 * finishes.
3110 */
3111 if (rcu_access_pointer(tp->fastopen_rsk))
3112 return false;
3113
3114 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
3115 /* Schedule a loss probe in 2*RTT for SACK-capable connections
3116 * that are not in loss recovery and are limited by either cwnd or the application.
3117 */
3118 if ((early_retrans != 3 && early_retrans != 4) ||
3119 !tp->packets_out || !tcp_is_sack(tp) ||
3120 (icsk->icsk_ca_state != TCP_CA_Open &&
3121 icsk->icsk_ca_state != TCP_CA_CWR))
3122 return false;
3123
3124 /* Probe timeout is 2*rtt. Add minimum RTO to account
3125 * for delayed ack when there's one outstanding packet. If no RTT
3126 * sample is available then probe after TCP_TIMEOUT_INIT.
3127 */
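/* Since tp->srtt_us is the smoothed RTT left-shifted by 3,
 * srtt_us >> 2 equals 2 * srtt in microseconds.
 */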
3128 if (tp->srtt_us) {
3129 timeout_us = tp->srtt_us >> 2;
3130 if (tp->packets_out == 1)
3131 timeout_us += tcp_rto_min_us(sk);
3132 else
3133 timeout_us += TCP_TIMEOUT_MIN_US;
3134 timeout = usecs_to_jiffies(timeout_us);
3135 } else {
3136 timeout = TCP_TIMEOUT_INIT;
3137 }
3138
3139 /* If the RTO formula yields an earlier time, then use that time. */
3140 rto_delta_us = advancing_rto ?
3141 jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
3142 tcp_rto_delta_us(sk); /* How far in future is RTO? */
3143 if (rto_delta_us > 0)
3144 timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
3145
3146 tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, true);
3147 return true;
3148 }
3149
3150 /* Thanks to skb fast clones, we can detect if a prior transmit of
3151 * a packet is still in a qdisc or driver queue.
3152 * In this case, there is very little point in doing a retransmit.
3153 */
skb_still_in_host_queue(struct sock * sk,const struct sk_buff * skb)3154 static bool skb_still_in_host_queue(struct sock *sk,
3155 const struct sk_buff *skb)
3156 {
3157 if (unlikely(skb_fclone_busy(sk, skb))) {
3158 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
3159 smp_mb__after_atomic();
3160 if (skb_fclone_busy(sk, skb)) {
3161 NET_INC_STATS(sock_net(sk),
3162 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
3163 return true;
3164 }
3165 }
3166 return false;
3167 }
3168
3169 /* When probe timeout (PTO) fires, try to send a new segment if possible, else
3170 * retransmit the last segment.
3171 */
tcp_send_loss_probe(struct sock * sk)3172 void tcp_send_loss_probe(struct sock *sk)
3173 {
3174 struct tcp_sock *tp = tcp_sk(sk);
3175 struct sk_buff *skb;
3176 int pcount;
3177 int mss = tcp_current_mss(sk);
3178
3179 /* At most one outstanding TLP */
3180 if (tp->tlp_high_seq)
3181 goto rearm_timer;
3182
3183 tp->tlp_retrans = 0;
3184 skb = tcp_send_head(sk);
3185 if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
3186 pcount = tp->packets_out;
3187 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
3188 if (tp->packets_out > pcount)
3189 goto probe_sent;
3190 goto rearm_timer;
3191 }
3192 skb = skb_rb_last(&sk->tcp_rtx_queue);
3193 if (unlikely(!skb)) {
3194 tcp_warn_once(sk, tp->packets_out, "invalid inflight: ");
3195 smp_store_release(&inet_csk(sk)->icsk_pending, 0);
3196 return;
3197 }
3198
3199 if (skb_still_in_host_queue(sk, skb))
3200 goto rearm_timer;
3201
3202 pcount = tcp_skb_pcount(skb);
3203 if (WARN_ON(!pcount))
3204 goto rearm_timer;
3205
3206 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
3207 if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
3208 (pcount - 1) * mss, mss,
3209 GFP_ATOMIC)))
3210 goto rearm_timer;
3211 skb = skb_rb_next(skb);
3212 }
3213
3214 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
3215 goto rearm_timer;
3216
3217 if (__tcp_retransmit_skb(sk, skb, 1))
3218 goto rearm_timer;
3219
3220 tp->tlp_retrans = 1;
3221
3222 probe_sent:
3223 /* Record snd_nxt for loss detection. */
3224 tp->tlp_high_seq = tp->snd_nxt;
3225
3226 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
3227 /* Reset s.t. tcp_rearm_rto will restart timer from now */
3228 smp_store_release(&inet_csk(sk)->icsk_pending, 0);
3229 rearm_timer:
3230 tcp_rearm_rto(sk);
3231 }
3232
3233 /* Push out any pending frames which were held back due to
3234 * TCP_CORK or attempt at coalescing tiny packets.
3235 * The socket must be locked by the caller.
3236 */
__tcp_push_pending_frames(struct sock * sk,unsigned int cur_mss,int nonagle)3237 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
3238 int nonagle)
3239 {
3240 /* If we are closed, the bytes will have to remain here.
3241 * In time closedown will finish, we empty the write queue and
3242 * all will be happy.
3243 */
3244 if (unlikely(sk->sk_state == TCP_CLOSE))
3245 return;
3246
3247 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
3248 sk_gfp_mask(sk, GFP_ATOMIC)))
3249 tcp_check_probe_timer(sk);
3250 }
3251
3252 /* Send a _single_ skb sitting at the send head. This function requires
3253 * a true push of pending frames to set up the probe timer etc.
3254 */
tcp_push_one(struct sock * sk,unsigned int mss_now)3255 void tcp_push_one(struct sock *sk, unsigned int mss_now)
3256 {
3257 struct sk_buff *skb = tcp_send_head(sk);
3258
3259 BUG_ON(!skb || skb->len < mss_now);
3260
3261 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
3262 }
3263
3264 /* This function returns the amount that we can raise the
3265 * usable window based on the following constraints
3266 *
3267 * 1. The window can never be shrunk once it is offered (RFC 793)
3268 * 2. We limit memory per socket
3269 *
3270 * RFC 1122:
3271 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
3272 * RECV.NEXT + RCV.WIN fixed until:
3273 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
3274 *
3275 * i.e. don't raise the right edge of the window until you can raise
3276 * it at least MSS bytes.
3277 *
3278 * Unfortunately, the recommended algorithm breaks header prediction,
3279 * since header prediction assumes th->window stays fixed.
3280 *
3281 * Strictly speaking, keeping th->window fixed violates the receiver
3282 * side SWS prevention criteria. The problem is that under this rule
3283 * a stream of single byte packets will cause the right side of the
3284 * window to always advance by a single byte.
3285 *
3286 * Of course, if the sender implements sender side SWS prevention
3287 * then this will not be a problem.
3288 *
3289 * BSD seems to make the following compromise:
3290 *
3291 * If the free space is less than 1/4 of the maximum
3292 * space available and the free space is less than 1/2 mss,
3293 * then set the window to 0.
3294 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
3295 * Otherwise, just prevent the window from shrinking
3296 * and from being larger than the largest representable value.
3297 *
3298 * This prevents incremental opening of the window in the regime
3299 * where TCP is limited by the speed of the reader side taking
3300 * data out of the TCP receive queue. It does nothing about
3301 * those cases where the window is constrained on the sender side
3302 * because the pipeline is full.
3303 *
3304 * BSD also seems to "accidentally" limit itself to windows that are a
3305 * multiple of MSS, at least until the free space gets quite small.
3306 * This would appear to be a side effect of the mbuf implementation.
3307 * Combining these two algorithms results in the observed behavior
3308 * of having a fixed window size at almost all times.
3309 *
3310 * Below we obtain similar behavior by forcing the offered window to
3311 * a multiple of the mss when it is feasible to do so.
3312 *
3313 * Note, we don't "adjust" the advertised window for TIMESTAMP or SACK
3314 * option bytes; regular options like TIMESTAMP are already taken into account.
3315 */
3316 u32 __tcp_select_window(struct sock *sk)
3317 {
3318 struct inet_connection_sock *icsk = inet_csk(sk);
3319 struct tcp_sock *tp = tcp_sk(sk);
3320 struct net *net = sock_net(sk);
3321 /* MSS for the peer's data. Previous versions used mss_clamp
3322 * here. I don't know if the value based on our guesses
3323 * of peer's MSS is better for the performance. It's more correct
3324 * but may be worse for the performance because of rcv_mss
3325 * fluctuations. --SAW 1998/11/1
3326 */
3327 int mss = icsk->icsk_ack.rcv_mss;
3328 int free_space = tcp_space(sk);
3329 int allowed_space = tcp_full_space(sk);
3330 int full_space, window;
3331
3332 if (sk_is_mptcp(sk))
3333 mptcp_space(sk, &free_space, &allowed_space);
3334
3335 full_space = min_t(int, tp->window_clamp, allowed_space);
3336
3337 if (unlikely(mss > full_space)) {
3338 mss = full_space;
3339 if (mss <= 0)
3340 return 0;
3341 }
3342
3343 /* Only allow window shrink if the sysctl is enabled and we have
3344 * a non-zero scaling factor in effect.
3345 */
3346 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
3347 goto shrink_window_allowed;
3348
3349 /* do not allow window to shrink */
3350
3351 if (free_space < (full_space >> 1)) {
3352 icsk->icsk_ack.quick = 0;
3353
3354 if (tcp_under_memory_pressure(sk))
3355 tcp_adjust_rcv_ssthresh(sk);
3356
3357 /* free_space might become our new window, make sure we don't
3358 * increase it due to wscale.
3359 */
3360 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3361
3362 /* if free space is less than mss estimate, or is below 1/16th
3363 * of the maximum allowed, try to move to zero-window, else
3364 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
3365 * new incoming data is dropped due to memory limits.
3366 * With a large window, the mss test alone triggers way too late
3367 * to announce a zero window before the rmem limit kicks in.
3368 */
3369 if (free_space < (allowed_space >> 4) || free_space < mss)
3370 return 0;
3371 }
3372
3373 if (free_space > tp->rcv_ssthresh)
3374 free_space = tp->rcv_ssthresh;
3375
3376 /* Don't do rounding if we are using window scaling, since the
3377 * scaled window will not line up with the MSS boundary anyway.
3378 */
3379 if (tp->rx_opt.rcv_wscale) {
3380 window = free_space;
3381
3382 /* Advertise enough space so that it won't get scaled away.
3383 * Important case: prevent zero window announcement if
3384 * 1<<rcv_wscale > mss.
3385 */
3386 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
3387 } else {
3388 window = tp->rcv_wnd;
3389 /* Get the largest window that is a nice multiple of mss.
3390 * Window clamp already applied above.
3391 * If our current window offering is within 1 mss of the
3392 * free space we just keep it. This prevents the divide
3393 * and multiply from happening most of the time.
3394 * We also don't do any window rounding when the free space
3395 * is too small.
3396 */
3397 if (window <= free_space - mss || window > free_space)
3398 window = rounddown(free_space, mss);
3399 else if (mss == full_space &&
3400 free_space > window + (full_space >> 1))
3401 window = free_space;
3402 }
3403
3404 return window;
3405
3406 shrink_window_allowed:
3407 /* new window should always be an exact multiple of scaling factor */
3408 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3409
3410 if (free_space < (full_space >> 1)) {
3411 icsk->icsk_ack.quick = 0;
3412
3413 if (tcp_under_memory_pressure(sk))
3414 tcp_adjust_rcv_ssthresh(sk);
3415
3416 /* if free space is too low, return a zero window */
3417 if (free_space < (allowed_space >> 4) || free_space < mss ||
3418 free_space < (1 << tp->rx_opt.rcv_wscale))
3419 return 0;
3420 }
3421
3422 if (free_space > tp->rcv_ssthresh) {
3423 free_space = tp->rcv_ssthresh;
3424 /* new window should always be an exact multiple of scaling factor
3425 *
3426 * For this case, we ALIGN "up" (increase free_space) because
3427 * we know free_space is not zero here, it has been reduced from
3428 * the memory-based limit, and rcv_ssthresh is not a hard limit
3429 * (unlike sk_rcvbuf).
3430 */
3431 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale));
3432 }
3433
3434 return free_space;
3435 }
3436
3437 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3438 const struct sk_buff *next_skb)
3439 {
3440 if (unlikely(tcp_has_tx_tstamp(next_skb))) {
3441 const struct skb_shared_info *next_shinfo =
3442 skb_shinfo(next_skb);
3443 struct skb_shared_info *shinfo = skb_shinfo(skb);
3444
3445 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3446 shinfo->tskey = next_shinfo->tskey;
3447 TCP_SKB_CB(skb)->txstamp_ack |=
3448 TCP_SKB_CB(next_skb)->txstamp_ack;
3449 }
3450 }
3451
3452 /* Collapses two adjacent SKB's during retransmission. */
3453 static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
3454 {
3455 struct tcp_sock *tp = tcp_sk(sk);
3456 struct sk_buff *next_skb = skb_rb_next(skb);
3457 int next_skb_size;
3458
3459 next_skb_size = next_skb->len;
3460
3461 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
3462
3463 if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3464 return false;
3465
3466 tcp_highest_sack_replace(sk, next_skb, skb);
3467
3468 /* Update sequence range on original skb. */
3469 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
3470
3471 /* Merge over control information. This moves PSH/FIN etc. over */
3472 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
3473
3474 /* All done, get rid of second SKB and account for it so
3475 * packet counting does not break.
3476 */
3477 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3478 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3479
3480 /* changed transmit queue under us so clear hints */
3481 if (next_skb == tp->retransmit_skb_hint)
3482 tp->retransmit_skb_hint = skb;
3483
3484 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3485
3486 tcp_skb_collapse_tstamp(skb, next_skb);
3487
3488 tcp_rtx_queue_unlink_and_free(next_skb, sk);
3489 return true;
3490 }
3491
3492 /* Check if coalescing SKBs is legal. */
3493 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
3494 {
3495 if (tcp_skb_pcount(skb) > 1)
3496 return false;
3497 if (skb_cloned(skb))
3498 return false;
3499 if (!skb_frags_readable(skb))
3500 return false;
3501 /* Some heuristics for collapsing over SACK'd could be invented */
3502 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3503 return false;
3504
3505 return true;
3506 }
3507
3508 /* Collapse packets in the retransmit queue to create
3509 * fewer packets on the wire. This is only done on retransmission.
3510 */
3511 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
3512 int space)
3513 {
3514 struct tcp_sock *tp = tcp_sk(sk);
3515 struct sk_buff *skb = to, *tmp;
3516 bool first = true;
3517
3518 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
3519 return;
3520 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3521 return;
3522
3523 skb_rbtree_walk_from_safe(skb, tmp) {
3524 if (!tcp_can_collapse(sk, skb))
3525 break;
3526
3527 if (!tcp_skb_can_collapse(to, skb))
3528 break;
3529
3530 space -= skb->len;
3531
3532 if (first) {
3533 first = false;
3534 continue;
3535 }
3536
3537 if (space < 0)
3538 break;
3539
3540 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
3541 break;
3542
3543 if (!tcp_collapse_retrans(sk, to))
3544 break;
3545 }
3546 }
3547
3548 /* This retransmits one SKB. Policy decisions and retransmit queue
3549 * state updates are done by the caller. Returns non-zero if an
3550 * error occurred which prevented the send.
3551 */
3552 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3553 {
3554 struct inet_connection_sock *icsk = inet_csk(sk);
3555 struct tcp_sock *tp = tcp_sk(sk);
3556 unsigned int cur_mss;
3557 int diff, len, err;
3558 int avail_wnd;
3559
3560 /* Inconclusive MTU probe */
3561 if (icsk->icsk_mtup.probe_size)
3562 icsk->icsk_mtup.probe_size = 0;
3563
3564 if (skb_still_in_host_queue(sk, skb)) {
3565 err = -EBUSY;
3566 goto out;
3567 }
3568
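/* If part of this skb has already been acked (seq before snd_una), drop a
 * spurious SYN flag and/or trim the acked prefix before retransmitting.
 */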
3569 start:
3570 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3571 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3572 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
3573 TCP_SKB_CB(skb)->seq++;
3574 goto start;
3575 }
3576 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3577 WARN_ON_ONCE(1);
3578 err = -EINVAL;
3579 goto out;
3580 }
3581 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) {
3582 err = -ENOMEM;
3583 goto out;
3584 }
3585 }
3586
3587 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
3588 err = -EHOSTUNREACH; /* Routing failure or similar. */
3589 goto out;
3590 }
3591
3592 cur_mss = tcp_current_mss(sk);
3593 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3594
3595 /* If the receiver has shrunk its window, and the skb is out of the
3596 * new window, do not retransmit it. The exception is the case when
3597 * the window has shrunk to zero: then our retransmit of one segment
3598 * serves as a zero window probe.
3599 */
3600 if (avail_wnd <= 0) {
3601 if (TCP_SKB_CB(skb)->seq != tp->snd_una) {
3602 err = -EAGAIN;
3603 goto out;
3604 }
3605 avail_wnd = cur_mss;
3606 }
3607
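/* Retransmit at most 'segs' full-size segments, clamped to the space
 * left in the receiver's window.
 */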
3608 len = cur_mss * segs;
3609 if (len > avail_wnd) {
3610 len = rounddown(avail_wnd, cur_mss);
3611 if (!len)
3612 len = avail_wnd;
3613 }
3614 if (skb->len > len) {
3615 if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
3616 cur_mss, GFP_ATOMIC)) {
3617 err = -ENOMEM; /* We'll try again later. */
3618 goto out;
3619 }
3620 } else {
3621 if (skb_unclone_keeptruesize(skb, GFP_ATOMIC)) {
3622 err = -ENOMEM;
3623 goto out;
3624 }
3625
3626 diff = tcp_skb_pcount(skb);
3627 tcp_set_skb_tso_segs(skb, cur_mss);
3628 diff -= tcp_skb_pcount(skb);
3629 if (diff)
3630 tcp_adjust_pcount(sk, skb, diff);
3631 avail_wnd = min_t(int, avail_wnd, cur_mss);
3632 if (skb->len < avail_wnd)
3633 tcp_retrans_try_collapse(sk, skb, avail_wnd);
3634 }
3635
3636 if (!tcp_ecn_mode_pending(tp) || icsk->icsk_retransmits > 1) {
3637 /* RFC3168, section 6.1.1.1. ECN fallback
3638 * As AccECN uses the same SYN flags (+ AE), this check
3639 * covers both cases.
3640 */
3641 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) ==
3642 TCPHDR_SYN_ECN)
3643 tcp_ecn_clear_syn(sk, skb);
3644 }
3645
3646 /* Update global and local TCP statistics. */
3647 segs = tcp_skb_pcount(skb);
3648 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3649 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3650 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3651 tp->total_retrans += segs;
3652 tp->bytes_retrans += skb->len;
3653
3654 /* make sure skb->data is aligned on arches that require it
3655 * and check if ack-trimming & collapsing extended the headroom
3656 * beyond what csum_start can cover.
3657 */
3658 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3659 skb_headroom(skb) >= 0xFFFF)) {
3660 struct sk_buff *nskb;
3661
3662 tcp_skb_tsorted_save(skb) {
3663 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3664 if (nskb) {
3665 nskb->dev = NULL;
3666 err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
3667 } else {
3668 err = -ENOBUFS;
3669 }
3670 } tcp_skb_tsorted_restore(skb);
3671
3672 if (!err) {
3673 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
3674 tcp_rate_skb_sent(sk, skb);
3675 }
3676 } else {
3677 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3678 }
3679
3680 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3681 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3682 TCP_SKB_CB(skb)->seq, segs, err);
3683
3684 if (unlikely(err) && err != -EBUSY)
3685 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3686
3687 /* To avoid taking spuriously low RTT samples based on a timestamp
3688 * for a transmit that never happened, always mark EVER_RETRANS
3689 */
3690 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
3691
3692 out:
3693 trace_tcp_retransmit_skb(sk, skb, err);
3694 return err;
3695 }
3696
3697 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3698 {
3699 struct tcp_sock *tp = tcp_sk(sk);
3700 int err = __tcp_retransmit_skb(sk, skb, segs);
3701
3702 if (err == 0) {
3703 #if FASTRETRANS_DEBUG > 0
3704 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3705 net_dbg_ratelimited("retrans_out leaked\n");
3706 }
3707 #endif
3708 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
3709 tp->retrans_out += tcp_skb_pcount(skb);
3710 }
3711
3712 /* Save stamp of the first (attempted) retransmit. */
3713 if (!tp->retrans_stamp)
3714 tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb);
3715
3716 if (tp->undo_retrans < 0)
3717 tp->undo_retrans = 0;
3718 tp->undo_retrans += tcp_skb_pcount(skb);
3719 return err;
3720 }
3721
3722 /* This gets called after a retransmit timeout, and the initially
3723 * retransmitted data is acknowledged. It tries to continue
3724 * resending the rest of the retransmit queue, until either
3725 * we've sent it all or the congestion window limit is reached.
3726 */
3727 void tcp_xmit_retransmit_queue(struct sock *sk)
3728 {
3729 const struct inet_connection_sock *icsk = inet_csk(sk);
3730 struct sk_buff *skb, *rtx_head, *hole = NULL;
3731 struct tcp_sock *tp = tcp_sk(sk);
3732 bool rearm_timer = false;
3733 u32 max_segs;
3734 int mib_idx;
3735
3736 if (!tp->packets_out)
3737 return;
3738
3739 rtx_head = tcp_rtx_queue_head(sk);
3740 skb = tp->retransmit_skb_hint ?: rtx_head;
3741 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
3742 skb_rbtree_walk_from(skb) {
3743 __u8 sacked;
3744 int segs;
3745
3746 if (tcp_pacing_check(sk))
3747 break;
3748
3749 /* we could do better than to assign each time */
3750 if (!hole)
3751 tp->retransmit_skb_hint = skb;
3752
3753 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
3754 if (segs <= 0)
3755 break;
3756 sacked = TCP_SKB_CB(skb)->sacked;
3757 /* In case tcp_shift_skb_data() has aggregated large skbs,
3758 * we need to make sure we do not send overly big TSO packets
3759 */
3760 segs = min_t(int, segs, max_segs);
3761
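/* Only segments marked lost are retransmitted; stop once every lost
 * segment has been retransmitted, and remember the first unSACKed,
 * not-yet-retransmitted hole for the next pass.
 */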
3762 if (tp->retrans_out >= tp->lost_out) {
3763 break;
3764 } else if (!(sacked & TCPCB_LOST)) {
3765 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
3766 hole = skb;
3767 continue;
3768
3769 } else {
3770 if (icsk->icsk_ca_state != TCP_CA_Loss)
3771 mib_idx = LINUX_MIB_TCPFASTRETRANS;
3772 else
3773 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
3774 }
3775
3776 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
3777 continue;
3778
3779 if (tcp_small_queue_check(sk, skb, 1))
3780 break;
3781
3782 if (tcp_retransmit_skb(sk, skb, segs))
3783 break;
3784
3785 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
3786
3787 if (tcp_in_cwnd_reduction(sk))
3788 tp->prr_out += tcp_skb_pcount(skb);
3789
3790 if (skb == rtx_head &&
3791 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3792 rearm_timer = true;
3793
3794 }
3795 if (rearm_timer)
3796 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3797 inet_csk(sk)->icsk_rto, true);
3798 }
3799
3800 /* Send a FIN. The caller locks the socket for us.
3801 * We should try to send a FIN packet really hard, but eventually give up.
3802 */
3803 void tcp_send_fin(struct sock *sk)
3804 {
3805 struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
3806 struct tcp_sock *tp = tcp_sk(sk);
3807
3808 /* Optimization, tack on the FIN if we have one skb in write queue and
3809 * this skb was not yet sent, or we are under memory pressure.
3810 * Note: in the latter case, the FIN packet will be sent after a timeout,
3811 * as the TCP stack thinks it has already been transmitted.
3812 */
3813 tskb = tail;
3814 if (!tskb && tcp_under_memory_pressure(sk))
3815 tskb = skb_rb_last(&sk->tcp_rtx_queue);
3816
3817 if (tskb) {
3818 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3819 TCP_SKB_CB(tskb)->end_seq++;
3820 tp->write_seq++;
3821 if (!tail) {
3822 /* This means tskb was already sent.
3823 * Pretend we included the FIN on previous transmit.
3824 * We need to set tp->snd_nxt to the value it would have
3825 * if FIN had been sent. This is because retransmit path
3826 * does not change tp->snd_nxt.
3827 */
3828 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3829 return;
3830 }
3831 } else {
3832 skb = alloc_skb_fclone(MAX_TCP_HEADER,
3833 sk_gfp_mask(sk, GFP_ATOMIC |
3834 __GFP_NOWARN));
3835 if (unlikely(!skb))
3836 return;
3837
3838 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3839 skb_reserve(skb, MAX_TCP_HEADER);
3840 sk_forced_mem_schedule(sk, skb->truesize);
3841 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3842 tcp_init_nondata_skb(skb, sk, tp->write_seq,
3843 TCPHDR_ACK | TCPHDR_FIN);
3844 tcp_queue_skb(sk, skb);
3845 }
3846 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3847 }
3848
3849 /* We get here when a process closes a file descriptor (either due to
3850 * an explicit close() or as a byproduct of exit()'ing) and there
3851 * was unread data in the receive queue. This behavior is recommended
3852 * by RFC 2525, section 2.17. -DaveM
3853 */
3854 void tcp_send_active_reset(struct sock *sk, gfp_t priority,
3855 enum sk_rst_reason reason)
3856 {
3857 struct sk_buff *skb;
3858
3859 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3860
3861 /* NOTE: No TCP options attached and we never retransmit this. */
3862 skb = alloc_skb(MAX_TCP_HEADER, priority);
3863 if (!skb) {
3864 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3865 return;
3866 }
3867
3868 /* Reserve space for headers and prepare control bits. */
3869 skb_reserve(skb, MAX_TCP_HEADER);
3870 tcp_init_nondata_skb(skb, sk, tcp_acceptable_seq(sk),
3871 TCPHDR_ACK | TCPHDR_RST);
3872 tcp_mstamp_refresh(tcp_sk(sk));
3873 /* Send it off. */
3874 if (tcp_transmit_skb(sk, skb, 0, priority))
3875 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3876
3877 /* The skb argument of trace_tcp_send_reset() keeps the skb that caused the RST;
3878 * the skb here is different from that troublesome skb, so pass NULL.
3879 */
3880 trace_tcp_send_reset(sk, NULL, reason);
3881 }
3882
3883 /* Send a crossed SYN-ACK during socket establishment.
3884 * WARNING: This routine must only be called when we have already sent
3885 * a SYN packet that crossed the incoming SYN that caused this routine
3886 * to get called. If this assumption fails then the initial rcv_wnd
3887 * and rcv_wscale values will not be correct.
3888 */
3889 int tcp_send_synack(struct sock *sk)
3890 {
3891 struct sk_buff *skb;
3892
3893 skb = tcp_rtx_queue_head(sk);
3894 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3895 pr_err("%s: wrong queue state\n", __func__);
3896 return -EFAULT;
3897 }
3898 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3899 if (skb_cloned(skb)) {
3900 struct sk_buff *nskb;
3901
3902 tcp_skb_tsorted_save(skb) {
3903 nskb = skb_copy(skb, GFP_ATOMIC);
3904 } tcp_skb_tsorted_restore(skb);
3905 if (!nskb)
3906 return -ENOMEM;
3907 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3908 tcp_highest_sack_replace(sk, skb, nskb);
3909 tcp_rtx_queue_unlink_and_free(skb, sk);
3910 __skb_header_release(nskb);
3911 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3912 sk_wmem_queued_add(sk, nskb->truesize);
3913 sk_mem_charge(sk, nskb->truesize);
3914 skb = nskb;
3915 }
3916
3917 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3918 tcp_ecn_send_synack(sk, skb);
3919 }
3920 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3921 }
3922
3923 /**
3924 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3925 * @sk: listener socket
3926 * @dst: dst entry attached to the SYNACK. It is consumed and caller
3927 * should not use it again.
3928 * @req: request_sock pointer
3929 * @foc: cookie for tcp fast open
3930 * @synack_type: Type of synack to prepare
3931 * @syn_skb: SYN packet just received. It could be NULL for rtx case.
3932 */
3933 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3934 struct request_sock *req,
3935 struct tcp_fastopen_cookie *foc,
3936 enum tcp_synack_type synack_type,
3937 struct sk_buff *syn_skb)
3938 {
3939 struct inet_request_sock *ireq = inet_rsk(req);
3940 const struct tcp_sock *tp = tcp_sk(sk);
3941 struct tcp_out_options opts;
3942 struct tcp_key key = {};
3943 struct sk_buff *skb;
3944 int tcp_header_size;
3945 struct tcphdr *th;
3946 int mss;
3947 u64 now;
3948
3949 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3950 if (unlikely(!skb)) {
3951 dst_release(dst);
3952 return NULL;
3953 }
3954 /* Reserve space for headers. */
3955 skb_reserve(skb, MAX_TCP_HEADER);
3956
3957 switch (synack_type) {
3958 case TCP_SYNACK_NORMAL:
3959 case TCP_SYNACK_RETRANS:
3960 skb_set_owner_edemux(skb, req_to_sk(req));
3961 break;
3962 case TCP_SYNACK_COOKIE:
3963 /* Under synflood, we do not attach skb to a socket,
3964 * to avoid false sharing.
3965 */
3966 break;
3967 case TCP_SYNACK_FASTOPEN:
3968 /* sk is a const pointer, because we want to express that multiple
3969 * CPUs might call us concurrently.
3970 * sk->sk_wmem_alloc is an atomic, so we can promote sk to rw.
3971 */
3972 skb_set_owner_w(skb, (struct sock *)sk);
3973 break;
3974 }
3975 skb_dst_set(skb, dst);
3976
3977 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3978
3979 memset(&opts, 0, sizeof(opts));
3980 now = tcp_clock_ns();
3981 #ifdef CONFIG_SYN_COOKIES
3982 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3983 skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
3984 SKB_CLOCK_MONOTONIC);
3985 else
3986 #endif
3987 {
3988 skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
3989 if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
3990 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
3991 }
3992
3993 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
3994 rcu_read_lock();
3995 #endif
3996 if (tcp_rsk_used_ao(req)) {
3997 #ifdef CONFIG_TCP_AO
3998 struct tcp_ao_key *ao_key = NULL;
3999 u8 keyid = tcp_rsk(req)->ao_keyid;
4000 u8 rnext = tcp_rsk(req)->ao_rcv_next;
4001
4002 ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
4003 keyid, -1);
4004 /* If there is no matching key - avoid sending anything,
4005 * especially unsigned segments. It could try harder and look up
4006 * another peer-matching key, but the peer has requested
4007 * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
4008 */
4009 if (unlikely(!ao_key)) {
4010 trace_tcp_ao_synack_no_key(sk, keyid, rnext);
4011 rcu_read_unlock();
4012 kfree_skb(skb);
4013 net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
4014 keyid);
4015 return NULL;
4016 }
4017 key.ao_key = ao_key;
4018 key.type = TCP_KEY_AO;
4019 #endif
4020 } else {
4021 #ifdef CONFIG_TCP_MD5SIG
4022 key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk,
4023 req_to_sk(req));
4024 if (key.md5_key)
4025 key.type = TCP_KEY_MD5;
4026 #endif
4027 }
4028 skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
4029 /* bpf program will be interested in the tcp_flags */
4030 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
4031 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts,
4032 &key, foc, synack_type, syn_skb)
4033 + sizeof(*th);
4034
4035 skb_push(skb, tcp_header_size);
4036 skb_reset_transport_header(skb);
4037
4038 th = (struct tcphdr *)skb->data;
4039 memset(th, 0, sizeof(struct tcphdr));
4040 th->syn = 1;
4041 th->ack = 1;
4042 tcp_ecn_make_synack(req, th, synack_type);
4043 th->source = htons(ireq->ir_num);
4044 th->dest = ireq->ir_rmt_port;
4045 skb->mark = ireq->ir_mark;
4046 skb->ip_summed = CHECKSUM_PARTIAL;
4047 th->seq = htonl(tcp_rsk(req)->snt_isn);
4048 /* XXX data is queued and acked as is. No buffer/window check */
4049 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
4050
4051 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
4052 th->window = htons(min(req->rsk_rcv_wnd, 65535U));
4053 tcp_options_write(th, NULL, tcp_rsk(req), &opts, &key);
4054 th->doff = (tcp_header_size >> 2);
4055 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
4056
4057 /* Okay, we have all we need - do the md5 hash if needed */
4058 if (tcp_key_is_md5(&key)) {
4059 #ifdef CONFIG_TCP_MD5SIG
4060 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
4061 key.md5_key, req_to_sk(req), skb);
4062 #endif
4063 } else if (tcp_key_is_ao(&key)) {
4064 #ifdef CONFIG_TCP_AO
4065 tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location,
4066 key.ao_key, req, skb,
4067 opts.hash_location - (u8 *)th, 0);
4068 #endif
4069 }
4070 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
4071 rcu_read_unlock();
4072 #endif
4073
4074 bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
4075 synack_type, &opts);
4076
4077 skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
4078 tcp_add_tx_delay(skb, tp);
4079
4080 return skb;
4081 }
4082 EXPORT_IPV6_MOD(tcp_make_synack);
4083
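/* If the route carries a congestion-control hint (RTAX_CC_ALGO metric),
 * switch this socket to that algorithm before connecting.
 */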
4084 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
4085 {
4086 struct inet_connection_sock *icsk = inet_csk(sk);
4087 const struct tcp_congestion_ops *ca;
4088 u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
4089
4090 if (ca_key == TCP_CA_UNSPEC)
4091 return;
4092
4093 rcu_read_lock();
4094 ca = tcp_ca_find_key(ca_key);
4095 if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
4096 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
4097 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
4098 icsk->icsk_ca_ops = ca;
4099 }
4100 rcu_read_unlock();
4101 }
4102
4103 /* Do all connect socket setups that can be done AF independent. */
4104 static void tcp_connect_init(struct sock *sk)
4105 {
4106 const struct dst_entry *dst = __sk_dst_get(sk);
4107 struct tcp_sock *tp = tcp_sk(sk);
4108 __u8 rcv_wscale;
4109 u16 user_mss;
4110 u32 rcv_wnd;
4111
4112 /* We'll fix this up when we get a response from the other end.
4113 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
4114 */
4115 tp->tcp_header_len = sizeof(struct tcphdr);
4116 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
4117 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
4118
4119 tcp_ao_connect_init(sk);
4120
4121 /* If the user set TCP_MAXSEG, record it as the MSS clamp */
4122 user_mss = READ_ONCE(tp->rx_opt.user_mss);
4123 if (user_mss)
4124 tp->rx_opt.mss_clamp = user_mss;
4125 tp->max_window = 0;
4126 tcp_mtup_init(sk);
4127 tcp_sync_mss(sk, dst_mtu(dst));
4128
4129 tcp_ca_dst_init(sk, dst);
4130
4131 if (!tp->window_clamp)
4132 WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW));
4133 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
4134
4135 tcp_initialize_rcv_mss(sk);
4136
4137 /* limit the window selection if the user enforces a smaller rx buffer */
4138 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
4139 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
4140 WRITE_ONCE(tp->window_clamp, tcp_full_space(sk));
4141
4142 rcv_wnd = tcp_rwnd_init_bpf(sk);
4143 if (rcv_wnd == 0)
4144 rcv_wnd = dst_metric(dst, RTAX_INITRWND);
4145
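/* The advertised MSS is reduced by the timestamp option size when we
 * remember having used timestamps with this peer (ts_recent_stamp set).
 */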
4146 tcp_select_initial_window(sk, tcp_full_space(sk),
4147 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
4148 &tp->rcv_wnd,
4149 &tp->window_clamp,
4150 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
4151 &rcv_wscale,
4152 rcv_wnd);
4153
4154 tp->rx_opt.rcv_wscale = rcv_wscale;
4155 tp->rcv_ssthresh = tp->rcv_wnd;
4156
4157 WRITE_ONCE(sk->sk_err, 0);
4158 sock_reset_flag(sk, SOCK_DONE);
4159 tp->snd_wnd = 0;
4160 tcp_init_wl(tp, 0);
4161 tcp_write_queue_purge(sk);
4162 tp->snd_una = tp->write_seq;
4163 tp->snd_sml = tp->write_seq;
4164 tp->snd_up = tp->write_seq;
4165 WRITE_ONCE(tp->snd_nxt, tp->write_seq);
4166
4167 if (likely(!tp->repair))
4168 tp->rcv_nxt = 0;
4169 else
4170 tp->rcv_tstamp = tcp_jiffies32;
4171 tp->rcv_wup = tp->rcv_nxt;
4172 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
4173
4174 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
4175 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
4176 tcp_clear_retrans(tp);
4177 }
4178
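/* Queue-side accounting for a locally built SYN (or SYN-data) skb:
 * advance write_seq, charge socket memory and count it as outstanding.
 */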
4179 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
4180 {
4181 struct tcp_sock *tp = tcp_sk(sk);
4182 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
4183
4184 tcb->end_seq += skb->len;
4185 __skb_header_release(skb);
4186 sk_wmem_queued_add(sk, skb->truesize);
4187 sk_mem_charge(sk, skb->truesize);
4188 WRITE_ONCE(tp->write_seq, tcb->end_seq);
4189 tp->packets_out += tcp_skb_pcount(skb);
4190 }
4191
4192 /* Build and send a SYN with data and (cached) Fast Open cookie. However,
4193 * queue a data-only packet after the regular SYN, such that regular SYNs
4194 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
4195 * only the SYN sequence, the data are retransmitted in the first ACK.
4196 * If the cookie is not cached or another error occurs, fall back to sending
4197 * a regular SYN with the Fast Open cookie request option.
4198 */
4199 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
4200 {
4201 struct inet_connection_sock *icsk = inet_csk(sk);
4202 struct tcp_sock *tp = tcp_sk(sk);
4203 struct tcp_fastopen_request *fo = tp->fastopen_req;
4204 struct page_frag *pfrag = sk_page_frag(sk);
4205 struct sk_buff *syn_data;
4206 int space, err = 0;
4207
4208 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
4209 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
4210 goto fallback;
4211
4212 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
4213 * user-MSS. Reserve maximum option space for middleboxes that add
4214 * private TCP options. The cost is reduced data space in SYN :(
4215 */
4216 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
4217 /* Sync mss_cache after updating the mss_clamp */
4218 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
4219
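/* Data space available in the SYN: path-MSS minus the maximum TCP option
 * space, further bounded by the amount of data the caller queued (fo->size).
 */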
4220 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
4221 MAX_TCP_OPTION_SPACE;
4222
4223 space = min_t(size_t, space, fo->size);
4224
4225 if (space &&
4226 !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
4227 pfrag, sk->sk_allocation))
4228 goto fallback;
4229 syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false);
4230 if (!syn_data)
4231 goto fallback;
4232 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
4233 if (space) {
4234 space = min_t(size_t, space, pfrag->size - pfrag->offset);
4235 space = tcp_wmem_schedule(sk, space);
4236 }
4237 if (space) {
4238 space = copy_page_from_iter(pfrag->page, pfrag->offset,
4239 space, &fo->data->msg_iter);
4240 if (unlikely(!space)) {
4241 tcp_skb_tsorted_anchor_cleanup(syn_data);
4242 kfree_skb(syn_data);
4243 goto fallback;
4244 }
4245 skb_fill_page_desc(syn_data, 0, pfrag->page,
4246 pfrag->offset, space);
4247 page_ref_inc(pfrag->page);
4248 pfrag->offset += space;
4249 skb_len_add(syn_data, space);
4250 skb_zcopy_set(syn_data, fo->uarg, NULL);
4251 }
4252 /* No more data pending in inet_wait_for_connect() */
4253 if (space == fo->size)
4254 fo->data = NULL;
4255 fo->copied = space;
4256
4257 tcp_connect_queue_skb(sk, syn_data);
4258 if (syn_data->len)
4259 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
4260
4261 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
4262
4263 skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC);
4264
4265 /* Now the full SYN+DATA has been cloned and sent (or not).
4266 * Remove the SYN from the original skb (syn_data)
4267 * that we keep in the write queue in case of a retransmit, as we
4268 * also have the SYN packet (with no data) in the same queue.
4269 */
4270 TCP_SKB_CB(syn_data)->seq++;
4271 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
4272 if (!err) {
4273 tp->syn_data = (fo->copied > 0);
4274 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
4275 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
4276 goto done;
4277 }
4278
4279 /* data was not sent, put it in write_queue */
4280 __skb_queue_tail(&sk->sk_write_queue, syn_data);
4281 tp->packets_out -= tcp_skb_pcount(syn_data);
4282
4283 fallback:
4284 /* Send a regular SYN with Fast Open cookie request option */
4285 if (fo->cookie.len > 0)
4286 fo->cookie.len = 0;
4287 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
4288 if (err)
4289 tp->syn_fastopen = 0;
4290 done:
4291 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
4292 return err;
4293 }
4294
4295 /* Build a SYN and send it off. */
4296 int tcp_connect(struct sock *sk)
4297 {
4298 struct tcp_sock *tp = tcp_sk(sk);
4299 struct sk_buff *buff;
4300 int err;
4301
4302 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
4303
4304 #if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO)
4305 /* Has to be checked late, after setting daddr/saddr/ops.
4306 * Return error if the peer has both a md5 and a tcp-ao key
4307 * configured as this is ambiguous.
4308 */
4309 if (unlikely(rcu_dereference_protected(tp->md5sig_info,
4310 lockdep_sock_is_held(sk)))) {
4311 bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1);
4312 bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk);
4313 struct tcp_ao_info *ao_info;
4314
4315 ao_info = rcu_dereference_check(tp->ao_info,
4316 lockdep_sock_is_held(sk));
4317 if (ao_info) {
4318 /* This is an extra check: tcp_ao_required() in
4319 * tcp_v{4,6}_parse_md5_keys() should prevent adding
4320 * md5 keys on ao_required socket.
4321 */
4322 needs_ao |= ao_info->ao_required;
4323 WARN_ON_ONCE(ao_info->ao_required && needs_md5);
4324 }
4325 if (needs_md5 && needs_ao)
4326 return -EKEYREJECTED;
4327
4328 /* If we have a matching md5 key and no matching tcp-ao key
4329 * then free up ao_info if allocated.
4330 */
4331 if (needs_md5) {
4332 tcp_ao_destroy_sock(sk, false);
4333 } else if (needs_ao) {
4334 tcp_clear_md5_list(sk);
4335 kfree(rcu_replace_pointer(tp->md5sig_info, NULL,
4336 lockdep_sock_is_held(sk)));
4337 }
4338 }
4339 #endif
4340 #ifdef CONFIG_TCP_AO
4341 if (unlikely(rcu_dereference_protected(tp->ao_info,
4342 lockdep_sock_is_held(sk)))) {
4343 /* Don't allow connecting if ao is configured but no
4344 * matching key is found.
4345 */
4346 if (!tp->af_specific->ao_lookup(sk, sk, -1, -1))
4347 return -EKEYREJECTED;
4348 }
4349 #endif
4350
4351 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
4352 return -EHOSTUNREACH; /* Routing failure or similar. */
4353
4354 tcp_connect_init(sk);
4355
4356 if (unlikely(tp->repair)) {
4357 tcp_finish_connect(sk, NULL);
4358 return 0;
4359 }
4360
4361 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
4362 if (unlikely(!buff))
4363 return -ENOBUFS;
4364
4365 /* SYN eats a sequence byte, write_seq updated by
4366 * tcp_connect_queue_skb().
4367 */
4368 tcp_init_nondata_skb(buff, sk, tp->write_seq, TCPHDR_SYN);
4369 tcp_mstamp_refresh(tp);
4370 tp->retrans_stamp = tcp_time_stamp_ts(tp);
4371 tcp_connect_queue_skb(sk, buff);
4372 tcp_ecn_send_syn(sk, buff);
4373 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
4374
4375 /* Send off SYN; include data in Fast Open. */
4376 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
4377 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
4378 if (err == -ECONNREFUSED)
4379 return err;
4380
4381 /* We change tp->snd_nxt after the tcp_transmit_skb() call
4382 * in order to make this packet get counted in tcpOutSegs.
4383 */
4384 WRITE_ONCE(tp->snd_nxt, tp->write_seq);
4385 tp->pushed_seq = tp->write_seq;
4386 buff = tcp_send_head(sk);
4387 if (unlikely(buff)) {
4388 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
4389 tp->pushed_seq = TCP_SKB_CB(buff)->seq;
4390 }
4391 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
4392
4393 /* Timer for repeating the SYN until an answer. */
4394 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
4395 inet_csk(sk)->icsk_rto, false);
4396 return 0;
4397 }
4398 EXPORT_SYMBOL(tcp_connect);
4399
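/* The delayed-ACK timeout is bounded by both icsk_delack_max and
 * (min RTO - 1 jiffy), so delayed ACKs stay shorter than the smallest RTO.
 */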
4400 u32 tcp_delack_max(const struct sock *sk)
4401 {
4402 u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1;
4403
4404 return min(READ_ONCE(inet_csk(sk)->icsk_delack_max), delack_from_rto_min);
4405 }
4406
4407 /* Send out a delayed ack, the caller does the policy checking
4408 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
4409 * for details.
4410 */
4411 void tcp_send_delayed_ack(struct sock *sk)
4412 {
4413 struct inet_connection_sock *icsk = inet_csk(sk);
4414 int ato = icsk->icsk_ack.ato;
4415 unsigned long timeout;
4416
4417 if (ato > TCP_DELACK_MIN) {
4418 const struct tcp_sock *tp = tcp_sk(sk);
4419 int max_ato = HZ / 2;
4420
4421 if (inet_csk_in_pingpong_mode(sk) ||
4422 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
4423 max_ato = TCP_DELACK_MAX;
4424
4425 /* Slow path, intersegment interval is "high". */
4426
4427 /* If some rtt estimate is known, use it to bound delayed ack.
4428 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
4429 * directly.
4430 */
4431 if (tp->srtt_us) {
4432 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
4433 TCP_DELACK_MIN);
4434
4435 if (rtt < max_ato)
4436 max_ato = rtt;
4437 }
4438
4439 ato = min(ato, max_ato);
4440 }
4441
4442 ato = min_t(u32, ato, tcp_delack_max(sk));
4443
4444 /* Stay within the limit we were given */
4445 timeout = jiffies + ato;
4446
4447 /* Use the new timeout only if there wasn't an older one already pending. */
4448 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
4449 /* If delack timer is about to expire, send ACK now. */
4450 if (time_before_eq(icsk_delack_timeout(icsk), jiffies + (ato >> 2))) {
4451 tcp_send_ack(sk);
4452 return;
4453 }
4454
4455 if (!time_before(timeout, icsk_delack_timeout(icsk)))
4456 timeout = icsk_delack_timeout(icsk);
4457 }
4458 smp_store_release(&icsk->icsk_ack.pending,
4459 icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
4460 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
4461 }
4462
4463 /* This routine sends an ack and also updates the window. */
4464 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags)
4465 {
4466 struct sk_buff *buff;
4467
4468 /* If we have been reset, we may not send again. */
4469 if (sk->sk_state == TCP_CLOSE)
4470 return;
4471
4472 /* We are not putting this on the write queue, so
4473 * tcp_transmit_skb() will set the ownership to this
4474 * sock.
4475 */
4476 buff = alloc_skb(MAX_TCP_HEADER,
4477 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
4478 if (unlikely(!buff)) {
4479 struct inet_connection_sock *icsk = inet_csk(sk);
4480 unsigned long delay;
4481
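/* Allocation failed: arm the delayed-ACK timer with exponential
 * backoff instead of losing the ACK.
 */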
4482 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
4483 if (delay < tcp_rto_max(sk))
4484 icsk->icsk_ack.retry++;
4485 inet_csk_schedule_ack(sk);
4486 icsk->icsk_ack.ato = TCP_ATO_MIN;
4487 tcp_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, false);
4488 return;
4489 }
4490
4491 /* Reserve space for headers and prepare control bits. */
4492 skb_reserve(buff, MAX_TCP_HEADER);
4493 tcp_init_nondata_skb(buff, sk,
4494 tcp_acceptable_seq(sk), TCPHDR_ACK | flags);
4495
4496 /* We do not want pure acks influencing TCP Small Queues or fq/pacing
4497 * too much.
4498 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
4499 */
4500 skb_set_tcp_pure_ack(buff);
4501
4502 /* Send it off, this clears delayed acks for us. */
4503 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
4504 }
4505 EXPORT_SYMBOL_GPL(__tcp_send_ack);
4506
4507 void tcp_send_ack(struct sock *sk)
4508 {
4509 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt, 0);
4510 }
4511
4512 /* This routine sends a packet with an out of date sequence
4513 * number. It assumes the other end will try to ack it.
4514 *
4515 * Question: what should we do while in urgent mode?
4516 * 4.4BSD forces sending a single byte of data. We cannot send
4517 * out-of-window data, because we have SND.NXT==SND.MAX...
4518 *
4519 * Current solution: send TWO zero-length segments in urgent mode:
4520 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
4521 * out-of-date one with SND.UNA-1 to probe the window.
4522 */
4523 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
4524 {
4525 struct tcp_sock *tp = tcp_sk(sk);
4526 struct sk_buff *skb;
4527
4528 /* We don't queue it, tcp_transmit_skb() sets ownership. */
4529 skb = alloc_skb(MAX_TCP_HEADER,
4530 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
4531 if (!skb)
4532 return -1;
4533
4534 /* Reserve space for headers and set control bits. */
4535 skb_reserve(skb, MAX_TCP_HEADER);
4536 /* Use a previous sequence. This should cause the other
4537 * end to send an ack. Don't queue or clone SKB, just
4538 * send it.
4539 */
4540 tcp_init_nondata_skb(skb, sk, tp->snd_una - !urgent, TCPHDR_ACK);
4541 NET_INC_STATS(sock_net(sk), mib);
4542 return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
4543 }
4544
4545 /* Called from setsockopt( ... TCP_REPAIR ) */
4546 void tcp_send_window_probe(struct sock *sk)
4547 {
4548 if (sk->sk_state == TCP_ESTABLISHED) {
4549 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
4550 tcp_mstamp_refresh(tcp_sk(sk));
4551 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4552 }
4553 }
4554
4555 /* Initiate keepalive or window probe from timer. */
4556 int tcp_write_wakeup(struct sock *sk, int mib)
4557 {
4558 struct tcp_sock *tp = tcp_sk(sk);
4559 struct sk_buff *skb;
4560
4561 if (sk->sk_state == TCP_CLOSE)
4562 return -1;
4563
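/* Prefer sending real data from the head of the write queue if the
 * peer's window allows it; otherwise send a bare probe using an old
 * sequence number to elicit an ACK.
 */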
4564 skb = tcp_send_head(sk);
4565 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
4566 int err;
4567 unsigned int mss = tcp_current_mss(sk);
4568 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
4569
4570 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
4571 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
4572
4573 /* We are probing the opening of a window
4574 * but the window size is != 0; this must have been
4575 * the result of sender-side SWS avoidance.
4576 */
4577 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
4578 skb->len > mss) {
4579 seg_size = min(seg_size, mss);
4580 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4581 if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
4582 skb, seg_size, mss, GFP_ATOMIC))
4583 return -1;
4584 } else if (!tcp_skb_pcount(skb))
4585 tcp_set_skb_tso_segs(skb, mss);
4586
4587 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4588 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
4589 if (!err)
4590 tcp_event_new_data_sent(sk, skb);
4591 return err;
4592 } else {
4593 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4594 tcp_xmit_probe_skb(sk, 1, mib);
4595 return tcp_xmit_probe_skb(sk, 0, mib);
4596 }
4597 }
4598
4599 /* A window probe timeout has occurred. If the window is not closed,
4600 * send a partial packet, else send a zero-window probe.
4601 */
4602 void tcp_send_probe0(struct sock *sk)
4603 {
4604 struct inet_connection_sock *icsk = inet_csk(sk);
4605 struct tcp_sock *tp = tcp_sk(sk);
4606 struct net *net = sock_net(sk);
4607 unsigned long timeout;
4608 int err;
4609
4610 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
4611
4612 if (tp->packets_out || tcp_write_queue_empty(sk)) {
4613 /* Cancel probe timer, if it is not required. */
4614 WRITE_ONCE(icsk->icsk_probes_out, 0);
4615 icsk->icsk_backoff = 0;
4616 icsk->icsk_probes_tstamp = 0;
4617 return;
4618 }
4619
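/* A probe is outstanding: back off the probe timer exponentially
 * (bounded via tcp_retries2), or retry sooner if the probe could not
 * be sent because of local congestion.
 */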
4620 WRITE_ONCE(icsk->icsk_probes_out, icsk->icsk_probes_out + 1);
4621 if (err <= 0) {
4622 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
4623 icsk->icsk_backoff++;
4624 timeout = tcp_probe0_when(sk, tcp_rto_max(sk));
4625 } else {
4626 /* If the packet was not sent due to local congestion,
4627 * let senders fight for local resources conservatively.
4628 */
4629 timeout = TCP_RESOURCE_PROBE_INTERVAL;
4630 }
4631
4632 timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
4633 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, true);
4634 }
4635
4636 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
4637 {
4638 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
4639 struct flowi fl;
4640 int res;
4641
4642 /* Paired with WRITE_ONCE() in sock_setsockopt() */
4643 if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
4644 WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
4645 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_RETRANS,
4646 NULL);
4647 if (!res) {
4648 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
4649 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4650 if (unlikely(tcp_passive_fastopen(sk))) {
4651 /* sk has const attribute because listeners are lockless.
4652 * However in this case, we are dealing with a passive fastopen
4653 * socket thus we can change total_retrans value.
4654 */
4655 tcp_sk_rw(sk)->total_retrans++;
4656 }
4657 trace_tcp_retransmit_synack(sk, req);
4658 WRITE_ONCE(req->num_retrans, req->num_retrans + 1);
4659 }
4660 return res;
4661 }
4662 EXPORT_IPV6_MOD(tcp_rtx_synack);
4663