// SPDX-License-Identifier: GPL-2.0
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *              Pedro Roque     :       Fast Retransmit/Recovery.
 *                                      Two receive queues.
 *                                      Retransmit queue handled by TCP.
 *                                      Better retransmit timer handling.
 *                                      New congestion avoidance.
 *                                      Header prediction.
 *                                      Variable renaming.
 *
 *              Eric            :       Fast Retransmit.
 *              Randy Scott     :       MSS option defines.
 *              Eric Schenk     :       Fixes to slow start algorithm.
 *              Eric Schenk     :       Yet another double ACK bug.
 *              Eric Schenk     :       Delayed ACK bug fixes.
 *              Eric Schenk     :       Floyd style fast retrans war avoidance.
 *              David S. Miller :       Don't allow zero congestion window.
 *              Eric Schenk     :       Fix retransmitter so that it sends
 *                                      next packet on ack of previous packet.
 *              Andi Kleen      :       Moved open_request checking here
 *                                      and process RSTs for open_requests.
 *              Andi Kleen      :       Better prune_queue, and other fixes.
 *              Andrey Savochkin:       Fix RTT measurements in the presence of
 *                                      timestamps.
 *              Andrey Savochkin:       Check sequence numbers correctly when
 *                                      removing SACKs due to in sequence incoming
 *                                      data segments.
 *              Andi Kleen:             Make sure we never ack data there is not
 *                                      enough room for. Also make this condition
 *                                      a fatal error if it might still happen.
 *              Andi Kleen:             Add tcp_measure_rcv_mss to make
 *                                      connections with MSS<min(MTU,ann. MSS)
 *                                      work without delayed acks.
 *              Andi Kleen:             Process packets with PSH set in the
 *                                      fast path.
 *              J Hadi Salim:           ECN support
 *              Andrei Gurtov,
 *              Pasi Sarolahti,
 *              Panu Kuhlberg:          Experimental audit of TCP (re)transmission
 *                                      engine. Lots of bugs are found.
 *              Pasi Sarolahti:         F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/proto_memory.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <linux/unaligned.h>
#include <linux/errqueue.h>
#include <trace/events/tcp.h>
#include <linux/jump_label_ratelimit.h>
#include <net/busy_poll.h>
#include <net/mptcp.h>

int sysctl_tcp_max_orphans __read_mostly = NR_FILE;

#define FLAG_DATA               0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE         0x02 /* Incoming ACK was a window update. */
#define FLAG_DATA_ACKED         0x04 /* This ACK acknowledged new data. */
#define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */
#define FLAG_SYN_ACKED          0x10 /* This ACK acknowledged SYN. */
#define FLAG_DATA_SACKED        0x20 /* New SACK. */
#define FLAG_ECE                0x40 /* ECE in this ACK */
#define FLAG_LOST_RETRANS       0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH           0x100 /* Do not skip RFC checks for window update. */
#define FLAG_ORIG_SACK_ACKED    0x200 /* Never retransmitted data are (s)acked */
#define FLAG_SND_UNA_ADVANCED   0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK       0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SET_XMIT_TIMER     0x1000 /* Set TLP or RTO timer */
#define FLAG_SACK_RENEGING      0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT   0x4000 /* tcp_replace_ts_recent() */
#define FLAG_NO_CHALLENGE_ACK   0x8000 /* do not call tcp_send_challenge_ack() */
#define FLAG_ACK_MAYBE_DELAYED  0x10000 /* Likely a delayed ACK */
#define FLAG_DSACK_TLP          0x20000 /* DSACK for tail loss probe */
#define FLAG_TS_PROGRESS        0x40000 /* Positive timestamp delta */

#define FLAG_ACKED              (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP            (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT           (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
#define FLAG_FORWARD_PROGRESS   (FLAG_ACKED|FLAG_DATA_SACKED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

#define REXMIT_NONE     0 /* no loss recovery to do */
#define REXMIT_LOST     1 /* retransmit packets marked lost */
#define REXMIT_NEW      2 /* FRTO-style transmit of unsent/new packets */

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);

void clean_acked_data_enable(struct tcp_sock *tp,
                             void (*cad)(struct sock *sk, u32 ack_seq))
{
        tp->tcp_clean_acked = cad;
        static_branch_deferred_inc(&clean_acked_data_enabled);
}
EXPORT_SYMBOL_GPL(clean_acked_data_enable);

void clean_acked_data_disable(struct tcp_sock *tp)
{
        static_branch_slow_dec_deferred(&clean_acked_data_enabled);
        tp->tcp_clean_acked = NULL;
}
EXPORT_SYMBOL_GPL(clean_acked_data_disable);

void clean_acked_data_flush(void)
{
        static_key_deferred_flush(&clean_acked_data_enabled);
}
EXPORT_SYMBOL_GPL(clean_acked_data_flush);
#endif

#ifdef CONFIG_CGROUP_BPF
static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
{
        bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&
                           BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
                                                  BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
        bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
                                                    BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
        struct bpf_sock_ops_kern sock_ops;

        if (likely(!unknown_opt && !parse_all_opt))
                return;

        /* The skb will be handled in the
         * bpf_skops_established() or
         * bpf_skops_write_hdr_opt().
         */
        switch (sk->sk_state) {
        case TCP_SYN_RECV:
        case TCP_SYN_SENT:
        case TCP_LISTEN:
                return;
        }

        sock_owned_by_me(sk);

        memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
        sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
        sock_ops.is_fullsock = 1;
        sock_ops.is_locked_tcp_sock = 1;
        sock_ops.sk = sk;
        bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));

        BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
}

static void bpf_skops_established(struct sock *sk, int bpf_op,
                                  struct sk_buff *skb)
{
        struct bpf_sock_ops_kern sock_ops;

        sock_owned_by_me(sk);

        memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
        sock_ops.op = bpf_op;
        sock_ops.is_fullsock = 1;
        sock_ops.is_locked_tcp_sock = 1;
        sock_ops.sk = sk;
        /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
        if (skb)
                bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));

        BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
}
#else
static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
{
}

static void bpf_skops_established(struct sock *sk, int bpf_op,
                                  struct sk_buff *skb)
{
}
#endif

static __cold void tcp_gro_dev_warn(const struct sock *sk, const struct sk_buff *skb,
                                    unsigned int len)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
        if (!dev || len >= READ_ONCE(dev->mtu))
                pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
                        dev ? dev->name : "Unknown driver");
        rcu_read_unlock();
}

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const unsigned int lss = icsk->icsk_ack.last_seg_size;
        unsigned int len;

        icsk->icsk_ack.last_seg_size = 0;

        /* skb->len may jitter because of SACKs, even if peer
         * sends good full-sized frames.
         */
        len = skb_shinfo(skb)->gso_size ? : skb->len;
        if (len >= icsk->icsk_ack.rcv_mss) {
                /* Note: divides are still a bit expensive.
                 * For the moment, only adjust scaling_ratio
                 * when we update icsk_ack.rcv_mss.
                 */
                if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
                        u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
                        u8 old_ratio = tcp_sk(sk)->scaling_ratio;

                        do_div(val, skb->truesize);
                        tcp_sk(sk)->scaling_ratio = val ? val : 1;

                        if (old_ratio != tcp_sk(sk)->scaling_ratio) {
                                struct tcp_sock *tp = tcp_sk(sk);

                                val = tcp_win_from_space(sk, sk->sk_rcvbuf);
                                tcp_set_window_clamp(sk, val);

                                if (tp->window_clamp < tp->rcvq_space.space)
                                        tp->rcvq_space.space = tp->window_clamp;
                        }
                }
                icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
                                               tcp_sk(sk)->advmss);
                /* Account for possibly-removed options */
                DO_ONCE_LITE_IF(len > icsk->icsk_ack.rcv_mss + MAX_TCP_OPTION_SPACE,
                                tcp_gro_dev_warn, sk, skb, len);
                /* If the skb has a len of exactly 1*MSS and has the PSH bit
                 * set then it is likely the end of an application write. So
                 * more data may not be arriving soon, and yet the data sender
                 * may be waiting for an ACK if cwnd-bound or using TX zero
                 * copy. So we set ICSK_ACK_PUSHED here so that
                 * tcp_cleanup_rbuf() will send an ACK immediately if the app
                 * reads all of the data and is not ping-pong. If len > MSS
                 * then this logic does not matter (and does not hurt) because
                 * tcp_cleanup_rbuf() will always ACK immediately if the app
                 * reads data and there is more than an MSS of unACKed data.
                 */
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
                        icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
        } else {
                /* Otherwise, we make a more careful check, taking into
                 * account that the size of the SACK blocks is variable.
                 *
                 * "len" is the invariant segment length, including the
                 * TCP header.
                 */
                len += skb->data - skb_transport_header(skb);
                if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
                    /* If PSH is not set, the packet should be
                     * full sized, provided the peer TCP is not badly broken.
                     * This observation (if it is correct 8)) allows us
                     * to handle super-low mtu links fairly.
                     */
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
                     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
                        /* Subtract also the invariant (if the peer is
                         * RFC compliant) TCP header plus the fixed
                         * timestamp option length.
                         * The resulting "len" is the MSS, free of SACK jitter.
                         */
                        len -= tcp_sk(sk)->tcp_header_len;
                        icsk->icsk_ack.last_seg_size = len;
                        if (len == lss) {
                                icsk->icsk_ack.rcv_mss = len;
                                return;
                        }
                }
                if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
                        icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
                icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
        }
}
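
/* Illustrative sketch of the scaling_ratio math above, with made-up
 * numbers (not from any real trace), assuming TCP_RMEM_TO_WIN_SCALE
 * is 8: a driver hands us a 1448-byte payload in an skb whose
 * truesize is 2304 bytes. Then
 *
 *      val = (1448 << 8) / 2304 = 370688 / 2304 = 160
 *
 * so scaling_ratio becomes 160/256, i.e. ~62% of rcvbuf bytes are
 * expected to hold actual payload, and tcp_win_from_space() of a
 * 131072-byte rcvbuf would be 131072 * 160 / 256 = 81920 bytes.
 */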

static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

        if (quickacks == 0)
                quickacks = 2;
        quickacks = min(quickacks, max_quickacks);
        if (quickacks > icsk->icsk_ack.quick)
                icsk->icsk_ack.quick = quickacks;
}

static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        tcp_incr_quickack(sk, max_quickacks);
        inet_csk_exit_pingpong_mode(sk);
        icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static bool tcp_in_quickack_mode(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        return icsk->icsk_ack.dst_quick_ack ||
               (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
}

static void tcp_data_ecn_check(struct sock *sk, const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_ecn_disabled(tp))
                return;

        switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
        case INET_ECN_NOT_ECT:
                /* Funny extension: if ECT is not set on a segment,
                 * and we have already seen ECT on a previous segment,
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN)
                        tcp_enter_quickack_mode(sk, 2);
                break;
        case INET_ECN_CE:
                if (tcp_ca_needs_ecn(sk))
                        tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);

                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR) &&
                    tcp_ecn_mode_rfc3168(tp)) {
                        /* Better not delay acks, sender can have a very low cwnd */
                        tcp_enter_quickack_mode(sk, 2);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                /* As for RFC3168 ECN, the TCP_ECN_SEEN flag is set by
                 * tcp_data_ecn_check() when the ECN codepoint of
                 * received TCP data contains ECT(0), ECT(1), or CE.
                 */
                if (!tcp_ecn_mode_rfc3168(tp))
                        break;
                tp->ecn_flags |= TCP_ECN_SEEN;
                break;
        default:
                if (tcp_ca_needs_ecn(sk))
                        tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
                if (!tcp_ecn_mode_rfc3168(tp))
                        break;
                tp->ecn_flags |= TCP_ECN_SEEN;
                break;
        }
}

/* Returns true if the byte counters can be used */
static bool tcp_accecn_process_option(struct tcp_sock *tp,
                                      const struct sk_buff *skb,
                                      u32 delivered_bytes, int flag)
{
        u8 estimate_ecnfield = tp->est_ecnfield;
        bool ambiguous_ecn_bytes_incr = false;
        bool first_changed = false;
        unsigned int optlen;
        bool order1, res;
        unsigned int i;
        u8 *ptr;

        if (tcp_accecn_opt_fail_recv(tp))
                return false;

        if (!(flag & FLAG_SLOWPATH) || !tp->rx_opt.accecn) {
                if (!tp->saw_accecn_opt) {
                        /* Too late to enable after this point due to
                         * potential counter wraps
                         */
                        if (tp->bytes_sent >= (1 << 23) - 1) {
                                u8 saw_opt = TCP_ACCECN_OPT_FAIL_SEEN;

                                tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
                        }
                        return false;
                }

                if (estimate_ecnfield) {
                        u8 ecnfield = estimate_ecnfield - 1;

                        tp->delivered_ecn_bytes[ecnfield] += delivered_bytes;
                        return true;
                }
                return false;
        }

        ptr = skb_transport_header(skb) + tp->rx_opt.accecn;
        optlen = ptr[1] - 2;
        if (WARN_ON_ONCE(ptr[0] != TCPOPT_ACCECN0 && ptr[0] != TCPOPT_ACCECN1))
                return false;
        order1 = (ptr[0] == TCPOPT_ACCECN1);
        ptr += 2;

        if (tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
                tp->saw_accecn_opt = tcp_accecn_option_init(skb,
                                                            tp->rx_opt.accecn);
                if (tp->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN)
                        tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_RECV);
        }

        res = !!estimate_ecnfield;
        for (i = 0; i < 3; i++) {
                u32 init_offset;
                u8 ecnfield;
                s32 delta;
                u32 *cnt;

                if (optlen < TCPOLEN_ACCECN_PERFIELD)
                        break;

                ecnfield = tcp_accecn_optfield_to_ecnfield(i, order1);
                init_offset = tcp_accecn_field_init_offset(ecnfield);
                cnt = &tp->delivered_ecn_bytes[ecnfield - 1];
                delta = tcp_update_ecn_bytes(cnt, ptr, init_offset);
                if (delta && delta < 0) {
                        res = false;
                        ambiguous_ecn_bytes_incr = true;
                }
                if (delta && ecnfield != estimate_ecnfield) {
                        if (!first_changed) {
                                tp->est_ecnfield = ecnfield;
                                first_changed = true;
                        } else {
                                res = false;
                                ambiguous_ecn_bytes_incr = true;
                        }
                }

                optlen -= TCPOLEN_ACCECN_PERFIELD;
                ptr += TCPOLEN_ACCECN_PERFIELD;
        }
        if (ambiguous_ecn_bytes_incr)
                tp->est_ecnfield = 0;

        return res;
}

static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count)
{
        tp->delivered_ce += ecn_count;
}

/* Updates the delivered and delivered_ce counts */
static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
                                bool ece_ack)
{
        tp->delivered += delivered;
        if (tcp_ecn_mode_rfc3168(tp) && ece_ack)
                tcp_count_delivered_ce(tp, delivered);
}

#define PKTS_ACKED_WEIGHT       6
#define PKTS_ACKED_PREC         6
#define ACK_COMP_THRESH         4

/* Returns the ECN CE delta */
static u32 __tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
                                u32 delivered_pkts, u32 delivered_bytes,
                                int flag)
{
        u32 old_ceb = tcp_sk(sk)->delivered_ecn_bytes[INET_ECN_CE - 1];
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 delta, safe_delta, d_ceb;
        bool opt_deltas_valid;
        u32 corrected_ace;
        u32 ewma;

        /* Reordered ACK or uncertain due to lack of data to send and ts */
        if (!(flag & (FLAG_FORWARD_PROGRESS | FLAG_TS_PROGRESS)))
                return 0;

        opt_deltas_valid = tcp_accecn_process_option(tp, skb,
                                                     delivered_bytes, flag);

        if (delivered_pkts) {
                if (!tp->pkts_acked_ewma) {
                        ewma = delivered_pkts << PKTS_ACKED_PREC;
                } else {
                        ewma = tp->pkts_acked_ewma;
                        ewma = (((ewma << PKTS_ACKED_WEIGHT) - ewma) +
                                (delivered_pkts << PKTS_ACKED_PREC)) >>
                               PKTS_ACKED_WEIGHT;
                }
                tp->pkts_acked_ewma = min_t(u32, ewma, 0xFFFFU);
        }

        if (!(flag & FLAG_SLOWPATH)) {
                /* AccECN counter might overflow on large ACKs */
                if (delivered_pkts <= TCP_ACCECN_CEP_ACE_MASK)
                        return 0;
        }

        /* ACE field is not available during handshake */
        if (flag & FLAG_SYN_ACKED)
                return 0;

        if (tp->received_ce_pending >= TCP_ACCECN_ACE_MAX_DELTA)
                inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;

        corrected_ace = tcp_accecn_ace(th) - TCP_ACCECN_CEP_INIT_OFFSET;
        delta = (corrected_ace - tp->delivered_ce) & TCP_ACCECN_CEP_ACE_MASK;
        if (delivered_pkts <= TCP_ACCECN_CEP_ACE_MASK)
                return delta;

        safe_delta = delivered_pkts -
                     ((delivered_pkts - delta) & TCP_ACCECN_CEP_ACE_MASK);

        if (opt_deltas_valid) {
                d_ceb = tp->delivered_ecn_bytes[INET_ECN_CE - 1] - old_ceb;
                if (!d_ceb)
                        return delta;

                if ((delivered_pkts >= (TCP_ACCECN_CEP_ACE_MASK + 1) * 2) &&
                    (tcp_is_sack(tp) ||
                     ((1 << inet_csk(sk)->icsk_ca_state) &
                      (TCPF_CA_Open | TCPF_CA_CWR)))) {
                        u32 est_d_cep;

                        if (delivered_bytes <= d_ceb)
                                return safe_delta;

                        est_d_cep = DIV_ROUND_UP_ULL((u64)d_ceb *
                                                     delivered_pkts,
                                                     delivered_bytes);
                        return min(safe_delta,
                                   delta +
                                   (est_d_cep & ~TCP_ACCECN_CEP_ACE_MASK));
                }

                if (d_ceb > delta * tp->mss_cache)
                        return safe_delta;
                if (d_ceb <
                    safe_delta * tp->mss_cache >> TCP_ACCECN_SAFETY_SHIFT)
                        return delta;
        } else if (tp->pkts_acked_ewma > (ACK_COMP_THRESH << PKTS_ACKED_PREC))
                return delta;

        return safe_delta;
}
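
/* A worked example of the ACE delta arithmetic above, with hypothetical
 * numbers (TCP_ACCECN_CEP_ACE_MASK is assumed to be 0x7, i.e. a 3-bit
 * ACE field): suppose tp->delivered_ce is 12 (so 12 & 0x7 = 4 is what
 * the peer last reflected), the incoming corrected ACE value is 1, and
 * the ACK newly delivered 20 packets. Then
 *
 *      delta      = (1 - 12) & 0x7 = 5
 *      safe_delta = 20 - ((20 - 5) & 0x7) = 20 - 7 = 13
 *
 * Because only 8 increments are representable per ACK, 20 delivered
 * packets could mean either 5 or 13 CE marks; the option byte counters
 * (d_ceb) and the pkts_acked_ewma heuristic arbitrate between "delta"
 * and "safe_delta" in that ambiguous case.
 */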

static u32 tcp_accecn_process(struct sock *sk, const struct sk_buff *skb,
                              u32 delivered_pkts, u32 delivered_bytes,
                              int *flag)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 delta;

        delta = __tcp_accecn_process(sk, skb, delivered_pkts,
                                     delivered_bytes, *flag);
        if (delta > 0) {
                tcp_count_delivered_ce(tp, delta);
                *flag |= FLAG_ECE;
                /* Recalculate header predictor */
                if (tp->pred_flags)
                        tcp_fast_path_on(tp);
        }
        return delta;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_sndbuf_expand(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
        int sndmem, per_mss;
        u32 nr_segs;

        /* Worst case is non GSO/TSO : each frame consumes one skb
         * and skb->head is kmalloced using power of two area of memory
         */
        per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
                  MAX_TCP_HEADER +
                  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        per_mss = roundup_pow_of_two(per_mss) +
                  SKB_DATA_ALIGN(sizeof(struct sk_buff));

        nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
        nr_segs = max_t(u32, nr_segs, tp->reordering + 1);

        /* Fast Recovery (RFC 5681 3.2) :
         * Cubic needs 1.7 factor, rounded to 2 to include
         * extra cushion (application might react slowly to EPOLLOUT)
         */
        sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
        sndmem *= nr_segs * per_mss;

        if (sk->sk_sndbuf < sndmem)
                WRITE_ONCE(sk->sk_sndbuf,
                           min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
}
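
/* Rough illustration of the sizing above, with made-up but plausible
 * values: with mss_cache = 1460, MAX_TCP_HEADER around 320 bytes and a
 * ~320-byte aligned skb_shared_info, per_mss is ~2100 bytes, rounded up
 * to the 4096-byte power of two plus the aligned sk_buff struct, so
 * roughly 4.3 KB of memory per in-flight segment. With cwnd = 10
 * (TCP_INIT_CWND) and the default factor of 2, sndmem lands near
 * 2 * 10 * 4352 = ~87 KB, which is then capped by tcp_wmem[2].
 */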

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All tcp_full_space() is split to two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the viewpoint of the network, but
 * the lower the throughput and the higher the sensitivity of the
 * connection to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
                             unsigned int skbtruesize)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
        int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;

        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
                        return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

                truesize >>= 1;
                window >>= 1;
        }
        return 0;
}
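
/* Sketch of the halving loop above, with hypothetical numbers: suppose
 * rcv_ssthresh = 64 KB, the (halved) window from tcp_rmem[2] is 1 MB,
 * and the skb carries 1448 bytes in a 4 KB truesize (2 KB once halved).
 * Iteration 1: 2048 > 1448, halve both -> truesize 1024, window 512 KB.
 * Iteration 2: 1024 <= 1448, so the skb "pays for itself" at this scale
 * and we allow an increase of 2 * rcv_mss. Had the ratio been worse,
 * e.g. a 64 KB truesize for those same 1448 bytes, window would have
 * dropped below rcv_ssthresh after five halvings with truesize still
 * above skb->len, and the function would return 0.
 */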

/* Even if skb appears to have a bad len/truesize ratio, TCP coalescing
 * can play nice with us, as sk_buff and skb->head might be either
 * freed or shared with up to MAX_SKB_FRAGS segments.
 * Only give a boost to drivers using page frag(s) to hold the frame(s),
 * and if no payload was pulled in skb->head before reaching us.
 */
static u32 truesize_adjust(bool adjust, const struct sk_buff *skb)
{
        u32 truesize = skb->truesize;

        if (adjust && !skb_headlen(skb)) {
                truesize -= SKB_TRUESIZE(skb_end_offset(skb));
                /* paranoid check, some drivers might be buggy */
                if (unlikely((int)truesize < (int)skb->len))
                        truesize = skb->truesize;
        }
        return truesize;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
                            bool adjust)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int room;

        room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;

        if (room <= 0)
                return;

        /* Check #1 */
        if (!tcp_under_memory_pressure(sk)) {
                unsigned int truesize = truesize_adjust(adjust, skb);
                int incr;

                /* Check #2. Increase window, if skb with such overhead
                 * will fit to rcvbuf in future.
                 */
                if (tcp_win_from_space(sk, truesize) <= skb->len)
                        incr = 2 * tp->advmss;
                else
                        incr = __tcp_grow_window(sk, skb, truesize);

                if (incr) {
                        incr = max_t(int, incr, 2 * skb->len);
                        tp->rcv_ssthresh += min(room, incr);
                        inet_csk(sk)->icsk_ack.quick |= 1;
                }
        } else {
                /* Under pressure:
                 * Adjust rcv_ssthresh according to reserved mem
                 */
                tcp_adjust_rcv_ssthresh(sk);
        }
}

/* 3. Try to fix up everything. This is done immediately after the
 * connection enters the established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
        int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;

        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
                tcp_sndbuf_expand(sk);

        tcp_mstamp_refresh(tp);
        tp->rcvq_space.time = tp->tcp_mstamp;
        tp->rcvq_space.seq = tp->copied_seq;

        maxwin = tcp_full_space(sk);

        if (tp->window_clamp >= maxwin) {
                WRITE_ONCE(tp->window_clamp, maxwin);

                if (tcp_app_win && maxwin > 4 * tp->advmss)
                        WRITE_ONCE(tp->window_clamp,
                                   max(maxwin - (maxwin >> tcp_app_win),
                                       4 * tp->advmss));
        }

        /* Force reservation of one segment. */
        if (tcp_app_win &&
            tp->window_clamp > 2 * tp->advmss &&
            tp->window_clamp + tp->advmss > maxwin)
                WRITE_ONCE(tp->window_clamp,
                           max(2 * tp->advmss, maxwin - tp->advmss));

        tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
        tp->snd_cwnd_stamp = tcp_jiffies32;
        tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
                                    (u32)TCP_INIT_CWND * tp->advmss);
}

/* 4. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
        int rmem2;

        icsk->icsk_ack.quick = 0;
        rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);

        if (sk->sk_rcvbuf < rmem2 &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                WRITE_ONCE(sk->sk_rcvbuf,
                           min(atomic_read(&sk->sk_rmem_alloc), rmem2));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACKing less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

        hint = min(hint, tp->rcv_wnd / 2);
        hint = min(hint, TCP_MSS_DEFAULT);
        hint = max(hint, TCP_MIN_MSS);

        inet_csk(sk)->icsk_ack.rcv_mss = hint;
}

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <https://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date. A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
        u32 new_sample, old_sample = tp->rcv_rtt_est.rtt_us;
        long m = sample << 3;

        if (old_sample == 0 || m < old_sample) {
                new_sample = m;
        } else {
                /* If we were to take larger samples in the non-timestamp
                 * case, we could grossly overestimate the RTT, especially
                 * with chatty applications or bulk transfer apps which
                 * are stalled on filesystem I/O.
                 *
                 * Also, since we are only going for a minimum in the
                 * non-timestamp case, we do not smooth things out;
                 * else, with timestamps disabled, convergence takes too
                 * long.
                 */
                if (win_dep)
                        return;
                /* Do not use this sample if receive queue is not empty. */
                if (tp->rcv_nxt != tp->copied_seq)
                        return;
                new_sample = old_sample - (old_sample >> 3) + sample;
        }

        tp->rcv_rtt_est.rtt_us = new_sample;
}
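
/* The estimate is kept scaled by 8 (hence "sample << 3"), so the EWMA
 * line above is new = old - old/8 + sample, i.e. a gain of 1/8 on the
 * unscaled value. A quick worked example with invented numbers: if the
 * stored rtt_us is 80000 (a 10 ms estimate, scaled by 8) and a 12 ms
 * sample arrives on the timestamp path, then
 *
 *      new_sample = 80000 - 10000 + 12000 = 82000
 *
 * i.e. the unscaled estimate moves from 10 ms to 10.25 ms, while any
 * sample below the current estimate replaces it outright.
 */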

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
        u32 delta_us;

        if (tp->rcv_rtt_est.time == 0)
                goto new_measure;
        if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                return;
        delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
        if (!delta_us)
                delta_us = 1;
        tcp_rcv_rtt_update(tp, delta_us, 1);

new_measure:
        tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
        tp->rcv_rtt_est.time = tp->tcp_mstamp;
}

static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp, u32 min_delta)
{
        u32 delta, delta_us;

        delta = tcp_time_stamp_ts(tp) - tp->rx_opt.rcv_tsecr;
        if (tp->tcp_usec_ts)
                return delta;

        if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
                if (!delta)
                        delta = min_delta;
                delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
                return delta_us;
        }
        return -1;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
                                          const struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
                return;
        tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;

        if (TCP_SKB_CB(skb)->end_seq -
            TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
                s32 delta = tcp_rtt_tsopt_us(tp, 0);

                if (delta > 0)
                        tcp_rcv_rtt_update(tp, delta, 0);
        }
}

void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
        const struct net *net = sock_net(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 rcvwin, rcvbuf, cap, oldval;
        u32 rtt_threshold, rtt_us;
        u64 grow;

        oldval = tp->rcvq_space.space;
        tp->rcvq_space.space = newval;

        if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
            (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                return;

        /* DRS is always one RTT late. */
        rcvwin = newval << 1;

        rtt_us = tp->rcv_rtt_est.rtt_us >> 3;
        rtt_threshold = READ_ONCE(net->ipv4.sysctl_tcp_rcvbuf_low_rtt);
        if (rtt_us < rtt_threshold) {
                /* For small RTT, we set @grow to rcvwin * rtt_us/rtt_threshold.
                 * It might take a few additional ms to reach 'line rate',
                 * but will avoid sk_rcvbuf inflation and poor cache use.
                 */
                grow = div_u64((u64)rcvwin * rtt_us, rtt_threshold);
        } else {
                /* slow start: allow the sender to double its rate. */
                grow = div_u64(((u64)rcvwin << 1) * (newval - oldval), oldval);
        }
        rcvwin += grow;

        if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
                rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;

        cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);

        rcvbuf = min_t(u32, tcp_space_from_win(sk, rcvwin), cap);
        if (rcvbuf > sk->sk_rcvbuf) {
                WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
                /* Make the window clamp follow along. */
                WRITE_ONCE(tp->window_clamp,
                           tcp_win_from_space(sk, rcvbuf));
        }
}
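
/* Worked example of the growth math above, with invented numbers: say
 * the last RTT delivered newval = 256 KB to the application, up from
 * oldval = 128 KB, and rtt_us is above the low-RTT threshold. Then
 *
 *      rcvwin = 256 KB * 2 = 512 KB
 *      grow   = (512 KB * 2) * (256 KB - 128 KB) / 128 KB = 1024 KB
 *
 * so the target window becomes 1536 KB: enough headroom for the sender
 * to double its rate again next round, before being capped by
 * tcp_rmem[2] and converted to a buffer size via the scaling ratio.
 */
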
/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int time, inq, copied;

        trace_tcp_rcv_space_adjust(sk);

        if (unlikely(!tp->rcv_rtt_est.rtt_us))
                return;

        /* We do not refresh tp->tcp_mstamp here.
         * Some platforms have expensive ktime_get() implementations.
         * Using the last cached value is enough for DRS.
         */
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
        if (time < (tp->rcv_rtt_est.rtt_us >> 3))
                return;

        /* Number of bytes copied to user in last RTT */
        copied = tp->copied_seq - tp->rcvq_space.seq;
        /* Number of bytes in receive queue. */
        inq = tp->rcv_nxt - tp->copied_seq;
        copied -= inq;
        if (copied <= tp->rcvq_space.space)
                goto new_measure;

        trace_tcp_rcvbuf_grow(sk, time);

        tcp_rcvbuf_grow(sk, copied);

new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
        tp->rcvq_space.time = tp->tcp_mstamp;
}

static void tcp_save_lrcv_flowlabel(struct sock *sk, const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (skb->protocol == htons(ETH_P_IPV6))
                icsk->icsk_ack.lrcv_flowlabel = ntohl(ip6_flowlabel(ipv6_hdr(skb)));
#endif
}

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval. When a
 * connection starts up, we want to ack as quickly as possible. The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission. This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time. For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue. -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now;

        inet_csk_schedule_ack(sk);

        tcp_measure_rcv_mss(sk, skb);

        tcp_rcv_rtt_measure(tp);

        now = tcp_jiffies32;

        if (!icsk->icsk_ack.ato) {
                /* The _first_ data packet received, initialize
                 * delayed ACK engine.
                 */
                tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
                icsk->icsk_ack.ato = TCP_ATO_MIN;
        } else {
                int m = now - icsk->icsk_ack.lrcvtime;

                if (m <= TCP_ATO_MIN / 2) {
                        /* The fastest case is the first. */
                        icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
                } else if (m < icsk->icsk_ack.ato) {
                        icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
                        if (icsk->icsk_ack.ato > icsk->icsk_rto)
                                icsk->icsk_ack.ato = icsk->icsk_rto;
                } else if (m > icsk->icsk_rto) {
                        /* Too long gap. Apparently sender failed to
                         * restart window, so that we send ACKs quickly.
                         */
                        tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
                }
        }
        icsk->icsk_ack.lrcvtime = now;
        tcp_save_lrcv_flowlabel(sk, skb);

        tcp_data_ecn_check(sk, skb);

        if (skb->len >= 128)
                tcp_grow_window(sk, skb, true);
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
{
        struct tcp_sock *tp = tcp_sk(sk);
        long m = mrtt_us; /* RTT */
        u32 srtt = tp->srtt_us;

        /* The following amusing code comes from Jacobson's
         * article in SIGCOMM '88. Note that rtt and mdev
         * are scaled versions of rtt and mean deviation.
         * This is designed to be as fast as possible.
         * m stands for "measurement".
         *
         * In a 1990 paper the rto value is changed to:
         * RTO = rtt + 4 * mdev
         *
         * Funny. This algorithm seems to be very broken.
         * These formulae increase RTO, when it should be decreased, increase
         * too slowly, when it should be increased quickly, decrease too quickly
         * etc. I guess in BSD RTO takes ONE value, so that it absolutely
         * does not matter how to _calculate_ it. Seems it was a trap
         * that VJ failed to avoid. 8)
         */
        if (srtt != 0) {
                m -= (srtt >> 3);       /* m is now error in rtt est */
                srtt += m;              /* rtt = 7/8 rtt + 1/8 new */
                if (m < 0) {
                        m = -m;         /* m is now abs(error) */
                        m -= (tp->mdev_us >> 2); /* similar update on mdev */
                        /* This is similar to one of Eifel findings.
                         * Eifel blocks mdev updates when rtt decreases.
                         * This solution is a bit different: we use finer gain
                         * for mdev in this case (alpha*beta).
                         * Like Eifel it also prevents growth of rto,
                         * but also it limits too fast rto decreases,
                         * happening in pure Eifel.
                         */
                        if (m > 0)
                                m >>= 3;
                } else {
                        m -= (tp->mdev_us >> 2); /* similar update on mdev */
                }
                tp->mdev_us += m;       /* mdev = 3/4 mdev + 1/4 new */
                if (tp->mdev_us > tp->mdev_max_us) {
                        tp->mdev_max_us = tp->mdev_us;
                        if (tp->mdev_max_us > tp->rttvar_us)
                                tp->rttvar_us = tp->mdev_max_us;
                }
                if (after(tp->snd_una, tp->rtt_seq)) {
                        if (tp->mdev_max_us < tp->rttvar_us)
                                tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
                        tp->rtt_seq = tp->snd_nxt;
                        tp->mdev_max_us = tcp_rto_min_us(sk);

                        tcp_bpf_rtt(sk, mrtt_us, srtt);
                }
        } else {
                /* no previous measure. */
                srtt = m << 3;          /* take the measured time to be rtt */
                tp->mdev_us = m << 1;   /* make sure rto = 3*rtt */
                tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
                tp->mdev_max_us = tp->rttvar_us;
                tp->rtt_seq = tp->snd_nxt;

                tcp_bpf_rtt(sk, mrtt_us, srtt);
        }
        tp->srtt_us = max(1U, srtt);
}
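
/* Worked example of the scaled arithmetic above, with invented numbers:
 * srtt_us is stored scaled by 8, mdev_us pre-multiplied by 4. Suppose
 * the estimate is 100 ms with a 5 ms mean deviation (srtt = 800000,
 * mdev = 20000) and a 120 ms sample arrives (m = 120000):
 *
 *      m -= srtt >> 3      ->  m = 120000 - 100000 = 20000
 *      srtt += m           ->  srtt = 820000  (102.5 ms, gain 1/8)
 *      m -= mdev >> 2      ->  m = 20000 - 5000 = 15000
 *      mdev += m           ->  mdev = 35000   (8.75 ms, gain 1/4)
 *
 * The RTO then follows as (srtt >> 3) + rttvar ~ 102.5 + 35 = 137.5 ms:
 * the "rtt + 4 * mdev" form from the comment, computed without divides
 * because mdev is kept pre-multiplied by 4.
 */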

void tcp_update_pacing_rate(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        u64 rate;

        /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
        rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);

        /* current rate is (cwnd * mss) / srtt
         * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
         * In Congestion Avoidance phase, set it to 120 % the current rate.
         *
         * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
         *       If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
         *       end of slow start and should slow down.
         */
        if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
                rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
        else
                rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);

        rate *= max(tcp_snd_cwnd(tp), tp->packets_out);

        if (likely(tp->srtt_us))
                do_div(rate, tp->srtt_us);

        /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
         * without any lock. We want to make sure the compiler won't store
         * intermediate values in this location.
         */
        WRITE_ONCE(sk->sk_pacing_rate,
                   min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate)));
}
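
/* Unit-check of the pacing computation above, with invented numbers:
 * mss_cache = 1000 bytes, cwnd = 10, a 100 ms RTT (so srtt_us = 800000
 * with the 8x scaling), in slow start with the default
 * tcp_pacing_ss_ratio of 200. Starting from
 *
 *      rate  = 1000 * (10000 << 3) = 80,000,000
 *      rate *= 200                 -> 16e9
 *      rate *= 10  (cwnd)          -> 160e9
 *      rate /= 800000 (srtt_us)    -> 200,000 bytes/sec
 *
 * which is exactly 2x the raw rate cwnd * mss / rtt = 100,000 B/s. The
 * (USEC_PER_SEC / 100) and << 3 factors fold the percent ratio and the
 * 8x srtt scaling into the constant up front.
 */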

/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
void tcp_set_rto(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        /* Old crap is replaced with new one. 8)
         *
         * More seriously:
         * 1. If rtt variance happened to be less than 50msec, it is hallucination.
         *    It cannot be less due to utterly erratic ACK generation made
         *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
         *    to do with delayed acks, because at cwnd>2 true delack timeout
         *    is invisible. Actually, Linux-2.4 also generates erratic
         *    ACKs in some circumstances.
         */
        inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

        /* 2. Fixups made earlier cannot be right.
         *    If we do not estimate RTO correctly without them,
         *    all the algo is pure shit and should be replaced
         *    with a correct one. That is exactly what we pretend to do.
         */

        /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
         * guarantees that rto is higher.
         */
        tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

        if (!cwnd)
                cwnd = TCP_INIT_CWND;
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

struct tcp_sacktag_state {
        /* Timestamps for earliest and latest never-retransmitted segment
         * that was SACKed. RTO needs the earliest RTT to stay conservative,
         * but congestion control should still get an accurate delay signal.
         */
        u64     first_sackt;
        u64     last_sackt;
        u32     reord;
        u32     sack_delivered;
        u32     delivered_bytes;
        int     flag;
        unsigned int mss_now;
        struct rate_sample *rate;
};

/* Take notice that the peer is sending D-SACKs. Skip update of data delivery
 * and spurious retransmission information if this DSACK is unlikely caused by
 * sender's action:
 * - DSACKed sequence range is larger than maximum receiver's window.
 * - Total no. of DSACKed segments exceed the total no. of retransmitted segs.
 */
static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq,
                          u32 end_seq, struct tcp_sacktag_state *state)
{
        u32 seq_len, dup_segs = 1;

        if (!before(start_seq, end_seq))
                return 0;

        seq_len = end_seq - start_seq;
        /* Dubious DSACK: DSACKed range greater than maximum advertised rwnd */
        if (seq_len > tp->max_window)
                return 0;
        if (seq_len > tp->mss_cache)
                dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache);
        else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)
                state->flag |= FLAG_DSACK_TLP;

        tp->dsack_dups += dup_segs;
        /* Skip the DSACK if dup segs weren't retransmitted by sender */
        if (tp->dsack_dups > tp->total_retrans)
                return 0;

        tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
        /* We increase the RACK ordering window in rounds where we receive
         * DSACKs that may have been due to reordering causing RACK to trigger
         * a spurious fast recovery. Thus RACK ignores DSACKs that happen
         * without having seen reordering, or that match TLP probes (TLP
         * is timer-driven, not triggered by RACK).
         */
        if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP))
                tp->rack.dsack_seen = 1;

        state->flag |= FLAG_DSACKING_ACK;
        /* A spurious retransmission is delivered */
        state->sack_delivered += dup_segs;

        return dup_segs;
}

/* It's reordering when a higher sequence was delivered (i.e. sacked) before
 * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
 * distance is approximated in full-mss packet distance ("reordering").
 */
static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
                                      const int ts)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const u32 mss = tp->mss_cache;
        u32 fack, metric;

        fack = tcp_highest_sack_seq(tp);
        if (!before(low_seq, fack))
                return;

        metric = fack - low_seq;
        if ((metric > tp->reordering * mss) && mss) {
#if FASTRETRANS_DEBUG > 1
                pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
                         tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
                         tp->reordering,
                         0,
                         tp->sacked_out,
                         tp->undo_marker ? tp->undo_retrans : 0);
#endif
                tp->reordering = min_t(u32, (metric + mss - 1) / mss,
                                       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
        }

        /* This exciting event is worth remembering. 8) */
        tp->reord_seen++;
        NET_INC_STATS(sock_net(sk),
                      ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
}
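
/* Quick numeric illustration of the metric above (made-up values): with
 * mss = 1000 and tp->reordering at its usual default of 3, a hole of
 * never-retransmitted data at low_seq = 10000 while the highest SACKed
 * sequence is 15500 gives metric = 5500 > 3 * 1000, so the reordering
 * degree is raised to DIV_ROUND_UP(5500, 1000) = 6 segments, subject to
 * the tcp_max_reordering sysctl cap.
 */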

/* This must be called before lost_out or retrans_out are updated
 * on a new loss, because we want to know if all skbs previously
 * known to be lost have already been retransmitted, indicating
 * that this newly lost skb is our next skb to retransmit.
 */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
        if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
            (tp->retransmit_skb_hint &&
             before(TCP_SKB_CB(skb)->seq,
                    TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
                tp->retransmit_skb_hint = skb;
}

/* Sum the number of packets on the wire we have marked as lost, and
 * notify the congestion control module that the given skb was marked lost.
 */
static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
{
        tp->lost += tcp_skb_pcount(skb);
}

void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
        __u8 sacked = TCP_SKB_CB(skb)->sacked;
        struct tcp_sock *tp = tcp_sk(sk);

        if (sacked & TCPCB_SACKED_ACKED)
                return;

        tcp_verify_retransmit_hint(tp, skb);
        if (sacked & TCPCB_LOST) {
                if (sacked & TCPCB_SACKED_RETRANS) {
                        /* Account for retransmits that are lost again */
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
                        NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
                                      tcp_skb_pcount(skb));
                        tcp_notify_skb_loss_event(tp, skb);
                }
        } else {
                tp->lost_out += tcp_skb_pcount(skb);
                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                tcp_notify_skb_loss_event(tp, skb);
        }
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight        Description
 * 0    1               - orig segment is in flight.
 * S    0               - nothing flies, orig reached receiver.
 * L    0               - nothing flies, orig lost by net.
 * R    2               - both orig and retransmit are in flight.
 * L|R  1               - orig is lost, retransmit is in flight.
 * S|R  1               - orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *      A. Scoreboard estimator decided the packet is lost.
 *         A'. Reno "three dupacks" marks head of queue lost.
 *      B. SACK arrives sacking SND.NXT at the moment, when the
 *         segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by the order of our actions
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is the maximal distance which a packet can be displaced
 * in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering.
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included to the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is
 * perfectly valid, however, in light of RFC2018 which explicitly states
 * that "SACK block MUST reflect the newest segment. Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in case of head skb. Due to potential receiver driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * Implements also blockage to start_seq wrap-around. Problem lies in the
 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
 * there's no guarantee that it will be before snd_nxt (n). The problem
 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
 * wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
                                   u32 start_seq, u32 end_seq)
{
        /* Too far in future, or reversed (interpretation is ambiguous) */
        if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
                return false;

        /* Nasty start_seq wrap-around check (see comments above) */
        if (!before(start_seq, tp->snd_nxt))
                return false;

        /* In outstanding window? ...This is valid exit for D-SACKs too.
         * start_seq == snd_una is non-sensical (see comments above)
         */
        if (after(start_seq, tp->snd_una))
                return true;

        if (!is_dsack || !tp->undo_marker)
                return false;

        /* ...Then it's D-SACK, and must reside below snd_una completely */
        if (after(end_seq, tp->snd_una))
                return false;

        if (!before(start_seq, tp->undo_marker))
                return true;

        /* Too old */
        if (!after(end_seq, tp->undo_marker))
                return false;

        /* Undo_marker boundary crossing (overestimates a lot). Known already:
         * start_seq < undo_marker and end_seq >= undo_marker.
         */
        return !before(start_seq, end_seq - tp->max_window);
}

static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
                            struct tcp_sack_block_wire *sp, int num_sacks,
                            u32 prior_snd_una, struct tcp_sacktag_state *state)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
        u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
        u32 dup_segs;

        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
                u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
                u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

                if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1))
                        return false;
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV);
        } else {
                return false;
        }

        dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
        if (!dup_segs) {        /* Skip dubious DSACK */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS);
                return false;
        }

        NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);

        /* D-SACK for already forgotten data... Do dumb counting. */
        if (tp->undo_marker && tp->undo_retrans > 0 &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs);

        return true;
}
1521
1522 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
1523 * the incoming SACK may not exactly match but we can find smaller MSS
1524 * aligned portion of it that matches. Therefore we might need to fragment
1525 * which may fail and creates some hassle (caller must handle error case
1526 * returns).
1527 *
1528 * FIXME: this could be merged to shift decision code
1529 */
tcp_match_skb_to_sack(struct sock * sk,struct sk_buff * skb,u32 start_seq,u32 end_seq)1530 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1531 u32 start_seq, u32 end_seq)
1532 {
1533 int err;
1534 bool in_sack;
1535 unsigned int pkt_len;
1536 unsigned int mss;
1537
1538 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1539 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1540
1541 if (tcp_skb_pcount(skb) > 1 && !in_sack &&
1542 after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1543 mss = tcp_skb_mss(skb);
1544 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1545
1546 if (!in_sack) {
1547 pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
1548 if (pkt_len < mss)
1549 pkt_len = mss;
1550 } else {
1551 pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
1552 if (pkt_len < mss)
1553 return -EINVAL;
1554 }
1555
1556 /* Round if necessary so that SACKs cover only full MSSes
1557 * and/or the remaining small portion (if present)
1558 */
1559 if (pkt_len > mss) {
1560 unsigned int new_len = (pkt_len / mss) * mss;
1561 if (!in_sack && new_len < pkt_len)
1562 new_len += mss;
1563 pkt_len = new_len;
1564 }
1565
1566 if (pkt_len >= skb->len && !in_sack)
1567 return 0;
1568
1569 err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
1570 pkt_len, mss, GFP_ATOMIC);
1571 if (err < 0)
1572 return err;
1573 }
1574
1575 return in_sack;
1576 }
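/* Example (annotation, not part of the original source): for a GSO skb
 * covering [1000,5000) with mss 1000 and a SACK block [1000,3500), the
 * matching prefix is 2500 bytes; it is rounded down to 2 full MSSes
 * (2000 bytes) and the skb is fragmented at seq 3000, so the SACKed part
 * covers only whole segments.
 */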
1577
1578 /* Record the most recently (re)sent time among the (s)acked packets
1579 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
1580 * draft-cheng-tcpm-rack-00.txt
1581 */
1582 static void tcp_rack_advance(struct tcp_sock *tp, u8 sacked,
1583 u32 end_seq, u64 xmit_time)
1584 {
1585 u32 rtt_us;
1586
1587 rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
1588 if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
1589 /* If the sacked packet was retransmitted, it's ambiguous
1590 * whether the retransmission or the original (or the prior
1591 * retransmission) was sacked.
1592 *
1593 * If the original is lost, there is no ambiguity. Otherwise
1594 * we assume the original can be delayed up to aRTT + min_rtt.
1595 * The aRTT term is bounded by the fast recovery or timeout,
1596 * so it's at least one RTT (i.e., retransmission is at least
1597 * an RTT later).
1598 */
1599 return;
1600 }
1601 tp->rack.advanced = 1;
1602 tp->rack.rtt_us = rtt_us;
1603 if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
1604 end_seq, tp->rack.end_seq)) {
1605 tp->rack.mstamp = xmit_time;
1606 tp->rack.end_seq = end_seq;
1607 }
1608 }
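/* Example (annotation, not part of the original source): if min_rtt is
 * 40ms and a retransmitted segment is (s)acked only 10ms after its
 * retransmission, the 10ms sample almost certainly reflects an ACK of the
 * original transmission, so RACK skips it rather than record an RTT below
 * the observed minimum.
 */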
1609
1610 /* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1611 static u8 tcp_sacktag_one(struct sock *sk,
1612 struct tcp_sacktag_state *state, u8 sacked,
1613 u32 start_seq, u32 end_seq,
1614 int dup_sack, int pcount, u32 plen,
1615 u64 xmit_time)
1616 {
1617 struct tcp_sock *tp = tcp_sk(sk);
1618
1619 /* Account D-SACK for retransmitted packet. */
1620 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1621 if (tp->undo_marker && tp->undo_retrans > 0 &&
1622 after(end_seq, tp->undo_marker))
1623 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
1624 if ((sacked & TCPCB_SACKED_ACKED) &&
1625 before(start_seq, state->reord))
1626 state->reord = start_seq;
1627 }
1628
1629 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1630 if (!after(end_seq, tp->snd_una))
1631 return sacked;
1632
1633 if (!(sacked & TCPCB_SACKED_ACKED)) {
1634 tcp_rack_advance(tp, sacked, end_seq, xmit_time);
1635
1636 if (sacked & TCPCB_SACKED_RETRANS) {
1637 /* If the segment is not tagged as lost,
1638 * we do not clear RETRANS, believing
1639 * that retransmission is still in flight.
1640 */
1641 if (sacked & TCPCB_LOST) {
1642 sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1643 tp->lost_out -= pcount;
1644 tp->retrans_out -= pcount;
1645 }
1646 } else {
1647 if (!(sacked & TCPCB_RETRANS)) {
1648 /* New SACK for a frame that was never retransmitted
1649 * and was in a hole: this is reordering.
1650 */
1651 if (before(start_seq,
1652 tcp_highest_sack_seq(tp)) &&
1653 before(start_seq, state->reord))
1654 state->reord = start_seq;
1655
1656 if (!after(end_seq, tp->high_seq))
1657 state->flag |= FLAG_ORIG_SACK_ACKED;
1658 if (state->first_sackt == 0)
1659 state->first_sackt = xmit_time;
1660 state->last_sackt = xmit_time;
1661 }
1662
1663 if (sacked & TCPCB_LOST) {
1664 sacked &= ~TCPCB_LOST;
1665 tp->lost_out -= pcount;
1666 }
1667 }
1668
1669 sacked |= TCPCB_SACKED_ACKED;
1670 state->flag |= FLAG_DATA_SACKED;
1671 tp->sacked_out += pcount;
1672 /* Out-of-order packets delivered */
1673 state->sack_delivered += pcount;
1674 state->delivered_bytes += plen;
1675 }
1676
1677 /* D-SACK. We can detect redundant retransmission in S|R and plain R
1678 * frames and clear it. undo_retrans is decreased above, L|R frames
1679 * are accounted above as well.
1680 */
1681 if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
1682 sacked &= ~TCPCB_SACKED_RETRANS;
1683 tp->retrans_out -= pcount;
1684 }
1685
1686 return sacked;
1687 }
1688
1689 /* The bandwidth estimator estimates the rate at which the network
1690 * can currently deliver outbound data packets for this flow. At a high
1691 * level, it operates by taking a delivery rate sample for each ACK.
1692 *
1693 * A rate sample records the rate at which the network delivered packets
1694 * for this flow, calculated over the time interval between the transmission
1695 * of a data packet and the acknowledgment of that packet.
1696 *
1697 * Specifically, over the interval between each transmit and corresponding ACK,
1698 * the estimator generates a delivery rate sample. Typically it uses the rate
1699 * at which packets were acknowledged. However, the approach of using only the
1700 * acknowledgment rate faces a challenge under the prevalent ACK decimation or
1701 * compression: packets can temporarily appear to be delivered much quicker
1702 * than the bottleneck rate. Since it is physically impossible to do that in a
1703 * sustained fashion, when the estimator notices that the ACK rate is faster
1704 * than the transmit rate, it uses the latter:
1705 *
1706 * send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
1707 * ack_rate = #pkts_delivered/(last_ack_time - first_ack_time)
1708 * bw = min(send_rate, ack_rate)
1709 *
1710 * Notice the estimator essentially estimates the goodput, not always the
1711 * network bottleneck link rate when the sending or receiving is limited by
1712 * other factors like applications or receiver window limits. The estimator
1713 * deliberately avoids using the inter-packet spacing approach because that
1714 * approach requires a large number of samples and sophisticated filtering.
1715 *
1716 * TCP flows can often be application-limited in request/response workloads.
1717 * The estimator marks a bandwidth sample as application-limited if there
1718 * was some moment during the sampled window of packets when there was no data
1719 * ready to send in the write queue.
1720 */
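/* Worked example (annotation, not part of the original source): if 10
 * packets were delivered over a send phase of 10ms but, due to ACK
 * compression, the corresponding ACKs all arrived within 2ms, ack_rate
 * would suggest 5000 pkt/s. Taking bw = min(send_rate, ack_rate) =
 * min(1000, 5000) = 1000 pkt/s keeps the sample at a rate the path can
 * actually sustain.
 */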
1721
1722 /* Update the connection delivery information and generate a rate sample. */
1723 static void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1724 bool is_sack_reneg, struct rate_sample *rs)
1725 {
1726 struct tcp_sock *tp = tcp_sk(sk);
1727 u32 snd_us, ack_us;
1728
1729 /* Clear app limited if bubble is acked and gone. */
1730 if (tp->app_limited && after(tp->delivered, tp->app_limited))
1731 tp->app_limited = 0;
1732
1733 /* TODO: there are multiple places throughout tcp_ack() to get
1734 * current time. Refactor the code using a new "tcp_acktag_state"
1735 * to carry current time, flags, stats like "tcp_sacktag_state".
1736 */
1737 if (delivered)
1738 tp->delivered_mstamp = tp->tcp_mstamp;
1739
1740 rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
1741 rs->losses = lost; /* freshly marked lost */
1742 /* Return an invalid sample if no timing information is available or
1743 * in recovery from loss with SACK reneging. Rate samples taken during
1744 * a SACK reneging event may overestimate bw by including packets that
1745 * were SACKed before the reneg.
1746 */
1747 if (!rs->prior_mstamp || is_sack_reneg) {
1748 rs->delivered = -1;
1749 rs->interval_us = -1;
1750 return;
1751 }
1752 rs->delivered = tp->delivered - rs->prior_delivered;
1753
1754 rs->delivered_ce = tp->delivered_ce - rs->prior_delivered_ce;
1755 /* delivered_ce occupies less than 32 bits in the skb control block */
1756 rs->delivered_ce &= TCPCB_DELIVERED_CE_MASK;
1757
1758 /* Model sending data and receiving ACKs as separate pipeline phases
1759 * for a window. Usually the ACK phase is longer, but with ACK
1760 * compression the send phase can be longer. To be safe we use the
1761 * longer phase.
1762 */
1763 snd_us = rs->interval_us; /* send phase */
1764 ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
1765 rs->prior_mstamp); /* ack phase */
1766 rs->interval_us = max(snd_us, ack_us);
1767
1768 /* Record both segment send and ack receive intervals */
1769 rs->snd_interval_us = snd_us;
1770 rs->rcv_interval_us = ack_us;
1771
1772 /* Normally we expect interval_us >= min-rtt.
1773 * Note that the rate may still be over-estimated when a spuriously
1774 * retransmitted skb was first (s)acked because "interval_us"
1775 * is under-estimated (up to an RTT). However, continuously
1776 * measuring the delivery rate during loss recovery is crucial
1777 * for connections that suffer heavy or prolonged losses.
1778 */
1779 if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
1780 if (!rs->is_retrans)
1781 pr_debug("tcp rate: %ld %d %u %u %u\n",
1782 rs->interval_us, rs->delivered,
1783 inet_csk(sk)->icsk_ca_state,
1784 tp->rx_opt.sack_ok, tcp_min_rtt(tp));
1785 rs->interval_us = -1;
1786 return;
1787 }
1788
1789 /* Record the last non-app-limited or the highest app-limited bw */
1790 if (!rs->is_app_limited ||
1791 ((u64)rs->delivered * tp->rate_interval_us >=
1792 (u64)tp->rate_delivered * rs->interval_us)) {
1793 tp->rate_delivered = rs->delivered;
1794 tp->rate_interval_us = rs->interval_us;
1795 tp->rate_app_limited = rs->is_app_limited;
1796 }
1797 }
1798
1799 /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
1800 * delivery information when the skb was last transmitted.
1801 *
1802 * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
1803 * called multiple times. We favor the information from the most recently
1804 * sent skb, i.e., the skb with the most recently sent time and the highest
1805 * sequence.
1806 */
1807 static void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1808 struct rate_sample *rs)
1809 {
1810 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
1811 struct tcp_sock *tp = tcp_sk(sk);
1812 u64 tx_tstamp;
1813
1814 if (!scb->tx.delivered_mstamp)
1815 return;
1816
1817 tx_tstamp = tcp_skb_timestamp_us(skb);
1818 if (!rs->prior_delivered ||
1819 tcp_skb_sent_after(tx_tstamp, tp->first_tx_mstamp,
1820 scb->end_seq, rs->last_end_seq)) {
1821 rs->prior_delivered_ce = scb->tx.delivered_ce;
1822 rs->prior_delivered = scb->tx.delivered;
1823 rs->prior_mstamp = scb->tx.delivered_mstamp;
1824 rs->is_app_limited = scb->tx.is_app_limited;
1825 rs->is_retrans = scb->sacked & TCPCB_RETRANS;
1826 rs->last_end_seq = scb->end_seq;
1827
1828 /* Record send time of most recently ACKed packet: */
1829 tp->first_tx_mstamp = tx_tstamp;
1830 /* Find the duration of the "send phase" of this window: */
1831 rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
1832 scb->tx.first_tx_mstamp);
1833
1834 }
1835 /* Mark off the skb delivered once it's sacked to avoid being
1836 * used again when it's cumulatively acked. For acked packets
1837 * we don't need to reset since it'll be freed soon.
1838 */
1839 if (scb->sacked & TCPCB_SACKED_ACKED)
1840 scb->tx.delivered_mstamp = 0;
1841 }
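/* Example (annotation, not part of the original source): if one stretched
 * ACK (s)acks three skbs, this function runs three times; the
 * tcp_skb_sent_after() test makes the rate sample keep the delivery state
 * of the most recently sent skb, and the send phase is then measured from
 * that skb's recorded first_tx_mstamp up to its send time.
 */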
1842
1843 /* Shift newly-SACKed bytes from this skb to the immediately previous
1844 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1845 */
1846 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
1847 struct sk_buff *skb,
1848 struct tcp_sacktag_state *state,
1849 unsigned int pcount, int shifted, int mss,
1850 bool dup_sack)
1851 {
1852 struct tcp_sock *tp = tcp_sk(sk);
1853 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
1854 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
1855
1856 BUG_ON(!pcount);
1857
1858 /* Adjust counters and hints for the newly sacked sequence
1859 * range but discard the return value since prev is already
1860 * marked. We must tag the range first because the seq
1861 * advancement below implicitly advances
1862 * tcp_highest_sack_seq() when skb is highest_sack.
1863 */
1864 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1865 start_seq, end_seq, dup_sack, pcount, skb->len,
1866 tcp_skb_timestamp_us(skb));
1867 tcp_rate_skb_delivered(sk, skb, state->rate);
1868
1869 TCP_SKB_CB(prev)->end_seq += shifted;
1870 TCP_SKB_CB(skb)->seq += shifted;
1871
1872 tcp_skb_pcount_add(prev, pcount);
1873 WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
1874 tcp_skb_pcount_add(skb, -pcount);
1875
1876 /* When we're adding to gso_segs == 1, gso_size will be zero,
1877 * in theory this shouldn't be necessary but as long as DSACK
1878 * code can come after this skb later on it's better to keep
1879 * setting gso_size to something.
1880 */
1881 if (!TCP_SKB_CB(prev)->tcp_gso_size)
1882 TCP_SKB_CB(prev)->tcp_gso_size = mss;
1883
1884 /* CHECKME: To clear or not to clear? Mimics normal skb currently */
1885 if (tcp_skb_pcount(skb) <= 1)
1886 TCP_SKB_CB(skb)->tcp_gso_size = 0;
1887
1888 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1889 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1890
1891 if (skb->len > 0) {
1892 BUG_ON(!tcp_skb_pcount(skb));
1893 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1894 return false;
1895 }
1896
1897 /* Whole SKB was eaten :-) */
1898
1899 if (skb == tp->retransmit_skb_hint)
1900 tp->retransmit_skb_hint = prev;
1901
1902 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1903 TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
1904 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1905 TCP_SKB_CB(prev)->end_seq++;
1906
1907 if (skb == tcp_highest_sack(sk))
1908 tcp_advance_highest_sack(sk, skb);
1909
1910 tcp_skb_collapse_tstamp(prev, skb);
1911 if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
1912 TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
1913
1914 tcp_rtx_queue_unlink_and_free(skb, sk);
1915
1916 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
1917
1918 return true;
1919 }
1920
1921 /* I wish gso_size would have a bit more sane initialization than
1922 * something-or-zero which complicates things
1923 */
1924 static int tcp_skb_seglen(const struct sk_buff *skb)
1925 {
1926 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1927 }
1928
1929 /* Shifting pages past head area doesn't work */
1930 static int skb_can_shift(const struct sk_buff *skb)
1931 {
1932 return !skb_headlen(skb) && skb_is_nonlinear(skb);
1933 }
1934
1935 int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
1936 int pcount, int shiftlen)
1937 {
1938 /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
1939 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
1940 * to make sure not storing more than 65535 * 8 bytes per skb,
1941 * even if current MSS is bigger.
1942 */
1943 if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
1944 return 0;
1945 if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
1946 return 0;
1947 return skb_shift(to, from, shiftlen);
1948 }
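/* Example (annotation, not part of the original source): tcp_gso_segs is a
 * 16-bit count, so a single skb may describe at most 65535 segments of at
 * least TCP_MIN_GSO_SIZE (8) bytes each, i.e. 65535 * 8 = 524280 bytes.
 * A shift that would push to->len past that bound is refused before
 * calling skb_shift().
 */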
1949
1950 /* Try collapsing SACK blocks spanning across multiple skbs to a single
1951 * skb.
1952 */
1953 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1954 struct tcp_sacktag_state *state,
1955 u32 start_seq, u32 end_seq,
1956 bool dup_sack)
1957 {
1958 struct tcp_sock *tp = tcp_sk(sk);
1959 struct sk_buff *prev;
1960 int mss;
1961 int pcount = 0;
1962 int len;
1963 int in_sack;
1964
1965 /* Normally R but no L won't result in plain S */
1966 if (!dup_sack &&
1967 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
1968 goto fallback;
1969 if (!skb_can_shift(skb))
1970 goto fallback;
1971 /* This frame is about to be dropped (was ACKed). */
1972 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1973 goto fallback;
1974
1975 /* Can only happen with delayed DSACK + discard craziness */
1976 prev = skb_rb_prev(skb);
1977 if (!prev)
1978 goto fallback;
1979
1980 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1981 goto fallback;
1982
1983 if (!tcp_skb_can_collapse(prev, skb))
1984 goto fallback;
1985
1986 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1987 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1988
1989 if (in_sack) {
1990 len = skb->len;
1991 pcount = tcp_skb_pcount(skb);
1992 mss = tcp_skb_seglen(skb);
1993
1994 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1995 * drop this restriction as unnecessary
1996 */
1997 if (mss != tcp_skb_seglen(prev))
1998 goto fallback;
1999 } else {
2000 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
2001 goto noop;
2002 /* CHECKME: Is this the non-MSS split case only? Btw, this will
2003 * cause skipped skbs due to the advancing loop; the original
2004 * has that feature too.
2005 */
2006 if (tcp_skb_pcount(skb) <= 1)
2007 goto noop;
2008
2009 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
2010 if (!in_sack) {
2011 /* TODO: head merge to next could be attempted here
2012 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
2013 * though it might not be worth the additional hassle
2014 *
2015 * ...we can probably just fall back to what was done
2016 * previously. We could try merging non-SACKed ones
2017 * as well but it probably isn't going to pay off
2018 * because later SACKs might again split them, and
2019 * it would make skb timestamp tracking a considerably
2020 * harder problem.
2021 */
2022 goto fallback;
2023 }
2024
2025 len = end_seq - TCP_SKB_CB(skb)->seq;
2026 BUG_ON(len < 0);
2027 BUG_ON(len > skb->len);
2028
2029 /* MSS boundaries should be honoured or else pcount will
2030 * severely break even though it makes things a bit trickier.
2031 * Optimize common case to avoid most of the divides
2032 */
2033 mss = tcp_skb_mss(skb);
2034
2035 /* TODO: Fix DSACKs to not fragment already SACKed and we can
2036 * drop this restriction as unnecessary
2037 */
2038 if (mss != tcp_skb_seglen(prev))
2039 goto fallback;
2040
2041 if (len == mss) {
2042 pcount = 1;
2043 } else if (len < mss) {
2044 goto noop;
2045 } else {
2046 pcount = len / mss;
2047 len = pcount * mss;
2048 }
2049 }
2050
2051 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
2052 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
2053 goto fallback;
2054
2055 if (!tcp_skb_shift(prev, skb, pcount, len))
2056 goto fallback;
2057 if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
2058 goto out;
2059
2060 /* A filled hole allows collapsing with the next skb as well; this is
2061 * very useful when a hole-on-every-nth-skb pattern happens
2062 */
2063 skb = skb_rb_next(prev);
2064 if (!skb)
2065 goto out;
2066
2067 if (!skb_can_shift(skb) ||
2068 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
2069 (mss != tcp_skb_seglen(skb)))
2070 goto out;
2071
2072 if (!tcp_skb_can_collapse(prev, skb))
2073 goto out;
2074 len = skb->len;
2075 pcount = tcp_skb_pcount(skb);
2076 if (tcp_skb_shift(prev, skb, pcount, len))
2077 tcp_shifted_skb(sk, prev, skb, state, pcount,
2078 len, mss, 0);
2079
2080 out:
2081 return prev;
2082
2083 noop:
2084 return skb;
2085
2086 fallback:
2087 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
2088 return NULL;
2089 }
2090
2091 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
2092 struct tcp_sack_block *next_dup,
2093 struct tcp_sacktag_state *state,
2094 u32 start_seq, u32 end_seq,
2095 bool dup_sack_in)
2096 {
2097 struct tcp_sock *tp = tcp_sk(sk);
2098 struct sk_buff *tmp;
2099
2100 skb_rbtree_walk_from(skb) {
2101 int in_sack = 0;
2102 bool dup_sack = dup_sack_in;
2103
2104 /* queue is in-order => we can short-circuit the walk early */
2105 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
2106 break;
2107
2108 if (next_dup &&
2109 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
2110 in_sack = tcp_match_skb_to_sack(sk, skb,
2111 next_dup->start_seq,
2112 next_dup->end_seq);
2113 if (in_sack > 0)
2114 dup_sack = true;
2115 }
2116
2117 /* skb reference here is a bit tricky to get right, since
2118 * shifting can eat and free both this skb and the next,
2119 * so not even the _safe variant of the loop is enough.
2120 */
2121 if (in_sack <= 0) {
2122 tmp = tcp_shift_skb_data(sk, skb, state,
2123 start_seq, end_seq, dup_sack);
2124 if (tmp) {
2125 if (tmp != skb) {
2126 skb = tmp;
2127 continue;
2128 }
2129
2130 in_sack = 0;
2131 } else {
2132 in_sack = tcp_match_skb_to_sack(sk, skb,
2133 start_seq,
2134 end_seq);
2135 }
2136 }
2137
2138 if (unlikely(in_sack < 0))
2139 break;
2140
2141 if (in_sack) {
2142 TCP_SKB_CB(skb)->sacked =
2143 tcp_sacktag_one(sk,
2144 state,
2145 TCP_SKB_CB(skb)->sacked,
2146 TCP_SKB_CB(skb)->seq,
2147 TCP_SKB_CB(skb)->end_seq,
2148 dup_sack,
2149 tcp_skb_pcount(skb),
2150 skb->len,
2151 tcp_skb_timestamp_us(skb));
2152 tcp_rate_skb_delivered(sk, skb, state->rate);
2153 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2154 list_del_init(&skb->tcp_tsorted_anchor);
2155
2156 if (!before(TCP_SKB_CB(skb)->seq,
2157 tcp_highest_sack_seq(tp)))
2158 tcp_advance_highest_sack(sk, skb);
2159 }
2160 }
2161 return skb;
2162 }
2163
2164 static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq)
2165 {
2166 struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
2167 struct sk_buff *skb;
2168
2169 while (*p) {
2170 parent = *p;
2171 skb = rb_to_skb(parent);
2172 if (before(seq, TCP_SKB_CB(skb)->seq)) {
2173 p = &parent->rb_left;
2174 continue;
2175 }
2176 if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
2177 p = &parent->rb_right;
2178 continue;
2179 }
2180 return skb;
2181 }
2182 return NULL;
2183 }
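/* Example (annotation, not part of the original source): the rtx queue
 * rb-tree is keyed by sequence number, so this search returns the skb
 * whose [seq, end_seq) range contains the given seq, descending left when
 * seq is below skb->seq and right when it is at or beyond skb->end_seq.
 */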
2184
2185 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
2186 u32 skip_to_seq)
2187 {
2188 if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
2189 return skb;
2190
2191 return tcp_sacktag_bsearch(sk, skip_to_seq);
2192 }
2193
2194 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
2195 struct sock *sk,
2196 struct tcp_sack_block *next_dup,
2197 struct tcp_sacktag_state *state,
2198 u32 skip_to_seq)
2199 {
2200 if (!next_dup)
2201 return skb;
2202
2203 if (before(next_dup->start_seq, skip_to_seq)) {
2204 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
2205 skb = tcp_sacktag_walk(skb, sk, NULL, state,
2206 next_dup->start_seq, next_dup->end_seq,
2207 1);
2208 }
2209
2210 return skb;
2211 }
2212
2213 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
2214 {
2215 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
2216 }
2217
2218 static int
2219 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
2220 u32 prior_snd_una, struct tcp_sacktag_state *state)
2221 {
2222 struct tcp_sock *tp = tcp_sk(sk);
2223 const unsigned char *ptr = (skb_transport_header(ack_skb) +
2224 TCP_SKB_CB(ack_skb)->sacked);
2225 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
2226 struct tcp_sack_block sp[TCP_NUM_SACKS];
2227 struct tcp_sack_block *cache;
2228 struct sk_buff *skb;
2229 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
2230 int used_sacks;
2231 bool found_dup_sack = false;
2232 int i, j;
2233 int first_sack_index;
2234
2235 state->flag = 0;
2236 state->reord = tp->snd_nxt;
2237
2238 if (!tp->sacked_out)
2239 tcp_highest_sack_reset(sk);
2240
2241 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
2242 num_sacks, prior_snd_una, state);
2243
2244 /* Eliminate too old ACKs, but take into
2245 * account more or less fresh ones; they can
2246 * contain valid SACK info.
2247 */
2248 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
2249 return 0;
2250
2251 if (!tp->packets_out)
2252 goto out;
2253
2254 used_sacks = 0;
2255 first_sack_index = 0;
2256 for (i = 0; i < num_sacks; i++) {
2257 bool dup_sack = !i && found_dup_sack;
2258
2259 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
2260 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
2261
2262 if (!tcp_is_sackblock_valid(tp, dup_sack,
2263 sp[used_sacks].start_seq,
2264 sp[used_sacks].end_seq)) {
2265 int mib_idx;
2266
2267 if (dup_sack) {
2268 if (!tp->undo_marker)
2269 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
2270 else
2271 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
2272 } else {
2273 /* Don't count olds caused by ACK reordering */
2274 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
2275 !after(sp[used_sacks].end_seq, tp->snd_una))
2276 continue;
2277 mib_idx = LINUX_MIB_TCPSACKDISCARD;
2278 }
2279
2280 NET_INC_STATS(sock_net(sk), mib_idx);
2281 if (i == 0)
2282 first_sack_index = -1;
2283 continue;
2284 }
2285
2286 /* Ignore very old stuff early */
2287 if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
2288 if (i == 0)
2289 first_sack_index = -1;
2290 continue;
2291 }
2292
2293 used_sacks++;
2294 }
2295
2296 /* order SACK blocks to allow in order walk of the retrans queue */
2297 for (i = used_sacks - 1; i > 0; i--) {
2298 for (j = 0; j < i; j++) {
2299 if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
2300 swap(sp[j], sp[j + 1]);
2301
2302 /* Track where the first SACK block goes to */
2303 if (j == first_sack_index)
2304 first_sack_index = j + 1;
2305 }
2306 }
2307 }
2308
2309 state->mss_now = tcp_current_mss(sk);
2310 skb = NULL;
2311 i = 0;
2312
2313 if (!tp->sacked_out) {
2314 /* It's already past, so skip checking against it */
2315 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
2316 } else {
2317 cache = tp->recv_sack_cache;
2318 /* Skip empty blocks at the head of the cache */
2319 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
2320 !cache->end_seq)
2321 cache++;
2322 }
2323
2324 while (i < used_sacks) {
2325 u32 start_seq = sp[i].start_seq;
2326 u32 end_seq = sp[i].end_seq;
2327 bool dup_sack = (found_dup_sack && (i == first_sack_index));
2328 struct tcp_sack_block *next_dup = NULL;
2329
2330 if (found_dup_sack && ((i + 1) == first_sack_index))
2331 next_dup = &sp[i + 1];
2332
2333 /* Skip too early cached blocks */
2334 while (tcp_sack_cache_ok(tp, cache) &&
2335 !before(start_seq, cache->end_seq))
2336 cache++;
2337
2338 /* Can we skip some work by looking at recv_sack_cache? */
2339 if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
2340 after(end_seq, cache->start_seq)) {
2341
2342 /* Head todo? */
2343 if (before(start_seq, cache->start_seq)) {
2344 skb = tcp_sacktag_skip(skb, sk, start_seq);
2345 skb = tcp_sacktag_walk(skb, sk, next_dup,
2346 state,
2347 start_seq,
2348 cache->start_seq,
2349 dup_sack);
2350 }
2351
2352 /* Rest of the block already fully processed? */
2353 if (!after(end_seq, cache->end_seq))
2354 goto advance_sp;
2355
2356 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
2357 state,
2358 cache->end_seq);
2359
2360 /* ...tail remains todo... */
2361 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
2362 /* ...but better entrypoint exists! */
2363 skb = tcp_highest_sack(sk);
2364 if (!skb)
2365 break;
2366 cache++;
2367 goto walk;
2368 }
2369
2370 skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
2371 /* Check overlap against next cached too (past this one already) */
2372 cache++;
2373 continue;
2374 }
2375
2376 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
2377 skb = tcp_highest_sack(sk);
2378 if (!skb)
2379 break;
2380 }
2381 skb = tcp_sacktag_skip(skb, sk, start_seq);
2382
2383 walk:
2384 skb = tcp_sacktag_walk(skb, sk, next_dup, state,
2385 start_seq, end_seq, dup_sack);
2386
2387 advance_sp:
2388 i++;
2389 }
2390
2391 /* Clear the head of the cache sack blocks so we can skip it next time */
2392 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
2393 tp->recv_sack_cache[i].start_seq = 0;
2394 tp->recv_sack_cache[i].end_seq = 0;
2395 }
2396 for (j = 0; j < used_sacks; j++)
2397 tp->recv_sack_cache[i++] = sp[j];
2398
2399 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
2400 tcp_check_sack_reordering(sk, state->reord, 0);
2401
2402 tcp_verify_left_out(tp);
2403 out:
2404
2405 #if FASTRETRANS_DEBUG > 0
2406 WARN_ON((int)tp->sacked_out < 0);
2407 WARN_ON((int)tp->lost_out < 0);
2408 WARN_ON((int)tp->retrans_out < 0);
2409 WARN_ON((int)tcp_packets_in_flight(tp) < 0);
2410 #endif
2411 return state->flag;
2412 }
2413
2414 /* Limits sacked_out so that sum with lost_out isn't ever larger than
2415 * packets_out. Returns false if sacked_out adjustment wasn't necessary.
2416 */
2417 static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
2418 {
2419 u32 holes;
2420
2421 holes = max(tp->lost_out, 1U);
2422 holes = min(holes, tp->packets_out);
2423
2424 if ((tp->sacked_out + holes) > tp->packets_out) {
2425 tp->sacked_out = tp->packets_out - holes;
2426 return true;
2427 }
2428 return false;
2429 }
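/* Example (annotation, not part of the original source): with
 * packets_out = 10, lost_out = 2 and sacked_out = 9, holes = 2 and
 * sacked_out + holes = 11 > 10, so sacked_out is clamped to 8 and the
 * function reports that an adjustment was made.
 */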
2430
2431 /* If we receive more dupacks than we expected when counting segments
2432 * under the assumption of no reordering, interpret this as reordering.
2433 * The only other explanation would be a bug in the receiver's TCP.
2434 */
2435 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
2436 {
2437 struct tcp_sock *tp = tcp_sk(sk);
2438
2439 if (!tcp_limit_reno_sacked(tp))
2440 return;
2441
2442 tp->reordering = min_t(u32, tp->packets_out + addend,
2443 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
2444 tp->reord_seen++;
2445 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
2446 }
2447
2448 /* Emulate SACKs for SACKless connection: account for a new dupack. */
2449
2450 static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
2451 {
2452 if (num_dupack) {
2453 struct tcp_sock *tp = tcp_sk(sk);
2454 u32 prior_sacked = tp->sacked_out;
2455 s32 delivered;
2456
2457 tp->sacked_out += num_dupack;
2458 tcp_check_reno_reordering(sk, 0);
2459 delivered = tp->sacked_out - prior_sacked;
2460 if (delivered > 0)
2461 tcp_count_delivered(tp, delivered, ece_ack);
2462 tcp_verify_left_out(tp);
2463 }
2464 }
2465
2466 /* Account for ACK, ACKing some data in Reno Recovery phase. */
2467
2468 static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
2469 {
2470 struct tcp_sock *tp = tcp_sk(sk);
2471
2472 if (acked > 0) {
2473 /* One ACK acked hole. The rest eat duplicate ACKs. */
2474 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
2475 ece_ack);
2476 if (acked - 1 >= tp->sacked_out)
2477 tp->sacked_out = 0;
2478 else
2479 tp->sacked_out -= acked - 1;
2480 }
2481 tcp_check_reno_reordering(sk, acked);
2482 tcp_verify_left_out(tp);
2483 }
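/* Example (annotation, not part of the original source): if acked = 3
 * while sacked_out = 5, one ACKed segment filled the hole and the other
 * two consume emulated SACKs, leaving sacked_out = 3; delivered is
 * counted as max(acked - sacked_out, 1) = 1.
 */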
2484
2485 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
2486 {
2487 tp->sacked_out = 0;
2488 }
2489
2490 void tcp_clear_retrans(struct tcp_sock *tp)
2491 {
2492 tp->retrans_out = 0;
2493 tp->lost_out = 0;
2494 tp->undo_marker = 0;
2495 tp->undo_retrans = -1;
2496 tp->sacked_out = 0;
2497 tp->rto_stamp = 0;
2498 tp->total_rto = 0;
2499 tp->total_rto_recoveries = 0;
2500 tp->total_rto_time = 0;
2501 }
2502
2503 static inline void tcp_init_undo(struct tcp_sock *tp)
2504 {
2505 tp->undo_marker = tp->snd_una;
2506
2507 /* Retransmission still in flight may cause DSACKs later. */
2508 /* First, account for regular retransmits in flight: */
2509 tp->undo_retrans = tp->retrans_out;
2510 /* Next, account for TLP retransmits in flight: */
2511 if (tp->tlp_high_seq && tp->tlp_retrans)
2512 tp->undo_retrans++;
2513 /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
2514 if (!tp->undo_retrans)
2515 tp->undo_retrans = -1;
2516 }
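/* Example (annotation, not part of the original source): with
 * retrans_out = 3 and a TLP retransmission still in flight, undo_retrans
 * starts at 4; with nothing retransmitted it is set to -1, since 0 is
 * reserved to mean "can undo now".
 */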
2517
2518 /* If we detect SACK reneging, forget all SACK information
2519 * and reset tags completely, otherwise preserve SACKs. If receiver
2520 * dropped its ofo queue, we will know this due to reneging detection.
2521 */
2522 static void tcp_timeout_mark_lost(struct sock *sk)
2523 {
2524 struct tcp_sock *tp = tcp_sk(sk);
2525 struct sk_buff *skb, *head;
2526 bool is_reneg; /* is receiver reneging on SACKs? */
2527
2528 head = tcp_rtx_queue_head(sk);
2529 is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
2530 if (is_reneg) {
2531 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2532 tp->sacked_out = 0;
2533 /* Mark SACK reneging until we recover from this loss event. */
2534 tp->is_sack_reneg = 1;
2535 } else if (tcp_is_reno(tp)) {
2536 tcp_reset_reno_sack(tp);
2537 }
2538
2539 skb = head;
2540 skb_rbtree_walk_from(skb) {
2541 if (is_reneg)
2542 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
2543 else if (skb != head && tcp_rack_skb_timeout(tp, skb, 0) > 0)
2544 continue; /* Don't mark recently sent ones lost yet */
2545 tcp_mark_skb_lost(sk, skb);
2546 }
2547 tcp_verify_left_out(tp);
2548 tcp_clear_all_retrans_hints(tp);
2549 }
2550
2551 /* Enter Loss state. */
2552 void tcp_enter_loss(struct sock *sk)
2553 {
2554 const struct inet_connection_sock *icsk = inet_csk(sk);
2555 struct tcp_sock *tp = tcp_sk(sk);
2556 struct net *net = sock_net(sk);
2557 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
2558 u8 reordering;
2559
2560 tcp_timeout_mark_lost(sk);
2561
2562 /* Reduce ssthresh if it has not yet been made inside this window. */
2563 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
2564 !after(tp->high_seq, tp->snd_una) ||
2565 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2566 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2567 tp->prior_cwnd = tcp_snd_cwnd(tp);
2568 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2569 tcp_ca_event(sk, CA_EVENT_LOSS);
2570 tcp_init_undo(tp);
2571 }
2572 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
2573 tp->snd_cwnd_cnt = 0;
2574 tp->snd_cwnd_stamp = tcp_jiffies32;
2575
2576 /* Timeout in disordered state after receiving substantial DUPACKs
2577 * suggests that the degree of reordering is over-estimated.
2578 */
2579 reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
2580 if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
2581 tp->sacked_out >= reordering)
2582 tp->reordering = min_t(unsigned int, tp->reordering,
2583 reordering);
2584
2585 tcp_set_ca_state(sk, TCP_CA_Loss);
2586 tp->high_seq = tp->snd_nxt;
2587 tp->tlp_high_seq = 0;
2588 tcp_ecn_queue_cwr(tp);
2589
2590 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
2591 * loss recovery is underway except recurring timeout(s) on
2592 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2593 */
2594 tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
2595 (new_recovery || icsk->icsk_retransmits) &&
2596 !inet_csk(sk)->icsk_mtup.probe_size;
2597 }
2598
2599 /* If ACK arrived pointing to a remembered SACK, it means that our
2600 * remembered SACKs do not reflect the real state of the receiver, i.e.
2601 * the receiver _host_ is heavily congested (or buggy).
2602 *
2603 * To avoid big spurious retransmission bursts due to transient SACK
2604 * scoreboard oddities that look like reneging, we give the receiver a
2605 * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
2606 * restore sanity to the SACK scoreboard. If the apparent reneging
2607 * persists until this RTO then we'll clear the SACK scoreboard.
2608 */
2609 static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
2610 {
2611 if (*ack_flag & FLAG_SACK_RENEGING &&
2612 *ack_flag & FLAG_SND_UNA_ADVANCED) {
2613 struct tcp_sock *tp = tcp_sk(sk);
2614 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
2615 msecs_to_jiffies(10));
2616
2617 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, false);
2618 *ack_flag &= ~FLAG_SET_XMIT_TIMER;
2619 return true;
2620 }
2621 return false;
2622 }
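/* Example (annotation, not part of the original source): tp->srtt_us
 * stores the smoothed RTT shifted left by 3, so srtt_us >> 4 is RTT/2.
 * With a 100ms smoothed RTT, srtt_us = 800000 and the timer is re-armed
 * max(50ms, 10ms) = 50ms out, per the comment above.
 */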
2623
2624 /* Linux NewReno/SACK/ECN state machine.
2625 * --------------------------------------
2626 *
2627 * "Open" Normal state, no dubious events, fast path.
2628 * "Disorder" In all the respects it is "Open",
2629 * but requires a bit more attention. It is entered when
2630 * we see some SACKs or dupacks. It is split off from "Open"
2631 * mainly to move some processing from fast path to slow one.
2632 * "CWR" CWND was reduced due to some Congestion Notification event.
2633 * It can be ECN, ICMP source quench, local device congestion.
2634 * "Recovery" CWND was reduced, we are fast-retransmitting.
2635 * "Loss" CWND was reduced due to RTO timeout or SACK reneging.
2636 *
2637 * tcp_fastretrans_alert() is entered:
2638 * - each incoming ACK, if state is not "Open"
2639 * - when arrived ACK is unusual, namely:
2640 * * SACK
2641 * * Duplicate ACK.
2642 * * ECN ECE.
2643 *
2644 * Counting packets in flight is pretty simple.
2645 *
2646 * in_flight = packets_out - left_out + retrans_out
2647 *
2648 * packets_out is SND.NXT-SND.UNA counted in packets.
2649 *
2650 * retrans_out is number of retransmitted segments.
2651 *
2652 * left_out is the number of segments that left the network but are not ACKed yet.
2653 *
2654 * left_out = sacked_out + lost_out
2655 *
2656 * sacked_out: Packets, which arrived to receiver out of order
2657 * and hence not ACKed. With SACKs this number is simply
2658 * amount of SACKed data. Even without SACKs
2659 * it is easy to give a pretty reliable estimate of this number
2660 * by counting duplicate ACKs.
2661 *
2662 * lost_out: Packets lost by network. TCP has no explicit
2663 * "loss notification" feedback from network (for now).
2664 * It means that this number can be only _guessed_.
2665 * Actually, it is the heuristic used to predict losses that
2666 * distinguishes the different algorithms.
2667 *
2668 * F.e. after RTO, when all the queue is considered as lost,
2669 * lost_out = packets_out and in_flight = retrans_out.
2670 *
2671 * Essentially, we now have a few algorithms for detecting
2672 * lost packets.
2673 *
2674 * If the receiver supports SACK:
2675 *
2676 * RACK (RFC8985): RACK is a newer loss detection algorithm
2677 * (2017-) that checks timing instead of counting DUPACKs.
2678 * Essentially a packet is considered lost if it's not S/ACKed
2679 * after RTT + reordering_window, where both metrics are
2680 * dynamically measured and adjusted. This is implemented in
2681 * tcp_rack_mark_lost.
2682 *
2683 * If the receiver does not support SACK:
2684 *
2685 * NewReno (RFC6582): in Recovery we assume that one segment
2686 * is lost (classic Reno). While we are in Recovery and
2687 * a partial ACK arrives, we assume that one more packet
2688 * is lost (NewReno). These heuristics are the same in NewReno
2689 * and SACK.
2690 *
2691 * The really tricky part of the algorithm (the one requiring careful tuning)
2692 * is hidden in the RACK code in tcp_recovery.c and tcp_xmit_retransmit_queue().
2693 * The first determines the moment _when_ we should reduce CWND and,
2694 * hence, slow down forward transmission. In fact, it determines the moment
2695 * when we decide that a hole is caused by loss, rather than by a reorder.
2696 *
2697 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
2698 * holes, caused by lost packets.
2699 *
2700 * And the most logically complicated part of algorithm is undo
2701 * heuristics. We detect false retransmits due to both too early
2702 * fast retransmit (reordering) and underestimated RTO, analyzing
2703 * timestamps and D-SACKs. When we detect that some segments were
2704 * retransmitted by mistake and CWND reduction was wrong, we undo
2705 * window reduction and abort recovery phase. This logic is hidden
2706 * inside several functions named tcp_try_undo_<something>.
2707 */
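/* Worked example (annotation, not part of the original source): with
 * packets_out = 10, sacked_out = 2, lost_out = 1 and retrans_out = 1,
 * left_out = 2 + 1 = 3 and in_flight = 10 - 3 + 1 = 8 segments are
 * estimated to still be in the network.
 */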
2708
2709 /* This function decides, when we should leave Disordered state
2710 * and enter Recovery phase, reducing congestion window.
2711 *
2712 * Main question: may we further continue forward transmission
2713 * with the same cwnd?
2714 */
2715 static bool tcp_time_to_recover(const struct tcp_sock *tp)
2716 {
2717 /* Has loss detection marked at least one packet lost? */
2718 return tp->lost_out != 0;
2719 }
2720
2721 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2722 {
2723 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2724 before(tp->rx_opt.rcv_tsecr, when);
2725 }
2726
2727 /* skb is spurious retransmitted if the returned timestamp echo
2728 * reply is prior to the skb transmission time
2729 */
2730 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2731 const struct sk_buff *skb)
2732 {
2733 return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2734 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
2735 }
2736
2737 /* Nothing was retransmitted or returned timestamp is less
2738 * than timestamp of the first retransmission.
2739 */
2740 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2741 {
2742 const struct sock *sk = (const struct sock *)tp;
2743
2744 /* Received an echoed timestamp before the first retransmission? */
2745 if (tp->retrans_stamp)
2746 return tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
2747
2748 /* We set tp->retrans_stamp upon the first retransmission of a loss
2749 * recovery episode, so normally if tp->retrans_stamp is 0 then no
2750 * retransmission has happened yet (likely due to TSQ, which can cause
2751 * fast retransmits to be delayed). So if snd_una advanced while
2752 * tp->retrans_stamp is 0 then apparently a packet was merely delayed,
2753 * not lost. But there are exceptions where we retransmit but then
2754 * clear tp->retrans_stamp, so we check for those exceptions.
2755 */
2756
2757 /* (1) For non-SACK connections, tcp_is_non_sack_preventing_reopen()
2758 * clears tp->retrans_stamp when snd_una == high_seq.
2759 */
2760 if (!tcp_is_sack(tp) && !before(tp->snd_una, tp->high_seq))
2761 return false;
2762
2763 /* (2) In TCP_SYN_SENT tcp_clean_rtx_queue() clears tp->retrans_stamp
2764 * when FLAG_SYN_ACKED is set, even if the SYN was
2765 * retransmitted.
2766 */
2767 if (sk->sk_state == TCP_SYN_SENT)
2768 return false;
2769
2770 return true; /* tp->retrans_stamp is zero; no retransmit yet */
2771 }
2772
2773 /* Undo procedures. */
2774
2775 /* We can clear retrans_stamp when there are no retransmissions in the
2776 * window. It would seem that it is trivially available for us in
2777 * tp->retrans_out, however, that kind of assumption doesn't consider
2778 * what will happen if errors occur when sending a retransmission for the
2779 * second time. ...It could be that such a segment has only
2780 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2781 * the head skb is enough except for some reneging corner cases that
2782 * are not worth the effort.
2783 *
2784 * Main reason for all this complexity is the fact that connection dying
2785 * time now depends on the validity of the retrans_stamp, in particular,
2786 * that successive retransmissions of a segment must not advance
2787 * retrans_stamp under any conditions.
2788 */
2789 static bool tcp_any_retrans_done(const struct sock *sk)
2790 {
2791 const struct tcp_sock *tp = tcp_sk(sk);
2792 struct sk_buff *skb;
2793
2794 if (tp->retrans_out)
2795 return true;
2796
2797 skb = tcp_rtx_queue_head(sk);
2798 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2799 return true;
2800
2801 return false;
2802 }
2803
2804 /* If loss recovery is finished and there are no retransmits out in the
2805 * network, then we clear retrans_stamp so that upon the next loss recovery
2806 * retransmits_timed_out() and timestamp-undo are using the correct value.
2807 */
2808 static void tcp_retrans_stamp_cleanup(struct sock *sk)
2809 {
2810 if (!tcp_any_retrans_done(sk))
2811 tcp_sk(sk)->retrans_stamp = 0;
2812 }
2813
2814 static void DBGUNDO(struct sock *sk, const char *msg)
2815 {
2816 #if FASTRETRANS_DEBUG > 1
2817 struct tcp_sock *tp = tcp_sk(sk);
2818 struct inet_sock *inet = inet_sk(sk);
2819
2820 if (sk->sk_family == AF_INET) {
2821 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2822 msg,
2823 &inet->inet_daddr, ntohs(inet->inet_dport),
2824 tcp_snd_cwnd(tp), tcp_left_out(tp),
2825 tp->snd_ssthresh, tp->prior_ssthresh,
2826 tp->packets_out);
2827 }
2828 #if IS_ENABLED(CONFIG_IPV6)
2829 else if (sk->sk_family == AF_INET6) {
2830 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2831 msg,
2832 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2833 tcp_snd_cwnd(tp), tcp_left_out(tp),
2834 tp->snd_ssthresh, tp->prior_ssthresh,
2835 tp->packets_out);
2836 }
2837 #endif
2838 #endif
2839 }
2840
2841 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2842 {
2843 struct tcp_sock *tp = tcp_sk(sk);
2844
2845 if (unmark_loss) {
2846 struct sk_buff *skb;
2847
2848 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2849 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2850 }
2851 tp->lost_out = 0;
2852 tcp_clear_all_retrans_hints(tp);
2853 }
2854
2855 if (tp->prior_ssthresh) {
2856 const struct inet_connection_sock *icsk = inet_csk(sk);
2857
2858 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
2859
2860 if (tp->prior_ssthresh > tp->snd_ssthresh) {
2861 tp->snd_ssthresh = tp->prior_ssthresh;
2862 tcp_ecn_withdraw_cwr(tp);
2863 }
2864 }
2865 tp->snd_cwnd_stamp = tcp_jiffies32;
2866 tp->undo_marker = 0;
2867 tp->rack.advanced = 1; /* Force RACK to re-exam losses */
2868 }
2869
2870 static inline bool tcp_may_undo(const struct tcp_sock *tp)
2871 {
2872 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2873 }
2874
2875 static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
2876 {
2877 struct tcp_sock *tp = tcp_sk(sk);
2878
2879 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2880 /* Hold old state until something *above* high_seq
2881 * is ACKed. For Reno it is a MUST to prevent false
2882 * fast retransmits (RFC2582). SACK TCP is safe. */
2883 if (!tcp_any_retrans_done(sk))
2884 tp->retrans_stamp = 0;
2885 return true;
2886 }
2887 return false;
2888 }
2889
2890 /* People celebrate: "We love our President!" */
2891 static bool tcp_try_undo_recovery(struct sock *sk)
2892 {
2893 struct tcp_sock *tp = tcp_sk(sk);
2894
2895 if (tcp_may_undo(tp)) {
2896 int mib_idx;
2897
2898 /* Happy end! We did not retransmit anything
2899 * or our original transmission succeeded.
2900 */
2901 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2902 tcp_undo_cwnd_reduction(sk, false);
2903 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2904 mib_idx = LINUX_MIB_TCPLOSSUNDO;
2905 else
2906 mib_idx = LINUX_MIB_TCPFULLUNDO;
2907
2908 NET_INC_STATS(sock_net(sk), mib_idx);
2909 } else if (tp->rack.reo_wnd_persist) {
2910 tp->rack.reo_wnd_persist--;
2911 }
2912 if (tcp_is_non_sack_preventing_reopen(sk))
2913 return true;
2914 tcp_set_ca_state(sk, TCP_CA_Open);
2915 tp->is_sack_reneg = 0;
2916 return false;
2917 }
2918
2919 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2920 static bool tcp_try_undo_dsack(struct sock *sk)
2921 {
2922 struct tcp_sock *tp = tcp_sk(sk);
2923
2924 if (tp->undo_marker && !tp->undo_retrans) {
2925 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH,
2926 tp->rack.reo_wnd_persist + 1);
2927 DBGUNDO(sk, "D-SACK");
2928 tcp_undo_cwnd_reduction(sk, false);
2929 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2930 return true;
2931 }
2932 return false;
2933 }
2934
2935 /* Undo during loss recovery after partial ACK or using F-RTO. */
2936 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2937 {
2938 struct tcp_sock *tp = tcp_sk(sk);
2939
2940 if (frto_undo || tcp_may_undo(tp)) {
2941 tcp_undo_cwnd_reduction(sk, true);
2942
2943 DBGUNDO(sk, "partial loss");
2944 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2945 if (frto_undo)
2946 NET_INC_STATS(sock_net(sk),
2947 LINUX_MIB_TCPSPURIOUSRTOS);
2948 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
2949 if (tcp_is_non_sack_preventing_reopen(sk))
2950 return true;
2951 if (frto_undo || tcp_is_sack(tp)) {
2952 tcp_set_ca_state(sk, TCP_CA_Open);
2953 tp->is_sack_reneg = 0;
2954 }
2955 return true;
2956 }
2957 return false;
2958 }
2959
2960 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
2961 * It computes the number of packets to send (sndcnt) based on packets newly
2962 * delivered:
2963 * 1) If the packets in flight is larger than ssthresh, PRR spreads the
2964 * cwnd reductions across a full RTT.
2965 * 2) Otherwise PRR uses packet conservation to send as much as delivered.
2966 * But when SND_UNA is acked without further losses,
2967 * slow starts cwnd up to ssthresh to speed up the recovery.
2968 */
2969 static void tcp_init_cwnd_reduction(struct sock *sk)
2970 {
2971 struct tcp_sock *tp = tcp_sk(sk);
2972
2973 tp->high_seq = tp->snd_nxt;
2974 tp->tlp_high_seq = 0;
2975 tp->snd_cwnd_cnt = 0;
2976 tp->prior_cwnd = tcp_snd_cwnd(tp);
2977 tp->prr_delivered = 0;
2978 tp->prr_out = 0;
2979 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
2980 tcp_ecn_queue_cwr(tp);
2981 }
2982
2983 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag)
2984 {
2985 struct tcp_sock *tp = tcp_sk(sk);
2986 int sndcnt = 0;
2987 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2988
2989 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
2990 return;
2991
2992 trace_tcp_cwnd_reduction_tp(sk, newly_acked_sacked, newly_lost, flag);
2993
2994 tp->prr_delivered += newly_acked_sacked;
2995 if (delta < 0) {
2996 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2997 tp->prior_cwnd - 1;
2998 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2999 } else {
3000 sndcnt = max_t(int, tp->prr_delivered - tp->prr_out,
3001 newly_acked_sacked);
3002 if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
3003 sndcnt++;
3004 sndcnt = min(delta, sndcnt);
3005 }
3006 /* Force a fast retransmit upon entering fast recovery */
3007 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
3008 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
3009 }
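/* Worked example (annotation, not part of the original source): with
 * prior_cwnd = 10, ssthresh = 7 and 2 packets newly delivered while
 * in_flight exceeds ssthresh, dividend = 7 * 2 + 10 - 1 = 23 and
 * sndcnt = 23 / 10 - prr_out = 2 (with prr_out = 0), so roughly 7
 * segments are sent for every 10 delivered and cwnd glides toward
 * ssthresh over one RTT.
 */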
3010
3011 static inline void tcp_end_cwnd_reduction(struct sock *sk)
3012 {
3013 struct tcp_sock *tp = tcp_sk(sk);
3014
3015 if (inet_csk(sk)->icsk_ca_ops->cong_control)
3016 return;
3017
3018 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
3019 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
3020 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
3021 tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
3022 tp->snd_cwnd_stamp = tcp_jiffies32;
3023 }
3024 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
3025 }
3026
3027 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
3028 void tcp_enter_cwr(struct sock *sk)
3029 {
3030 struct tcp_sock *tp = tcp_sk(sk);
3031
3032 tp->prior_ssthresh = 0;
3033 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
3034 tp->undo_marker = 0;
3035 tcp_init_cwnd_reduction(sk);
3036 tcp_set_ca_state(sk, TCP_CA_CWR);
3037 }
3038 }
3039 EXPORT_SYMBOL(tcp_enter_cwr);
3040
3041 static void tcp_try_keep_open(struct sock *sk)
3042 {
3043 struct tcp_sock *tp = tcp_sk(sk);
3044 int state = TCP_CA_Open;
3045
3046 if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
3047 state = TCP_CA_Disorder;
3048
3049 if (inet_csk(sk)->icsk_ca_state != state) {
3050 tcp_set_ca_state(sk, state);
3051 tp->high_seq = tp->snd_nxt;
3052 }
3053 }
3054
3055 static void tcp_try_to_open(struct sock *sk, int flag)
3056 {
3057 struct tcp_sock *tp = tcp_sk(sk);
3058
3059 tcp_verify_left_out(tp);
3060
3061 if (!tcp_any_retrans_done(sk))
3062 tp->retrans_stamp = 0;
3063
3064 if (flag & FLAG_ECE)
3065 tcp_enter_cwr(sk);
3066
3067 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
3068 tcp_try_keep_open(sk);
3069 }
3070 }
3071
3072 static void tcp_mtup_probe_failed(struct sock *sk)
3073 {
3074 struct inet_connection_sock *icsk = inet_csk(sk);
3075
3076 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
3077 icsk->icsk_mtup.probe_size = 0;
3078 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
3079 }
3080
3081 static void tcp_mtup_probe_success(struct sock *sk)
3082 {
3083 struct tcp_sock *tp = tcp_sk(sk);
3084 struct inet_connection_sock *icsk = inet_csk(sk);
3085 u64 val;
3086
3087 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3088
3089 val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
3090 do_div(val, icsk->icsk_mtup.probe_size);
3091 DEBUG_NET_WARN_ON_ONCE((u32)val != val);
3092 tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
3093
3094 tp->snd_cwnd_cnt = 0;
3095 tp->snd_cwnd_stamp = tcp_jiffies32;
3096 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3097
3098 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
3099 icsk->icsk_mtup.probe_size = 0;
3100 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
3101 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
3102 }
3103
3104 /* Sometimes we deduce that packets have been dropped due to reasons other than
3105 * congestion, like path MTU reductions or failed client TFO attempts. In these
3106 * cases we call this function to retransmit as many packets as cwnd allows,
3107 * without reducing cwnd. Given that retransmits will set retrans_stamp to a
3108 * non-zero value (and may do so in a later calling context due to TSQ), we
3109 * also enter CA_Loss so that we track when all retransmitted packets are ACKed
3110 * and clear retrans_stamp when that happens (to ensure later recurring RTOs
3111 * are using the correct retrans_stamp and don't declare ETIMEDOUT
3112 * prematurely).
3113 */
3114 static void tcp_non_congestion_loss_retransmit(struct sock *sk)
3115 {
3116 const struct inet_connection_sock *icsk = inet_csk(sk);
3117 struct tcp_sock *tp = tcp_sk(sk);
3118
3119 if (icsk->icsk_ca_state != TCP_CA_Loss) {
3120 tp->high_seq = tp->snd_nxt;
3121 tp->snd_ssthresh = tcp_current_ssthresh(sk);
3122 tp->prior_ssthresh = 0;
3123 tp->undo_marker = 0;
3124 tcp_set_ca_state(sk, TCP_CA_Loss);
3125 }
3126 tcp_xmit_retransmit_queue(sk);
3127 }
3128
3129 /* Do a simple retransmit without using the backoff mechanisms in
3130 * tcp_timer. This is used for path mtu discovery.
3131 * The socket is already locked here.
3132 */
3133 void tcp_simple_retransmit(struct sock *sk)
3134 {
3135 struct tcp_sock *tp = tcp_sk(sk);
3136 struct sk_buff *skb;
3137 int mss;
3138
3139 /* A fastopen SYN request is stored as two separate packets within
3140 * the retransmit queue, this is done by tcp_send_syn_data().
3141 * As a result simply checking the MSS of the frames in the queue
3142 * will not work for the SYN packet.
3143 *
3144 * Being here at all indicates a path MTU issue, so we can
3145 * assume that the fastopen SYN was lost and just mark all the
3146 * frames in the retransmit queue as lost. We will use an MSS of
3147 * -1 to mark all frames as lost, otherwise compute the current MSS.
3148 */
3149 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT)
3150 mss = -1;
3151 else
3152 mss = tcp_current_mss(sk);
3153
3154 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
3155 if (tcp_skb_seglen(skb) > mss)
3156 tcp_mark_skb_lost(sk, skb);
3157 }
3158
3159 if (!tp->lost_out)
3160 return;
3161
3162 if (tcp_is_reno(tp))
3163 tcp_limit_reno_sacked(tp);
3164
3165 tcp_verify_left_out(tp);
3166
3167 /* Don't muck with the congestion window here.
3168 * The reason is that we do not increase the amount of _data_
3169 * in the network, but the units changed and the effective
3170 * cwnd/ssthresh are really reduced now.
3171 */
3172 tcp_non_congestion_loss_retransmit(sk);
3173 }
3174
3175 void tcp_enter_recovery(struct sock *sk, bool ece_ack)
3176 {
3177 struct tcp_sock *tp = tcp_sk(sk);
3178 int mib_idx;
3179
3180 /* Start the clock with our fast retransmit, for undo and ETIMEDOUT. */
3181 tcp_retrans_stamp_cleanup(sk);
3182
3183 if (tcp_is_reno(tp))
3184 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3185 else
3186 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3187
3188 NET_INC_STATS(sock_net(sk), mib_idx);
3189
3190 tp->prior_ssthresh = 0;
3191 tcp_init_undo(tp);
3192
3193 if (!tcp_in_cwnd_reduction(sk)) {
3194 if (!ece_ack)
3195 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3196 tcp_init_cwnd_reduction(sk);
3197 }
3198 tcp_set_ca_state(sk, TCP_CA_Recovery);
3199 }
3200
3201 static void tcp_update_rto_time(struct tcp_sock *tp)
3202 {
3203 if (tp->rto_stamp) {
3204 tp->total_rto_time += tcp_time_stamp_ms(tp) - tp->rto_stamp;
3205 tp->rto_stamp = 0;
3206 }
3207 }
3208
3209 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
3210 * recovered or spurious. Otherwise retransmits more on partial ACKs.
3211 */
3212 static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
3213 int *rexmit)
3214 {
3215 struct tcp_sock *tp = tcp_sk(sk);
3216 bool recovered = !before(tp->snd_una, tp->high_seq);
3217
3218 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
3219 tcp_try_undo_loss(sk, false))
3220 return;
3221
3222 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
3223 /* Step 3.b. A timeout is spurious if not all data are
3224 * lost, i.e., never-retransmitted data are (s)acked.
3225 */
3226 if ((flag & FLAG_ORIG_SACK_ACKED) &&
3227 tcp_try_undo_loss(sk, true))
3228 return;
3229
3230 if (after(tp->snd_nxt, tp->high_seq)) {
3231 if (flag & FLAG_DATA_SACKED || num_dupack)
3232 tp->frto = 0; /* Step 3.a. loss was real */
3233 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
3234 tp->high_seq = tp->snd_nxt;
3235 /* Step 2.b. Try send new data (but deferred until cwnd
3236 * is updated in tcp_ack()). Otherwise fall back to
3237 * the conventional recovery.
3238 */
3239 if (!tcp_write_queue_empty(sk) &&
3240 after(tcp_wnd_end(tp), tp->snd_nxt)) {
3241 *rexmit = REXMIT_NEW;
3242 return;
3243 }
3244 tp->frto = 0;
3245 }
3246 }
3247
3248 if (recovered) {
3249 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
3250 tcp_try_undo_recovery(sk);
3251 return;
3252 }
3253 if (tcp_is_reno(tp)) {
3254 /* A Reno DUPACK means new data in F-RTO step 2.b above are
3255 * delivered. Lower inflight to clock out (re)transmissions.
3256 */
3257 if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
3258 tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
3259 else if (flag & FLAG_SND_UNA_ADVANCED)
3260 tcp_reset_reno_sack(tp);
3261 }
3262 *rexmit = REXMIT_LOST;
3263 }
3264
3265 /* Undo during fast recovery after partial ACK. */
3266 static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
3267 {
3268 struct tcp_sock *tp = tcp_sk(sk);
3269
3270 if (tp->undo_marker && tcp_packet_delayed(tp)) {
3271 /* Plain luck! The hole was filled by a delayed
3272 * packet, rather than by a retransmit. Check reordering.
3273 */
3274 tcp_check_sack_reordering(sk, prior_snd_una, 1);
3275
3276 /* We are getting evidence that the reordering degree is higher
3277 * than we realized. If there are no retransmits out then we
3278 * can undo. Otherwise we clock out new packets but do not
3279 * mark more packets lost or retransmit more.
3280 */
3281 if (tp->retrans_out)
3282 return true;
3283
3284 if (!tcp_any_retrans_done(sk))
3285 tp->retrans_stamp = 0;
3286
3287 DBGUNDO(sk, "partial recovery");
3288 tcp_undo_cwnd_reduction(sk, true);
3289 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
3290 tcp_try_keep_open(sk);
3291 }
3292 return false;
3293 }
3294
3295 static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
3296 {
3297 struct tcp_sock *tp = tcp_sk(sk);
3298
3299 if (tcp_rtx_queue_empty(sk))
3300 return;
3301
3302 if (unlikely(tcp_is_reno(tp))) {
3303 tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
3304 } else {
3305 u32 prior_retrans = tp->retrans_out;
3306
3307 if (tcp_rack_mark_lost(sk))
3308 *ack_flag &= ~FLAG_SET_XMIT_TIMER;
3309 if (prior_retrans > tp->retrans_out)
3310 *ack_flag |= FLAG_LOST_RETRANS;
3311 }
3312 }
3313
3314 /* Process an event, which can update packets-in-flight not trivially.
3315 * Main goal of this function is to calculate new estimate for left_out,
3316 * taking into account both packets sitting in receiver's buffer and
3317 * packets lost by network.
3318 *
3319 * Besides that it updates the congestion state when packet loss or ECN
3320 * is detected. But it does not reduce the cwnd, it is done by the
3321 * congestion control later.
3322 *
3323 * It does _not_ decide what to send, it is made in function
3324 * tcp_xmit_retransmit_queue().
3325 */
3326 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
3327 int num_dupack, int *ack_flag, int *rexmit)
3328 {
3329 struct inet_connection_sock *icsk = inet_csk(sk);
3330 struct tcp_sock *tp = tcp_sk(sk);
3331 int flag = *ack_flag;
3332 bool ece_ack = flag & FLAG_ECE;
3333
3334 if (!tp->packets_out && tp->sacked_out)
3335 tp->sacked_out = 0;
3336
3337 /* Now state machine starts.
3338 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
3339 if (ece_ack)
3340 tp->prior_ssthresh = 0;
3341
3342 /* B. In all the states check for reneging SACKs. */
3343 if (tcp_check_sack_reneging(sk, ack_flag))
3344 return;
3345
3346 /* C. Check consistency of the current state. */
3347 tcp_verify_left_out(tp);
3348
3349 /* D. Check state exit conditions. State can be terminated
3350 * when high_seq is ACKed. */
3351 if (icsk->icsk_ca_state == TCP_CA_Open) {
3352 WARN_ON(tp->retrans_out != 0 && !tp->syn_data);
3353 tp->retrans_stamp = 0;
3354 } else if (!before(tp->snd_una, tp->high_seq)) {
3355 switch (icsk->icsk_ca_state) {
3356 case TCP_CA_CWR:
3357 /* CWR is to be held until something *above* high_seq
3358 * is ACKed, so that the CWR bit reaches the receiver. */
3359 if (tp->snd_una != tp->high_seq) {
3360 tcp_end_cwnd_reduction(sk);
3361 tcp_set_ca_state(sk, TCP_CA_Open);
3362 }
3363 break;
3364
3365 case TCP_CA_Recovery:
3366 if (tcp_is_reno(tp))
3367 tcp_reset_reno_sack(tp);
3368 if (tcp_try_undo_recovery(sk))
3369 return;
3370 tcp_end_cwnd_reduction(sk);
3371 break;
3372 }
3373 }
3374
3375 /* E. Process state. */
3376 switch (icsk->icsk_ca_state) {
3377 case TCP_CA_Recovery:
3378 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3379 if (tcp_is_reno(tp))
3380 tcp_add_reno_sack(sk, num_dupack, ece_ack);
3381 } else if (tcp_try_undo_partial(sk, prior_snd_una))
3382 return;
3383
3384 if (tcp_try_undo_dsack(sk))
3385 tcp_try_to_open(sk, flag);
3386
3387 tcp_identify_packet_loss(sk, ack_flag);
3388 if (icsk->icsk_ca_state != TCP_CA_Recovery) {
3389 if (!tcp_time_to_recover(tp))
3390 return;
3391 /* Undo reverts the recovery state. If loss is evident,
3392 * start a new recovery (e.g. reordering then loss).
3393 */
3394 tcp_enter_recovery(sk, ece_ack);
3395 }
3396 break;
3397 case TCP_CA_Loss:
3398 tcp_process_loss(sk, flag, num_dupack, rexmit);
3399 if (icsk->icsk_ca_state != TCP_CA_Loss)
3400 tcp_update_rto_time(tp);
3401 tcp_identify_packet_loss(sk, ack_flag);
3402 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
3403 (*ack_flag & FLAG_LOST_RETRANS)))
3404 return;
3405 /* Change state if cwnd is undone or retransmits are lost */
3406 fallthrough;
3407 default:
3408 if (tcp_is_reno(tp)) {
3409 if (flag & FLAG_SND_UNA_ADVANCED)
3410 tcp_reset_reno_sack(tp);
3411 tcp_add_reno_sack(sk, num_dupack, ece_ack);
3412 }
3413
3414 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3415 tcp_try_undo_dsack(sk);
3416
3417 tcp_identify_packet_loss(sk, ack_flag);
3418 if (!tcp_time_to_recover(tp)) {
3419 tcp_try_to_open(sk, flag);
3420 return;
3421 }
3422
3423 /* MTU probe failure: don't reduce cwnd */
3424 if (icsk->icsk_ca_state < TCP_CA_CWR &&
3425 icsk->icsk_mtup.probe_size &&
3426 tp->snd_una == tp->mtu_probe.probe_seq_start) {
3427 tcp_mtup_probe_failed(sk);
3428 /* Restores the reduction we did in tcp_mtup_probe() */
3429 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
3430 tcp_simple_retransmit(sk);
3431 return;
3432 }
3433
3434 /* Otherwise enter Recovery state */
3435 tcp_enter_recovery(sk, ece_ack);
3436 }
3437
3438 *rexmit = REXMIT_LOST;
3439 }
3440
3441 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
3442 {
3443 u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
3444 struct tcp_sock *tp = tcp_sk(sk);
3445
3446 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
3447 /* If the remote keeps returning delayed ACKs, eventually
3448 * the min filter would pick it up and overestimate the
3449 * prop. delay when it expires. Skip suspected delayed ACKs.
3450 */
3451 return;
3452 }
3453 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
3454 rtt_us ? : jiffies_to_usecs(1));
3455 }
3456
3457 static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
3458 long seq_rtt_us, long sack_rtt_us,
3459 long ca_rtt_us, struct rate_sample *rs)
3460 {
3461 const struct tcp_sock *tp = tcp_sk(sk);
3462
3463 /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
3464 * broken middle-boxes or peers may corrupt TS-ECR fields. But
3465 * Karn's algorithm forbids taking RTT if some retransmitted data
3466 * is acked (RFC6298).
3467 */
3468 if (seq_rtt_us < 0)
3469 seq_rtt_us = sack_rtt_us;
3470
3471 /* RTTM Rule: A TSecr value received in a segment is used to
3472 * update the averaged RTT measurement only if the segment
3473 * acknowledges some new data, i.e., only if it advances the
3474 * left edge of the send window.
3475 * See draft-ietf-tcplw-high-performance-00, section 3.3.
3476 */
3477 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp &&
3478 tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED)
3479 seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp, 1);
3480
3481 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
3482 if (seq_rtt_us < 0)
3483 return false;
3484
3485 /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
3486 * always taken together with ACK, SACK, or TS-opts. Any negative
3487 * values will be skipped with the seq_rtt_us < 0 check above.
3488 */
3489 tcp_update_rtt_min(sk, ca_rtt_us, flag);
3490 tcp_rtt_estimator(sk, seq_rtt_us);
3491 tcp_set_rto(sk);
3492
3493 /* RFC6298: only reset backoff on valid RTT measurement. */
3494 inet_csk(sk)->icsk_backoff = 0;
3495 return true;
3496 }
3497
3498 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
3499 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
3500 {
3501 struct rate_sample rs;
3502 long rtt_us = -1L;
3503
3504 if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
3505 rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
3506
3507 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
3508 }
3509
3510
3511 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3512 {
3513 const struct inet_connection_sock *icsk = inet_csk(sk);
3514
3515 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
3516 tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
3517 }
3518
3519 /* Restart timer after forward progress on connection.
3520 * RFC2988 recommends to restart timer to now+rto.
3521 */
3522 void tcp_rearm_rto(struct sock *sk)
3523 {
3524 const struct inet_connection_sock *icsk = inet_csk(sk);
3525 struct tcp_sock *tp = tcp_sk(sk);
3526
3527 /* If the retrans timer is currently being used by Fast Open
3528 * for SYN-ACK retrans purpose, stay put.
3529 */
3530 if (rcu_access_pointer(tp->fastopen_rsk))
3531 return;
3532
3533 if (!tp->packets_out) {
3534 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3535 } else {
3536 u32 rto = inet_csk(sk)->icsk_rto;
3537 /* Offset the time elapsed after installing regular RTO */
3538 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
3539 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3540 s64 delta_us = tcp_rto_delta_us(sk);
3541 /* delta_us may not be positive if the socket is locked
3542 * when the retrans timer fires and is rescheduled.
3543 */
3544 rto = usecs_to_jiffies(max_t(int, delta_us, 1));
3545 }
3546 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, true);
3547 }
3548 }
3549
3550 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
3551 static void tcp_set_xmit_timer(struct sock *sk)
3552 {
3553 if (!tcp_sk(sk)->packets_out || !tcp_schedule_loss_probe(sk, true))
3554 tcp_rearm_rto(sk);
3555 }
3556
3557 /* If we get here, the whole TSO packet has not been acked. */
3558 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3559 {
3560 struct tcp_sock *tp = tcp_sk(sk);
3561 u32 packets_acked;
3562
3563 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3564
3565 packets_acked = tcp_skb_pcount(skb);
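/* Trim the already-acked prefix off the skb; the pcount delta
 * below is the number of packets this ACK actually covered.
 */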
3566 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3567 return 0;
3568 packets_acked -= tcp_skb_pcount(skb);
3569
3570 if (packets_acked) {
3571 BUG_ON(tcp_skb_pcount(skb) == 0);
3572 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3573 }
3574
3575 return packets_acked;
3576 }
3577
3578 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3579 const struct sk_buff *ack_skb, u32 prior_snd_una)
3580 {
3581 const struct skb_shared_info *shinfo;
3582
3583 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3584 if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3585 return;
3586
3587 shinfo = skb_shinfo(skb);
3588 if (!before(shinfo->tskey, prior_snd_una) &&
3589 before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
3590 tcp_skb_tsorted_save(skb) {
3591 __skb_tstamp_tx(skb, ack_skb, NULL, sk, SCM_TSTAMP_ACK);
3592 } tcp_skb_tsorted_restore(skb);
3593 }
3594 }
3595
3596 /* Remove acknowledged frames from the retransmission queue. If our packet
3597 * is before the ack sequence we can discard it as it's confirmed to have
3598 * arrived at the other end.
3599 */
3600 static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
3601 u32 prior_fack, u32 prior_snd_una,
3602 struct tcp_sacktag_state *sack, bool ece_ack)
3603 {
3604 const struct inet_connection_sock *icsk = inet_csk(sk);
3605 u64 first_ackt, last_ackt;
3606 struct tcp_sock *tp = tcp_sk(sk);
3607 u32 prior_sacked = tp->sacked_out;
3608 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
3609 struct sk_buff *skb, *next;
3610 bool fully_acked = true;
3611 long sack_rtt_us = -1L;
3612 long seq_rtt_us = -1L;
3613 long ca_rtt_us = -1L;
3614 u32 pkts_acked = 0;
3615 bool rtt_update;
3616 int flag = 0;
3617
3618 first_ackt = 0;
3619
3620 for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
3621 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3622 const u32 start_seq = scb->seq;
3623 u8 sacked = scb->sacked;
3624 u32 acked_pcount;
3625
3626 /* Determine how many packets and what bytes were acked, TSO or otherwise */
3627 if (after(scb->end_seq, tp->snd_una)) {
3628 if (tcp_skb_pcount(skb) == 1 ||
3629 !after(tp->snd_una, scb->seq))
3630 break;
3631
3632 acked_pcount = tcp_tso_acked(sk, skb);
3633 if (!acked_pcount)
3634 break;
3635 fully_acked = false;
3636 } else {
3637 acked_pcount = tcp_skb_pcount(skb);
3638 }
3639
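/* Classify what was acked: retransmitted data yields no RTT
 * sample (Karn's rule), while the send times of never-
 * retransmitted skbs (first_ackt/last_ackt) feed the RTT
 * estimation further down.
 */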
3640 if (unlikely(sacked & TCPCB_RETRANS)) {
3641 if (sacked & TCPCB_SACKED_RETRANS)
3642 tp->retrans_out -= acked_pcount;
3643 flag |= FLAG_RETRANS_DATA_ACKED;
3644 } else if (!(sacked & TCPCB_SACKED_ACKED)) {
3645 last_ackt = tcp_skb_timestamp_us(skb);
3646 WARN_ON_ONCE(last_ackt == 0);
3647 if (!first_ackt)
3648 first_ackt = last_ackt;
3649
3650 if (before(start_seq, reord))
3651 reord = start_seq;
3652 if (!after(scb->end_seq, tp->high_seq))
3653 flag |= FLAG_ORIG_SACK_ACKED;
3654 }
3655
3656 if (sacked & TCPCB_SACKED_ACKED) {
3657 tp->sacked_out -= acked_pcount;
3658 /* snd_una delta covers these skbs */
3659 sack->delivered_bytes -= skb->len;
3660 } else if (tcp_is_sack(tp)) {
3661 tcp_count_delivered(tp, acked_pcount, ece_ack);
3662 if (!tcp_skb_spurious_retrans(tp, skb))
3663 tcp_rack_advance(tp, sacked, scb->end_seq,
3664 tcp_skb_timestamp_us(skb));
3665 }
3666 if (sacked & TCPCB_LOST)
3667 tp->lost_out -= acked_pcount;
3668
3669 tp->packets_out -= acked_pcount;
3670 pkts_acked += acked_pcount;
3671 tcp_rate_skb_delivered(sk, skb, sack->rate);
3672
3673 /* Initial outgoing SYN's get put onto the write_queue
3674 * just like anything else we transmit. It is not
3675 * true data, and if we misinform our callers that
3676 * this ACK acks real data, we will erroneously exit
3677 * connection startup slow start one packet too
3678 * quickly. This is severely frowned upon behavior.
3679 */
3680 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
3681 flag |= FLAG_DATA_ACKED;
3682 } else {
3683 flag |= FLAG_SYN_ACKED;
3684 tp->retrans_stamp = 0;
3685 }
3686
3687 if (!fully_acked)
3688 break;
3689
3690 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3691
3692 next = skb_rb_next(skb);
3693 if (unlikely(skb == tp->retransmit_skb_hint))
3694 tp->retransmit_skb_hint = NULL;
3695 tcp_highest_sack_replace(sk, skb, next);
3696 tcp_rtx_queue_unlink_and_free(skb, sk);
3697 }
3698
3699 if (!skb)
3700 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3701
3702 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
3703 tp->snd_up = tp->snd_una;
3704
3705 if (skb) {
3706 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3707 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3708 flag |= FLAG_SACK_RENEGING;
3709 }
3710
3711 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
3712 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
3713 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
3714
3715 if (pkts_acked == 1 && fully_acked && !prior_sacked &&
3716 (tp->snd_una - prior_snd_una) < tp->mss_cache &&
3717 sack->rate->prior_delivered + 1 == tp->delivered &&
3718 !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
3719 /* Conservatively mark a delayed ACK. It's typically
3720 * from a lone runt packet over the round trip to
3721 * a receiver w/o out-of-order or CE events.
3722 */
3723 flag |= FLAG_ACK_MAYBE_DELAYED;
3724 }
3725 }
3726 if (sack->first_sackt) {
3727 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
3728 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
3729 }
3730 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
3731 ca_rtt_us, sack->rate);
3732
3733 if (flag & FLAG_ACKED) {
3734 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3735 if (unlikely(icsk->icsk_mtup.probe_size &&
3736 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3737 tcp_mtup_probe_success(sk);
3738 }
3739
3740 if (tcp_is_reno(tp)) {
3741 tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
3742
3743 /* If any of the cumulatively ACKed segments was
3744 * retransmitted, non-SACK case cannot confirm that
3745 * progress was due to original transmission due to
3746 * lack of TCPCB_SACKED_ACKED bits even if some of
3747 * the packets may have been never retransmitted.
3748 */
3749 if (flag & FLAG_RETRANS_DATA_ACKED)
3750 flag &= ~FLAG_ORIG_SACK_ACKED;
3751 } else {
3752 /* Non-retransmitted hole got filled? That's reordering */
3753 if (before(reord, prior_fack))
3754 tcp_check_sack_reordering(sk, reord, 0);
3755 }
3756
3757 sack->delivered_bytes = (skb ?
3758 TCP_SKB_CB(skb)->seq : tp->snd_una) -
3759 prior_snd_una;
3760 } else if (skb && rtt_update && sack_rtt_us >= 0 &&
3761 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
3762 tcp_skb_timestamp_us(skb))) {
3763 /* Do not re-arm RTO if the sack RTT is measured from data sent
3764 * after when the head was last (re)transmitted. Otherwise the
3765 * timeout may continue to extend in loss recovery.
3766 */
3767 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3768 }
3769
3770 if (icsk->icsk_ca_ops->pkts_acked) {
3771 struct ack_sample sample = { .pkts_acked = pkts_acked,
3772 .rtt_us = sack->rate->rtt_us };
3773
3774 sample.in_flight = tp->mss_cache *
3775 (tp->delivered - sack->rate->prior_delivered);
3776 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3777 }
3778
3779 #if FASTRETRANS_DEBUG > 0
3780 WARN_ON((int)tp->sacked_out < 0);
3781 WARN_ON((int)tp->lost_out < 0);
3782 WARN_ON((int)tp->retrans_out < 0);
3783 if (!tp->packets_out && tcp_is_sack(tp)) {
3784 icsk = inet_csk(sk);
3785 if (tp->lost_out) {
3786 pr_debug("Leak l=%u %d\n",
3787 tp->lost_out, icsk->icsk_ca_state);
3788 tp->lost_out = 0;
3789 }
3790 if (tp->sacked_out) {
3791 pr_debug("Leak s=%u %d\n",
3792 tp->sacked_out, icsk->icsk_ca_state);
3793 tp->sacked_out = 0;
3794 }
3795 if (tp->retrans_out) {
3796 pr_debug("Leak r=%u %d\n",
3797 tp->retrans_out, icsk->icsk_ca_state);
3798 tp->retrans_out = 0;
3799 }
3800 }
3801 #endif
3802 return flag;
3803 }
3804
3805 static void tcp_ack_probe(struct sock *sk)
3806 {
3807 struct inet_connection_sock *icsk = inet_csk(sk);
3808 struct sk_buff *head = tcp_send_head(sk);
3809 const struct tcp_sock *tp = tcp_sk(sk);
3810
3811 /* Was it a usable window open? */
3812 if (!head)
3813 return;
3814 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
3815 icsk->icsk_backoff = 0;
3816 icsk->icsk_probes_tstamp = 0;
3817 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3818 /* Socket must be woken up by a subsequent tcp_data_snd_check().
3819 * This function is not for arbitrary use!
3820 */
3821 } else {
3822 unsigned long when = tcp_probe0_when(sk, tcp_rto_max(sk));
3823
3824 when = tcp_clamp_probe0_to_user_timeout(sk, when);
3825 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, true);
3826 }
3827 }
3828
3829 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3830 {
3831 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3832 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3833 }
3834
3835 /* Decide whether to run the increase function of congestion control. */
3836 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3837 {
3838 /* If reordering is high then always grow cwnd whenever data is
3839 * delivered regardless of its ordering. Otherwise stay conservative
3840 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
3841 * new SACK or ECE mark may first advance cwnd here and later reduce
3842 * cwnd in tcp_fastretrans_alert() based on more states.
3843 */
3844 if (tcp_sk(sk)->reordering >
3845 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
3846 return flag & FLAG_FORWARD_PROGRESS;
3847
3848 return flag & FLAG_DATA_ACKED;
3849 }
3850
3851 /* The "ultimate" congestion control function that aims to replace the rigid
3852 * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
3853 * It's called toward the end of processing an ACK with precise rate
3854 * information. All transmission or retransmission are delayed afterwards.
3855 */
3856 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
3857 int flag, const struct rate_sample *rs)
3858 {
3859 const struct inet_connection_sock *icsk = inet_csk(sk);
3860
3861 if (icsk->icsk_ca_ops->cong_control) {
3862 icsk->icsk_ca_ops->cong_control(sk, ack, flag, rs);
3863 return;
3864 }
3865
3866 if (tcp_in_cwnd_reduction(sk)) {
3867 /* Reduce cwnd if state mandates */
3868 tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag);
3869 } else if (tcp_may_raise_cwnd(sk, flag)) {
3870 /* Advance cwnd if state allows */
3871 tcp_cong_avoid(sk, ack, acked_sacked);
3872 }
3873 tcp_update_pacing_rate(sk);
3874 }
3875
3876 /* Check that window update is acceptable.
3877 * The function assumes that snd_una<=ack<=snd_next.
3878 */
3879 static inline bool tcp_may_update_window(const struct tcp_sock *tp,
3880 const u32 ack, const u32 ack_seq,
3881 const u32 nwin)
3882 {
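/* This is the RFC 793 SND.WL1/SND.WL2 test: accept the window
 * from segments that ack new data or carry a newer seq, or that
 * advertise a larger (or zero) window with the same seq.
 */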
3883 return after(ack, tp->snd_una) ||
3884 after(ack_seq, tp->snd_wl1) ||
3885 (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin));
3886 }
3887
3888 static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
3889 {
3890 #ifdef CONFIG_TCP_AO
3891 struct tcp_ao_info *ao;
3892
3893 if (!static_branch_unlikely(&tcp_ao_needed.key))
3894 return;
3895
3896 ao = rcu_dereference_protected(tp->ao_info,
3897 lockdep_sock_is_held((struct sock *)tp));
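/* ack being numerically below snd_una here means the 32-bit
 * sequence space wrapped, so bump the high-order SNE half.
 */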
3898 if (ao && ack < tp->snd_una) {
3899 ao->snd_sne++;
3900 trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne);
3901 }
3902 #endif
3903 }
3904
3905 /* If we update tp->snd_una, also update tp->bytes_acked */
3906 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
3907 {
3908 u32 delta = ack - tp->snd_una;
3909
3910 sock_owned_by_me((struct sock *)tp);
3911 tp->bytes_acked += delta;
3912 tcp_snd_sne_update(tp, ack);
3913 tp->snd_una = ack;
3914 }
3915
3916 static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
3917 {
3918 #ifdef CONFIG_TCP_AO
3919 struct tcp_ao_info *ao;
3920
3921 if (!static_branch_unlikely(&tcp_ao_needed.key))
3922 return;
3923
3924 ao = rcu_dereference_protected(tp->ao_info,
3925 lockdep_sock_is_held((struct sock *)tp));
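/* Likewise on the receive side: seq numerically below rcv_nxt
 * indicates a 32-bit wrap of the receive sequence space.
 */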
3926 if (ao && seq < tp->rcv_nxt) {
3927 ao->rcv_sne++;
3928 trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne);
3929 }
3930 #endif
3931 }
3932
3933 /* If we update tp->rcv_nxt, also update tp->bytes_received */
3934 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
3935 {
3936 u32 delta = seq - tp->rcv_nxt;
3937
3938 sock_owned_by_me((struct sock *)tp);
3939 tp->bytes_received += delta;
3940 tcp_rcv_sne_update(tp, seq);
3941 WRITE_ONCE(tp->rcv_nxt, seq);
3942 }
3943
3944 /* Update our send window.
3945 *
3946 * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
3947 * and in FreeBSD; NetBSD's is even worse) is wrong.
3948 */
3949 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
3950 u32 ack_seq)
3951 {
3952 struct tcp_sock *tp = tcp_sk(sk);
3953 int flag = 0;
3954 u32 nwin = ntohs(tcp_hdr(skb)->window);
3955
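/* Per RFC 7323 the window field of a segment carrying a SYN is
 * never scaled.
 */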
3956 if (likely(!tcp_hdr(skb)->syn))
3957 nwin <<= tp->rx_opt.snd_wscale;
3958
3959 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
3960 flag |= FLAG_WIN_UPDATE;
3961 tcp_update_wl(tp, ack_seq);
3962
3963 if (tp->snd_wnd != nwin) {
3964 tp->snd_wnd = nwin;
3965
3966 /* Note: this is the only place where
3967 * the fast path is recovered for the sending side.
3968 */
3969 tp->pred_flags = 0;
3970 tcp_fast_path_check(sk);
3971
3972 if (!tcp_write_queue_empty(sk))
3973 tcp_slow_start_after_idle_check(sk);
3974
3975 if (nwin > tp->max_window) {
3976 tp->max_window = nwin;
3977 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
3978 }
3979 }
3980 }
3981
3982 tcp_snd_una_update(tp, ack);
3983
3984 return flag;
3985 }
3986
3987 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
3988 u32 *last_oow_ack_time)
3989 {
3990 /* Paired with the WRITE_ONCE() in this function. */
3991 u32 val = READ_ONCE(*last_oow_ack_time);
3992
3993 if (val) {
3994 s32 elapsed = (s32)(tcp_jiffies32 - val);
3995
3996 if (0 <= elapsed &&
3997 elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
3998 NET_INC_STATS(net, mib_idx);
3999 return true; /* rate-limited: don't send yet! */
4000 }
4001 }
4002
4003 /* Paired with the prior READ_ONCE() and with itself,
4004 * as we might be lockless.
4005 */
4006 WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
4007
4008 return false; /* not rate-limited: go ahead, send dupack now! */
4009 }
4010
4011 /* Return true if we're currently rate-limiting out-of-window ACKs and
4012 * thus shouldn't send a dupack right now. We rate-limit dupacks in
4013 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
4014 * attacks that send repeated SYNs or ACKs for the same connection. To
4015 * do this, we do not send a duplicate SYNACK or ACK if the remote
4016 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
4017 */
tcp_oow_rate_limited(struct net * net,const struct sk_buff * skb,int mib_idx,u32 * last_oow_ack_time)4018 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
4019 int mib_idx, u32 *last_oow_ack_time)
4020 {
4021 /* Data packets without SYNs are not likely part of an ACK loop. */
4022 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
4023 !tcp_hdr(skb)->syn)
4024 return false;
4025
4026 return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
4027 }
4028
4029 static void tcp_send_ack_reflect_ect(struct sock *sk, bool accecn_reflector)
4030 {
4031 struct tcp_sock *tp = tcp_sk(sk);
4032 u16 flags = 0;
4033
4034 if (accecn_reflector)
4035 flags = tcp_accecn_reflector_flags(tp->syn_ect_rcv);
4036 __tcp_send_ack(sk, tp->rcv_nxt, flags);
4037 }
4038
4039 /* RFC 5961 7 [ACK Throttling] */
4040 static void tcp_send_challenge_ack(struct sock *sk, bool accecn_reflector)
4041 {
4042 struct tcp_sock *tp = tcp_sk(sk);
4043 struct net *net = sock_net(sk);
4044 u32 count, now, ack_limit;
4045
4046 /* First check our per-socket dupack rate limit. */
4047 if (__tcp_oow_rate_limited(net,
4048 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
4049 &tp->last_oow_ack_time))
4050 return;
4051
4052 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
4053 if (ack_limit == INT_MAX)
4054 goto send_ack;
4055
4056 /* Then check host-wide RFC 5961 rate limit. */
4057 now = jiffies / HZ;
4058 if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
4059 u32 half = (ack_limit + 1) >> 1;
4060
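/* Refill the global budget once per second with a count
 * randomized around ack_limit, so an off-path attacker cannot
 * easily infer the exact limit by counting challenge ACKs.
 */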
4061 WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
4062 WRITE_ONCE(net->ipv4.tcp_challenge_count,
4063 get_random_u32_inclusive(half, ack_limit + half - 1));
4064 }
4065 count = READ_ONCE(net->ipv4.tcp_challenge_count);
4066 if (count > 0) {
4067 WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
4068 send_ack:
4069 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
4070 tcp_send_ack_reflect_ect(sk, accecn_reflector);
4071 }
4072 }
4073
4074 static void tcp_store_ts_recent(struct tcp_sock *tp)
4075 {
4076 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
4077 tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
4078 }
4079
4080 static int __tcp_replace_ts_recent(struct tcp_sock *tp, s32 tstamp_delta)
4081 {
4082 tcp_store_ts_recent(tp);
4083 return tstamp_delta > 0 ? FLAG_TS_PROGRESS : 0;
4084 }
4085
4086 static int tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
4087 {
4088 s32 delta;
4089
4090 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
4091 /* PAWS bug workaround wrt. ACK frames, the PAWS discard
4092 * extra check below makes sure this can only happen
4093 * for pure ACK frames. -DaveM
4094 *
4095 * Not only that: it also occurs for expired timestamps.
4096 */
4097
4098 if (tcp_paws_check(&tp->rx_opt, 0)) {
4099 delta = tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent;
4100 return __tcp_replace_ts_recent(tp, delta);
4101 }
4102 }
4103
4104 return 0;
4105 }
4106
4107 /* This routine deals with acks during a TLP episode and ends an episode by
4108 * resetting tlp_high_seq. Ref: TLP algorithm in RFC8985
4109 */
4110 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
4111 {
4112 struct tcp_sock *tp = tcp_sk(sk);
4113
4114 if (before(ack, tp->tlp_high_seq))
4115 return;
4116
4117 if (!tp->tlp_retrans) {
4118 /* TLP of new data has been acknowledged */
4119 tp->tlp_high_seq = 0;
4120 } else if (flag & FLAG_DSACK_TLP) {
4121 /* This DSACK means original and TLP probe arrived; no loss */
4122 tp->tlp_high_seq = 0;
4123 } else if (after(ack, tp->tlp_high_seq)) {
4124 /* ACK advances: there was a loss, so reduce cwnd. Reset
4125 * tlp_high_seq in tcp_init_cwnd_reduction()
4126 */
4127 tcp_init_cwnd_reduction(sk);
4128 tcp_set_ca_state(sk, TCP_CA_CWR);
4129 tcp_end_cwnd_reduction(sk);
4130 tcp_try_keep_open(sk);
4131 NET_INC_STATS(sock_net(sk),
4132 LINUX_MIB_TCPLOSSPROBERECOVERY);
4133 } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
4134 FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
4135 /* Pure dupack: original and TLP probe arrived; no loss */
4136 tp->tlp_high_seq = 0;
4137 }
4138 }
4139
4140 static void tcp_in_ack_event(struct sock *sk, int flag)
4141 {
4142 const struct inet_connection_sock *icsk = inet_csk(sk);
4143
4144 if (icsk->icsk_ca_ops->in_ack_event) {
4145 u32 ack_ev_flags = 0;
4146
4147 if (flag & FLAG_WIN_UPDATE)
4148 ack_ev_flags |= CA_ACK_WIN_UPDATE;
4149 if (flag & FLAG_SLOWPATH) {
4150 ack_ev_flags |= CA_ACK_SLOWPATH;
4151 if (flag & FLAG_ECE)
4152 ack_ev_flags |= CA_ACK_ECE;
4153 }
4154
4155 icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags);
4156 }
4157 }
4158
4159 /* Congestion control has updated the cwnd already. So if we're in
4160 * loss recovery then now we do any new sends (for FRTO) or
4161 * retransmits (for CA_Loss or CA_Recovery) that make sense.
4162 */
4163 static void tcp_xmit_recovery(struct sock *sk, int rexmit)
4164 {
4165 struct tcp_sock *tp = tcp_sk(sk);
4166
4167 if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
4168 return;
4169
4170 if (unlikely(rexmit == REXMIT_NEW)) {
4171 __tcp_push_pending_frames(sk, tcp_current_mss(sk),
4172 TCP_NAGLE_OFF);
4173 if (after(tp->snd_nxt, tp->high_seq))
4174 return;
4175 tp->frto = 0;
4176 }
4177 tcp_xmit_retransmit_queue(sk);
4178 }
4179
4180 /* Returns the number of packets newly acked or sacked by the current ACK */
4181 static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered,
4182 u32 ecn_count, int flag)
4183 {
4184 const struct net *net = sock_net(sk);
4185 struct tcp_sock *tp = tcp_sk(sk);
4186 u32 delivered;
4187
4188 delivered = tp->delivered - prior_delivered;
4189 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
4190
4191 if (flag & FLAG_ECE) {
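/* Classic RFC 3168 ECN has no CE counter, so in that mode
 * conservatively treat all newly delivered segments as
 * CE-marked when ECE is set.
 */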
4192 if (tcp_ecn_mode_rfc3168(tp))
4193 ecn_count = delivered;
4194 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, ecn_count);
4195 }
4196
4197 return delivered;
4198 }
4199
4200 /* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
4201 *
4202 * If a DSACK is received that seems like it may have been due to reordering
4203 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
4204 * by srtt), since there is possibility that spurious retransmission was
4205 * due to reordering delay longer than reo_wnd.
4206 *
4207 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
4208 * no. of successful recoveries (accounts for full DSACK-based loss
4209 * recovery undo). After that, reset it to default (min_rtt/4).
4210 *
4211 * At max, reo_wnd is incremented only once per rtt. So that the new
4212 * DSACK on which we are reacting, is due to the spurious retx (approx)
4213 * after the reo_wnd has been updated last time.
4214 *
4215 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than
4216 * absolute value to account for change in rtt.
4217 */
4218 static void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
4219 {
4220 struct tcp_sock *tp = tcp_sk(sk);
4221
4222 if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
4223 TCP_RACK_STATIC_REO_WND) ||
4224 !rs->prior_delivered)
4225 return;
4226
4227 /* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
4228 if (before(rs->prior_delivered, tp->rack.last_delivered))
4229 tp->rack.dsack_seen = 0;
4230
4231 /* Adjust the reo_wnd if update is pending */
4232 if (tp->rack.dsack_seen) {
4233 tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
4234 tp->rack.reo_wnd_steps + 1);
4235 tp->rack.dsack_seen = 0;
4236 tp->rack.last_delivered = tp->delivered;
4237 tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
4238 } else if (!tp->rack.reo_wnd_persist) {
4239 tp->rack.reo_wnd_steps = 1;
4240 }
4241 }
4242
4243 /* This routine deals with incoming acks, but not outgoing ones. */
4244 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
4245 {
4246 struct inet_connection_sock *icsk = inet_csk(sk);
4247 struct tcp_sock *tp = tcp_sk(sk);
4248 struct tcp_sacktag_state sack_state;
4249 struct rate_sample rs = { .prior_delivered = 0 };
4250 u32 prior_snd_una = tp->snd_una;
4251 bool is_sack_reneg = tp->is_sack_reneg;
4252 u32 ack_seq = TCP_SKB_CB(skb)->seq;
4253 u32 ack = TCP_SKB_CB(skb)->ack_seq;
4254 int num_dupack = 0;
4255 int prior_packets = tp->packets_out;
4256 u32 delivered = tp->delivered;
4257 u32 lost = tp->lost;
4258 int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
4259 u32 ecn_count = 0; /* Did we receive ECE/an AccECN ACE update? */
4260 u32 prior_fack;
4261
4262 sack_state.first_sackt = 0;
4263 sack_state.rate = &rs;
4264 sack_state.sack_delivered = 0;
4265 sack_state.delivered_bytes = 0;
4266
4267 /* We very likely will need to access rtx queue. */
4268 prefetch(sk->tcp_rtx_queue.rb_node);
4269
4270 /* If the ack is older than previous acks
4271 * then we can probably ignore it.
4272 */
4273 if (before(ack, prior_snd_una)) {
4274 u32 max_window;
4275
4276 /* do not accept ACK for bytes we never sent. */
4277 max_window = min_t(u64, tp->max_window, tp->bytes_acked);
4278 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
4279 if (before(ack, prior_snd_una - max_window)) {
4280 if (!(flag & FLAG_NO_CHALLENGE_ACK))
4281 tcp_send_challenge_ack(sk, false);
4282 return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
4283 }
4284 goto old_ack;
4285 }
4286
4287 /* If the ack includes data we haven't sent yet, discard
4288 * this segment (RFC793 Section 3.9).
4289 */
4290 if (after(ack, tp->snd_nxt))
4291 return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA;
4292
4293 if (after(ack, prior_snd_una)) {
4294 flag |= FLAG_SND_UNA_ADVANCED;
4295 WRITE_ONCE(icsk->icsk_retransmits, 0);
4296
4297 #if IS_ENABLED(CONFIG_TLS_DEVICE)
4298 if (static_branch_unlikely(&clean_acked_data_enabled.key))
4299 if (tp->tcp_clean_acked)
4300 tp->tcp_clean_acked(sk, ack);
4301 #endif
4302 }
4303
4304 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
4305 rs.prior_in_flight = tcp_packets_in_flight(tp);
4306
4307 /* ts_recent update must be made after we are sure that the packet
4308 * is in window.
4309 */
4310 if (flag & FLAG_UPDATE_TS_RECENT)
4311 flag |= tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4312
4313 if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
4314 FLAG_SND_UNA_ADVANCED) {
4315 /* Window is constant, pure forward advance.
4316 * No more checks are required.
4317 * Note, we use the fact that SND.UNA>=SND.WL2.
4318 */
4319 tcp_update_wl(tp, ack_seq);
4320 tcp_snd_una_update(tp, ack);
4321 flag |= FLAG_WIN_UPDATE;
4322
4323 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
4324 } else {
4325 if (ack_seq != TCP_SKB_CB(skb)->end_seq)
4326 flag |= FLAG_DATA;
4327 else
4328 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
4329
4330 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
4331
4332 if (TCP_SKB_CB(skb)->sacked)
4333 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4334 &sack_state);
4335
4336 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb)))
4337 flag |= FLAG_ECE;
4338
4339 if (sack_state.sack_delivered)
4340 tcp_count_delivered(tp, sack_state.sack_delivered,
4341 flag & FLAG_ECE);
4342 }
4343
4344 /* This is a deviation from RFC3168 since it states that:
4345 * "When the TCP data sender is ready to set the CWR bit after reducing
4346 * the congestion window, it SHOULD set the CWR bit only on the first
4347 * new data packet that it transmits."
4348 * We accept CWR on pure ACKs to be more robust
4349 * with widely-deployed TCP implementations that do this.
4350 */
4351 tcp_ecn_accept_cwr(sk, skb);
4352
4353 /* We passed data and got it acked, remove any soft error
4354 * log. Something worked...
4355 */
4356 if (READ_ONCE(sk->sk_err_soft))
4357 WRITE_ONCE(sk->sk_err_soft, 0);
4358 WRITE_ONCE(icsk->icsk_probes_out, 0);
4359 tp->rcv_tstamp = tcp_jiffies32;
4360 if (!prior_packets)
4361 goto no_queue;
4362
4363 /* See if we can take anything off of the retransmit queue. */
4364 flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una,
4365 &sack_state, flag & FLAG_ECE);
4366
4367 tcp_rack_update_reo_wnd(sk, &rs);
4368
4369 if (tcp_ecn_mode_accecn(tp))
4370 ecn_count = tcp_accecn_process(sk, skb,
4371 tp->delivered - delivered,
4372 sack_state.delivered_bytes,
4373 &flag);
4374
4375 tcp_in_ack_event(sk, flag);
4376
4377 if (unlikely(tp->tlp_high_seq))
4378 tcp_process_tlp_ack(sk, ack, flag);
4379
4380 if (tcp_ack_is_dubious(sk, flag)) {
4381 if (!(flag & (FLAG_SND_UNA_ADVANCED |
4382 FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
4383 num_dupack = 1;
4384 /* Consider if pure acks were aggregated in tcp_add_backlog() */
4385 if (!(flag & FLAG_DATA))
4386 num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
4387 }
4388 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4389 &rexmit);
4390 }
4391
4392 /* If needed, reset TLP/RTO timer when RACK doesn't set. */
4393 if (flag & FLAG_SET_XMIT_TIMER)
4394 tcp_set_xmit_timer(sk);
4395
4396 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
4397 sk_dst_confirm(sk);
4398
4399 delivered = tcp_newly_delivered(sk, delivered, ecn_count, flag);
4400
4401 lost = tp->lost - lost; /* freshly marked lost */
4402 rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
4403 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
4404 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
4405 tcp_xmit_recovery(sk, rexmit);
4406 return 1;
4407
4408 no_queue:
4409 if (tcp_ecn_mode_accecn(tp))
4410 ecn_count = tcp_accecn_process(sk, skb,
4411 tp->delivered - delivered,
4412 sack_state.delivered_bytes,
4413 &flag);
4414 tcp_in_ack_event(sk, flag);
4415 /* If data was DSACKed, see if we can undo a cwnd reduction. */
4416 if (flag & FLAG_DSACKING_ACK) {
4417 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4418 &rexmit);
4419 tcp_newly_delivered(sk, delivered, ecn_count, flag);
4420 }
4421 /* If this ack opens up a zero window, clear backoff. It was
4422 * being used to time the probes, and is probably far higher than
4423 * it needs to be for normal retransmission.
4424 */
4425 tcp_ack_probe(sk);
4426
4427 if (unlikely(tp->tlp_high_seq))
4428 tcp_process_tlp_ack(sk, ack, flag);
4429 return 1;
4430
4431 old_ack:
4432 /* If data was SACKed, tag it and see if we should send more data.
4433 * If data was DSACKed, see if we can undo a cwnd reduction.
4434 */
4435 if (TCP_SKB_CB(skb)->sacked) {
4436 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4437 &sack_state);
4438 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4439 &rexmit);
4440 tcp_newly_delivered(sk, delivered, ecn_count, flag);
4441 tcp_xmit_recovery(sk, rexmit);
4442 }
4443
4444 return 0;
4445 }
4446
4447 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
4448 bool syn, struct tcp_fastopen_cookie *foc,
4449 bool exp_opt)
4450 {
4451 /* Valid only in SYN or SYN-ACK with an even length. */
4452 if (!foc || !syn || len < 0 || (len & 1))
4453 return;
4454
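/* A zero-length cookie in a SYN is a Fast Open cookie request;
 * any other out-of-range length is recorded as -1 (invalid).
 */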
4455 if (len >= TCP_FASTOPEN_COOKIE_MIN &&
4456 len <= TCP_FASTOPEN_COOKIE_MAX)
4457 memcpy(foc->val, cookie, len);
4458 else if (len != 0)
4459 len = -1;
4460 foc->len = len;
4461 foc->exp = exp_opt;
4462 }
4463
4464 static bool smc_parse_options(const struct tcphdr *th,
4465 struct tcp_options_received *opt_rx,
4466 const unsigned char *ptr,
4467 int opsize)
4468 {
4469 #if IS_ENABLED(CONFIG_SMC)
4470 if (static_branch_unlikely(&tcp_have_smc)) {
4471 if (th->syn && !(opsize & 1) &&
4472 opsize >= TCPOLEN_EXP_SMC_BASE &&
4473 get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) {
4474 opt_rx->smc_ok = 1;
4475 return true;
4476 }
4477 }
4478 #endif
4479 return false;
4480 }
4481
4482 /* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
4483 * value on success.
4484 */
4485 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
4486 {
4487 const unsigned char *ptr = (const unsigned char *)(th + 1);
4488 int length = (th->doff * 4) - sizeof(struct tcphdr);
4489 u16 mss = 0;
4490
4491 while (length > 0) {
4492 int opcode = *ptr++;
4493 int opsize;
4494
4495 switch (opcode) {
4496 case TCPOPT_EOL:
4497 return mss;
4498 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
4499 length--;
4500 continue;
4501 default:
4502 if (length < 2)
4503 return mss;
4504 opsize = *ptr++;
4505 if (opsize < 2) /* "silly options" */
4506 return mss;
4507 if (opsize > length)
4508 return mss; /* fail on partial options */
4509 if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) {
4510 u16 in_mss = get_unaligned_be16(ptr);
4511
4512 if (in_mss) {
4513 if (user_mss && user_mss < in_mss)
4514 in_mss = user_mss;
4515 mss = in_mss;
4516 }
4517 }
4518 ptr += opsize - 2;
4519 length -= opsize;
4520 }
4521 }
4522 return mss;
4523 }
4524
4525 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
4526 * But, this can also be called on packets in the established flow when
4527 * the fast version below fails.
4528 */
4529 void tcp_parse_options(const struct net *net,
4530 const struct sk_buff *skb,
4531 struct tcp_options_received *opt_rx, int estab,
4532 struct tcp_fastopen_cookie *foc)
4533 {
4534 const unsigned char *ptr;
4535 const struct tcphdr *th = tcp_hdr(skb);
4536 int length = (th->doff * 4) - sizeof(struct tcphdr);
4537
4538 ptr = (const unsigned char *)(th + 1);
4539 opt_rx->saw_tstamp = 0;
4540 opt_rx->accecn = 0;
4541 opt_rx->saw_unknown = 0;
4542
4543 while (length > 0) {
4544 int opcode = *ptr++;
4545 int opsize;
4546
4547 switch (opcode) {
4548 case TCPOPT_EOL:
4549 return;
4550 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
4551 length--;
4552 continue;
4553 default:
4554 if (length < 2)
4555 return;
4556 opsize = *ptr++;
4557 if (opsize < 2) /* "silly options" */
4558 return;
4559 if (opsize > length)
4560 return; /* don't parse partial options */
4561 switch (opcode) {
4562 case TCPOPT_MSS:
4563 if (opsize == TCPOLEN_MSS && th->syn && !estab) {
4564 u16 in_mss = get_unaligned_be16(ptr);
4565 if (in_mss) {
4566 if (opt_rx->user_mss &&
4567 opt_rx->user_mss < in_mss)
4568 in_mss = opt_rx->user_mss;
4569 opt_rx->mss_clamp = in_mss;
4570 }
4571 }
4572 break;
4573 case TCPOPT_WINDOW:
4574 if (opsize == TCPOLEN_WINDOW && th->syn &&
4575 !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
4576 __u8 snd_wscale = *(__u8 *)ptr;
4577 opt_rx->wscale_ok = 1;
4578 if (snd_wscale > TCP_MAX_WSCALE) {
4579 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n",
4580 __func__,
4581 snd_wscale,
4582 TCP_MAX_WSCALE);
4583 snd_wscale = TCP_MAX_WSCALE;
4584 }
4585 opt_rx->snd_wscale = snd_wscale;
4586 }
4587 break;
4588 case TCPOPT_TIMESTAMP:
4589 if ((opsize == TCPOLEN_TIMESTAMP) &&
4590 ((estab && opt_rx->tstamp_ok) ||
4591 (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
4592 opt_rx->saw_tstamp = 1;
4593 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
4594 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
4595 }
4596 break;
4597 case TCPOPT_SACK_PERM:
4598 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
4599 !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
4600 opt_rx->sack_ok = TCP_SACK_SEEN;
4601 tcp_sack_reset(opt_rx);
4602 }
4603 break;
4604
4605 case TCPOPT_SACK:
4606 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
4607 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
4608 opt_rx->sack_ok) {
4609 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
4610 }
4611 break;
4612 #ifdef CONFIG_TCP_MD5SIG
4613 case TCPOPT_MD5SIG:
4614 /* The MD5 Hash has already been
4615 * checked (see tcp_v{4,6}_rcv()).
4616 */
4617 break;
4618 #endif
4619 #ifdef CONFIG_TCP_AO
4620 case TCPOPT_AO:
4621 /* TCP AO has already been checked
4622 * (see tcp_inbound_ao_hash()).
4623 */
4624 break;
4625 #endif
4626 case TCPOPT_FASTOPEN:
4627 tcp_parse_fastopen_option(
4628 opsize - TCPOLEN_FASTOPEN_BASE,
4629 ptr, th->syn, foc, false);
4630 break;
4631
4632 case TCPOPT_ACCECN0:
4633 case TCPOPT_ACCECN1:
4634 /* Save offset of AccECN option in TCP header */
4635 opt_rx->accecn = (ptr - 2) - (__u8 *)th;
4636 break;
4637
4638 case TCPOPT_EXP:
4639 /* Fast Open option shares code 254 using a
4640 * 16-bit magic number.
4641 */
4642 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
4643 get_unaligned_be16(ptr) ==
4644 TCPOPT_FASTOPEN_MAGIC) {
4645 tcp_parse_fastopen_option(opsize -
4646 TCPOLEN_EXP_FASTOPEN_BASE,
4647 ptr + 2, th->syn, foc, true);
4648 break;
4649 }
4650
4651 if (smc_parse_options(th, opt_rx, ptr, opsize))
4652 break;
4653
4654 opt_rx->saw_unknown = 1;
4655 break;
4656
4657 default:
4658 opt_rx->saw_unknown = 1;
4659 }
4660 ptr += opsize-2;
4661 length -= opsize;
4662 }
4663 }
4664 }
4665 EXPORT_SYMBOL(tcp_parse_options);
4666
4667 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
4668 {
4669 const __be32 *ptr = (const __be32 *)(th + 1);
4670
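/* Match the canonical "NOP NOP TIMESTAMP len=10" layout as a
 * single aligned 32-bit word; anything else falls back to the
 * slow option parser.
 */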
4671 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
4672 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
4673 tp->rx_opt.saw_tstamp = 1;
4674 ++ptr;
4675 tp->rx_opt.rcv_tsval = ntohl(*ptr);
4676 ++ptr;
4677 if (*ptr)
4678 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
4679 else
4680 tp->rx_opt.rcv_tsecr = 0;
4681 return true;
4682 }
4683 return false;
4684 }
4685
4686 /* Fast parse options. This hopes to only see timestamps.
4687 * If it is wrong it falls back on tcp_parse_options().
4688 */
4689 static bool tcp_fast_parse_options(const struct net *net,
4690 const struct sk_buff *skb,
4691 const struct tcphdr *th, struct tcp_sock *tp)
4692 {
4693 /* In the spirit of fast parsing, compare doff directly to constant
4694 * values. Because equality is used, short doff can be ignored here.
4695 */
4696 if (th->doff == (sizeof(*th) / 4)) {
4697 tp->rx_opt.saw_tstamp = 0;
4698 tp->rx_opt.accecn = 0;
4699 return false;
4700 } else if (tp->rx_opt.tstamp_ok &&
4701 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
4702 if (tcp_parse_aligned_timestamp(tp, th)) {
4703 tp->rx_opt.accecn = 0;
4704 return true;
4705 }
4706 }
4707
4708 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
4709 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
4710 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
4711
4712 return true;
4713 }
4714
4715 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
4716 *
4717 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
4718 * it can pass through stack. So, the following predicate verifies that
4719 * this segment is not used for anything but congestion avoidance or
4720 * fast retransmit. Moreover, we even are able to eliminate most of such
4721 * second order effects, if we apply some small "replay" window (~RTO)
4722 * to timestamp space.
4723 *
4724 * All these measures still do not guarantee that we reject wrapped ACKs
4725 * on networks with high bandwidth, when sequence space is recycled quickly,
4726 * but it guarantees that such events will be very rare and do not affect
4727 * connection seriously. This doesn't look nice, but alas, PAWS is really
4728 * buggy extension.
4729 *
4730 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
4731 * states that events when retransmit arrives after original data are rare.
4732 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
4733 * the biggest problem on large power networks even with minor reordering.
4734 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe
4735 * up to bandwidth of 18Gigabit/sec. 8) ]
4736 */
4737
4738 /* Estimates max number of increments of remote peer TSval in
4739 * a replay window (based on our current RTO estimation).
4740 */
4741 static u32 tcp_tsval_replay(const struct sock *sk)
4742 {
4743 /* If we use usec TS resolution,
4744 * then expect the remote peer to use the same resolution.
4745 */
4746 if (tcp_sk(sk)->tcp_usec_ts)
4747 return inet_csk(sk)->icsk_rto * (USEC_PER_SEC / HZ);
4748
4749 /* RFC 7323 recommends a TSval clock between 1ms and 1sec.
4750 * We know that some OS (including old linux) can use 1200 Hz.
4751 */
4752 return inet_csk(sk)->icsk_rto * 1200 / HZ;
4753 }
4754
4755 static enum skb_drop_reason tcp_disordered_ack_check(const struct sock *sk,
4756 const struct sk_buff *skb)
4757 {
4758 const struct tcp_sock *tp = tcp_sk(sk);
4759 const struct tcphdr *th = tcp_hdr(skb);
4760 SKB_DR_INIT(reason, TCP_RFC7323_PAWS);
4761 u32 ack = TCP_SKB_CB(skb)->ack_seq;
4762 u32 seq = TCP_SKB_CB(skb)->seq;
4763
4764 /* 1. Is this not a pure ACK ? */
4765 if (!th->ack || seq != TCP_SKB_CB(skb)->end_seq)
4766 return reason;
4767
4768 /* 2. Is its sequence not the expected one ? */
4769 if (seq != tp->rcv_nxt)
4770 return before(seq, tp->rcv_nxt) ?
4771 SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK :
4772 reason;
4773
4774 /* 3. Is this not a duplicate ACK ? */
4775 if (ack != tp->snd_una)
4776 return reason;
4777
4778 /* 4. Is this updating the window ? */
4779 if (tcp_may_update_window(tp, ack, seq, ntohs(th->window) <<
4780 tp->rx_opt.snd_wscale))
4781 return reason;
4782
4783 /* 5. Is this not in the replay window ? */
4784 if ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) >
4785 tcp_tsval_replay(sk))
4786 return reason;
4787
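/* A pure duplicate ACK at rcv_nxt, not updating the window and
 * inside the replay window: treat it as harmless disorder rather
 * than a PAWS failure.
 */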
4788 return 0;
4789 }
4790
4791 /* Check segment sequence number for validity.
4792 *
4793 * Segment controls are considered valid, if the segment
4794 * fits to the window after truncation to the window. Acceptability
4795 * of data (and SYN, FIN, of course) is checked separately.
4796 * See tcp_data_queue(), for example.
4797 *
4798 * Also, controls (RST is the main one) are accepted using RCV.WUP instead
4799 * of RCV.NXT. The peer still did not advance his SND.UNA when we
4800 * delayed the ACK, so that his SND.UNA <= our RCV.WUP.
4801 * (borrowed from freebsd)
4802 */
4803
4804 static enum skb_drop_reason tcp_sequence(const struct sock *sk,
4805 u32 seq, u32 end_seq,
4806 const struct tcphdr *th)
4807 {
4808 const struct tcp_sock *tp = tcp_sk(sk);
4809
4810 if (before(end_seq, tp->rcv_wup))
4811 return SKB_DROP_REASON_TCP_OLD_SEQUENCE;
4812
4813 if (unlikely(after(end_seq, tp->rcv_nxt + tcp_max_receive_window(tp)))) {
4814 /* Some stacks are known to handle FIN incorrectly; allow the
4815 * FIN to extend beyond the window and check it in detail later.
4816 */
4817 if (!after(end_seq - th->fin, tp->rcv_nxt + tcp_receive_window(tp)))
4818 return SKB_NOT_DROPPED_YET;
4819
4820 if (after(seq, tp->rcv_nxt + tcp_max_receive_window(tp)))
4821 return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;
4822
4823 /* Only accept this packet if receive queue is empty. */
4824 if (skb_queue_len(&sk->sk_receive_queue))
4825 return SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE;
4826 }
4827
4828 return SKB_NOT_DROPPED_YET;
4829 }
4830
4831
4832 void tcp_done_with_error(struct sock *sk, int err)
4833 {
4834 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4835 WRITE_ONCE(sk->sk_err, err);
4836 smp_wmb();
4837
4838 tcp_write_queue_purge(sk);
4839 tcp_done(sk);
4840
4841 if (!sock_flag(sk, SOCK_DEAD))
4842 sk_error_report(sk);
4843 }
4844
4845 /* When we get a reset we do this. */
4846 void tcp_reset(struct sock *sk, struct sk_buff *skb)
4847 {
4848 int err;
4849
4850 trace_tcp_receive_reset(sk);
4851
4852 /* mptcp can't tell us to ignore reset pkts,
4853 * so just ignore the return value of mptcp_incoming_options().
4854 */
4855 if (sk_is_mptcp(sk))
4856 mptcp_incoming_options(sk, skb);
4857
4858 /* We want the right error as BSD sees it (and indeed as we do). */
4859 switch (sk->sk_state) {
4860 case TCP_SYN_SENT:
4861 err = ECONNREFUSED;
4862 break;
4863 case TCP_CLOSE_WAIT:
4864 err = EPIPE;
4865 break;
4866 case TCP_CLOSE:
4867 return;
4868 default:
4869 err = ECONNRESET;
4870 }
4871 tcp_done_with_error(sk, err);
4872 }
4873
4874 /*
4875 * Process the FIN bit. This now behaves as it is supposed to:
4876 * the FIN takes effect when it is validly part of the sequence
4877 * space, not earlier, while holes remain before it.
4878 *
4879 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
4880 * (and thence onto LAST-ACK and finally, CLOSE, we never enter
4881 * TIME-WAIT)
4882 *
4883 * If we are in FINWAIT-1, a received FIN indicates simultaneous
4884 * close and we go into CLOSING (and later onto TIME-WAIT)
4885 *
4886 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4887 */
4888 void tcp_fin(struct sock *sk)
4889 {
4890 struct tcp_sock *tp = tcp_sk(sk);
4891
4892 inet_csk_schedule_ack(sk);
4893
4894 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
4895 sock_set_flag(sk, SOCK_DONE);
4896
4897 switch (sk->sk_state) {
4898 case TCP_SYN_RECV:
4899 case TCP_ESTABLISHED:
4900 /* Move to CLOSE_WAIT */
4901 tcp_set_state(sk, TCP_CLOSE_WAIT);
4902 inet_csk_enter_pingpong_mode(sk);
4903 break;
4904
4905 case TCP_CLOSE_WAIT:
4906 case TCP_CLOSING:
4907 /* Received a retransmission of the FIN, do
4908 * nothing.
4909 */
4910 break;
4911 case TCP_LAST_ACK:
4912 /* RFC793: Remain in the LAST-ACK state. */
4913 break;
4914
4915 case TCP_FIN_WAIT1:
4916 /* This case occurs when a simultaneous close
4917 * happens, we must ack the received FIN and
4918 * enter the CLOSING state.
4919 */
4920 tcp_send_ack(sk);
4921 tcp_set_state(sk, TCP_CLOSING);
4922 break;
4923 case TCP_FIN_WAIT2:
4924 /* Received a FIN -- send ACK and enter TIME_WAIT. */
4925 tcp_send_ack(sk);
4926 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4927 break;
4928 default:
4929 /* Only TCP_LISTEN and TCP_CLOSE are left, in these
4930 * cases we should never reach this piece of code.
4931 */
4932 pr_err("%s: Impossible, sk->sk_state=%d\n",
4933 __func__, sk->sk_state);
4934 break;
4935 }
4936
4937 /* It _is_ possible that we have something out-of-order _after_ the FIN.
4938 * Probably, we should reset in this case. For now drop them.
4939 */
4940 skb_rbtree_purge(&tp->out_of_order_queue);
4941 if (tcp_is_sack(tp))
4942 tcp_sack_reset(&tp->rx_opt);
4943
4944 if (!sock_flag(sk, SOCK_DEAD)) {
4945 sk->sk_state_change(sk);
4946
4947 /* Do not send POLL_HUP for half duplex close. */
4948 if (sk->sk_shutdown == SHUTDOWN_MASK ||
4949 sk->sk_state == TCP_CLOSE)
4950 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
4951 else
4952 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
4953 }
4954 }
4955
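/* Extend SACK block @sp to also cover [@seq, @end_seq] when the two
 * ranges touch or overlap.  Example: sp = [100, 200] and an incoming
 * range [200, 300] yield sp = [100, 300]; a disjoint range such as
 * [400, 500] leaves sp untouched and returns false.
 */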
4956 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4957 u32 end_seq)
4958 {
4959 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
4960 if (before(seq, sp->start_seq))
4961 sp->start_seq = seq;
4962 if (after(end_seq, sp->end_seq))
4963 sp->end_seq = end_seq;
4964 return true;
4965 }
4966 return false;
4967 }
4968
4969 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4970 {
4971 struct tcp_sock *tp = tcp_sk(sk);
4972
4973 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
4974 int mib_idx;
4975
4976 if (before(seq, tp->rcv_nxt))
4977 mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
4978 else
4979 mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
4980
4981 NET_INC_STATS(sock_net(sk), mib_idx);
4982
4983 tp->rx_opt.dsack = 1;
4984 tp->duplicate_sack[0].start_seq = seq;
4985 tp->duplicate_sack[0].end_seq = end_seq;
4986 }
4987 }
4988
4989 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4990 {
4991 struct tcp_sock *tp = tcp_sk(sk);
4992
4993 if (!tp->rx_opt.dsack)
4994 tcp_dsack_set(sk, seq, end_seq);
4995 else
4996 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
4997 }
4998
4999 static void tcp_rcv_spurious_retrans(struct sock *sk,
5000 const struct sk_buff *skb)
5001 {
5002 struct tcp_sock *tp = tcp_sk(sk);
5003
5004 /* When the ACK path fails or drops most ACKs, the sender would
5005 * time out and spuriously retransmit the same segment repeatedly.
5006 * If it seems our ACKs are not reaching the other side,
5007 * based on receiving a duplicate data segment with new flowlabel
5008 * (suggesting the sender suffered an RTO), and we are not already
5009 * repathing due to our own RTO, then rehash the socket to repath our
5010 * packets.
5011 */
5012 #if IS_ENABLED(CONFIG_IPV6)
5013 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss &&
5014 skb->protocol == htons(ETH_P_IPV6) &&
5015 (tcp_sk(sk)->inet_conn.icsk_ack.lrcv_flowlabel !=
5016 ntohl(ip6_flowlabel(ipv6_hdr(skb)))) &&
5017 sk_rethink_txhash(sk))
5018 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
5019
5020 /* Save last flowlabel after a spurious retrans. */
5021 tcp_save_lrcv_flowlabel(sk, skb);
5022 #endif
5023 /* Check DSACK info to detect that the previous ACK carrying the
5024 * AccECN option was lost after the second retransmission, and then
5025 * stop sending AccECN option in all subsequent ACKs.
5026 */
5027 if (tcp_ecn_mode_accecn(tp) &&
5028 tp->accecn_opt_sent_w_dsack &&
5029 TCP_SKB_CB(skb)->seq == tp->duplicate_sack[0].start_seq)
5030 tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_SEND);
5031 }
5032
5033 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
5034 {
5035 struct tcp_sock *tp = tcp_sk(sk);
5036
5037 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5038 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5039 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
5040 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5041
5042 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
5043 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
5044
5045 tcp_rcv_spurious_retrans(sk, skb);
5046 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
5047 end_seq = tp->rcv_nxt;
5048 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
5049 }
5050 }
5051
5052 tcp_send_ack(sk);
5053 }
5054
5055 /* These routines update the SACK block as out-of-order packets arrive or
5056 * in-order packets close up the sequence space.
5057 */
5058 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
5059 {
5060 int this_sack;
5061 struct tcp_sack_block *sp = &tp->selective_acks[0];
5062 struct tcp_sack_block *swalk = sp + 1;
5063
5064 /* See if the recent change to the first SACK eats into
5065 * or hits the sequence space of other SACK blocks, if so coalesce.
5066 */
5067 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
5068 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
5069 int i;
5070
5071 /* Zap SWALK, by moving every further SACK up by one slot.
5072 * Decrease num_sacks.
5073 */
5074 tp->rx_opt.num_sacks--;
5075 for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
5076 sp[i] = sp[i + 1];
5077 continue;
5078 }
5079 this_sack++;
5080 swalk++;
5081 }
5082 }
5083
5084 void tcp_sack_compress_send_ack(struct sock *sk)
5085 {
5086 struct tcp_sock *tp = tcp_sk(sk);
5087
5088 if (!tp->compressed_ack)
5089 return;
5090
5091 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
5092 __sock_put(sk);
5093
5094 /* Since we have to send one ack finally,
5095 * subtract one from tp->compressed_ack to keep
5096 * LINUX_MIB_TCPACKCOMPRESSED accurate.
5097 */
5098 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
5099 tp->compressed_ack - 1);
5100
5101 tp->compressed_ack = 0;
5102 tcp_send_ack(sk);
5103 }
5104
5105 /* Reasonable number of SACK blocks to include in the TCP SACK option.
5106 * The max is 4, but it becomes 3 if TCP timestamps are in use.
5107 * Given that SACK packets might be lost, be conservative and use 2.
5108 */
5109 #define TCP_SACK_BLOCKS_EXPECTED 2
5110
5111 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
5112 {
5113 struct tcp_sock *tp = tcp_sk(sk);
5114 struct tcp_sack_block *sp = &tp->selective_acks[0];
5115 int cur_sacks = tp->rx_opt.num_sacks;
5116 int this_sack;
5117
5118 if (!cur_sacks)
5119 goto new_sack;
5120
5121 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
5122 if (tcp_sack_extend(sp, seq, end_seq)) {
5123 if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
5124 tcp_sack_compress_send_ack(sk);
5125 /* Rotate this_sack to the first one. */
5126 for (; this_sack > 0; this_sack--, sp--)
5127 swap(*sp, *(sp - 1));
5128 if (cur_sacks > 1)
5129 tcp_sack_maybe_coalesce(tp);
5130 return;
5131 }
5132 }
5133
5134 if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
5135 tcp_sack_compress_send_ack(sk);
5136
5137 /* Could not find an adjacent existing SACK, build a new one,
5138 * put it at the front, and shift everyone else down. We
5139 * always know there is at least one SACK present already here.
5140 *
5141 * If the sack array is full, forget about the last one.
5142 */
5143 if (this_sack >= TCP_NUM_SACKS) {
5144 this_sack--;
5145 tp->rx_opt.num_sacks--;
5146 sp--;
5147 }
5148 for (; this_sack > 0; this_sack--, sp--)
5149 *sp = *(sp - 1);
5150
5151 new_sack:
5152 /* Build the new head SACK, and we're done. */
5153 sp->start_seq = seq;
5154 sp->end_seq = end_seq;
5155 tp->rx_opt.num_sacks++;
5156 }
5157
5158 /* RCV.NXT advances, some SACKs should be eaten. */
5159
5160 static void tcp_sack_remove(struct tcp_sock *tp)
5161 {
5162 struct tcp_sack_block *sp = &tp->selective_acks[0];
5163 int num_sacks = tp->rx_opt.num_sacks;
5164 int this_sack;
5165
5166 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
5167 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5168 tp->rx_opt.num_sacks = 0;
5169 return;
5170 }
5171
5172 for (this_sack = 0; this_sack < num_sacks;) {
5173 /* Check if the start of the sack is covered by RCV.NXT. */
5174 if (!before(tp->rcv_nxt, sp->start_seq)) {
5175 int i;
5176
5177 /* RCV.NXT must cover all the block! */
5178 WARN_ON(before(tp->rcv_nxt, sp->end_seq));
5179
5180 /* Zap this SACK, by moving forward any other SACKS. */
5181 for (i = this_sack+1; i < num_sacks; i++)
5182 tp->selective_acks[i-1] = tp->selective_acks[i];
5183 num_sacks--;
5184 continue;
5185 }
5186 this_sack++;
5187 sp++;
5188 }
5189 tp->rx_opt.num_sacks = num_sacks;
5190 }
5191
5192 /**
5193 * tcp_try_coalesce - try to merge skb to prior one
5194 * @sk: socket
5195 * @to: prior buffer
5196 * @from: buffer to add in queue
5197 * @fragstolen: pointer to boolean
5198 *
5199 * Before queueing skb @from after @to, try to merge them
5200 * to reduce overall memory use and queue lengths, if cost is small.
5201 * Packets in ofo or receive queues can stay a long time.
5202 * Better try to coalesce them right now to avoid future collapses.
5203 * Returns true if caller should free @from instead of queueing it
5204 */
5205 static bool tcp_try_coalesce(struct sock *sk,
5206 struct sk_buff *to,
5207 struct sk_buff *from,
5208 bool *fragstolen)
5209 {
5210 int delta;
5211
5212 *fragstolen = false;
5213
5214 /* It's possible this segment overlaps with the prior segment in the queue */
5215 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
5216 return false;
5217
5218 if (!tcp_skb_can_collapse_rx(to, from))
5219 return false;
5220
5221 if (!skb_try_coalesce(to, from, fragstolen, &delta))
5222 return false;
5223
5224 atomic_add(delta, &sk->sk_rmem_alloc);
5225 sk_mem_charge(sk, delta);
5226 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
5227 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
5228 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
5229 TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
5230
5231 if (TCP_SKB_CB(from)->has_rxtstamp) {
5232 TCP_SKB_CB(to)->has_rxtstamp = true;
5233 to->tstamp = from->tstamp;
5234 skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
5235 }
5236
5237 return true;
5238 }
5239
5240 static bool tcp_ooo_try_coalesce(struct sock *sk,
5241 struct sk_buff *to,
5242 struct sk_buff *from,
5243 bool *fragstolen)
5244 {
5245 bool res = tcp_try_coalesce(sk, to, from, fragstolen);
5246
5247 /* In case tcp_drop_reason() is called later, update to->gso_segs */
5248 if (res) {
5249 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
5250 max_t(u16, 1, skb_shinfo(from)->gso_segs);
5251
5252 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
5253 }
5254 return res;
5255 }
5256
5257 noinline_for_tracing static void
5258 tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
5259 {
5260 sk_drops_skbadd(sk, skb);
5261 sk_skb_reason_drop(sk, skb, reason);
5262 }
5263
5264 /* This one checks to see if we can put data from the
5265 * out_of_order queue into the receive_queue.
5266 */
5267 static void tcp_ofo_queue(struct sock *sk)
5268 {
5269 struct tcp_sock *tp = tcp_sk(sk);
5270 __u32 dsack_high = tp->rcv_nxt;
5271 bool fin, fragstolen, eaten;
5272 struct sk_buff *skb, *tail;
5273 struct rb_node *p;
5274
5275 p = rb_first(&tp->out_of_order_queue);
5276 while (p) {
5277 skb = rb_to_skb(p);
5278 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
5279 break;
5280
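/* The head of this skb was already received: remember the
 * duplicate range [seq, min(end_seq, rcv_nxt)] so a D-SACK
 * can be emitted for it.
 */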
5281 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
5282 __u32 dsack = dsack_high;
5283
5284 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
5285 dsack = TCP_SKB_CB(skb)->end_seq;
5286 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
5287 }
5288 p = rb_next(p);
5289 rb_erase(&skb->rbnode, &tp->out_of_order_queue);
5290
5291 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
5292 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_DROP);
5293 continue;
5294 }
5295
5296 tail = skb_peek_tail(&sk->sk_receive_queue);
5297 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
5298 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
5299 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
5300 if (!eaten)
5301 tcp_add_receive_queue(sk, skb);
5302 else
5303 kfree_skb_partial(skb, fragstolen);
5304
5305 if (unlikely(fin)) {
5306 tcp_fin(sk);
5307 /* tcp_fin() purges tp->out_of_order_queue,
5308 * so we must end this loop right now.
5309 */
5310 break;
5311 }
5312 }
5313 }
5314
5315 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
5316 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
5317
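/* Can the socket absorb one more skb without exceeding sk_rcvbuf?
 * Only the current receive-memory usage is checked here; per-skb
 * charging is left to sk_rmem_schedule().
 */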
5318 static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
5319 {
5320 unsigned int rmem = atomic_read(&sk->sk_rmem_alloc);
5321
5322 return rmem <= sk->sk_rcvbuf;
5323 }
5324
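/* Make room to charge @size bytes for @skb: if over budget, first try
 * the gentle tcp_prune_queue(), then prune the out-of-order queue until
 * the charge succeeds.  Returns 0 on success, -1 if the caller must
 * drop the skb.
 */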
5325 static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
5326 unsigned int size)
5327 {
5328 if (!tcp_can_ingest(sk, skb) ||
5329 !sk_rmem_schedule(sk, skb, size)) {
5330
5331 if (tcp_prune_queue(sk, skb) < 0)
5332 return -1;
5333
5334 while (!sk_rmem_schedule(sk, skb, size)) {
5335 if (!tcp_prune_ofo_queue(sk, skb))
5336 return -1;
5337 }
5338 }
5339 return 0;
5340 }
5341
5342 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
5343 {
5344 struct tcp_sock *tp = tcp_sk(sk);
5345 struct rb_node **p, *parent;
5346 struct sk_buff *skb1;
5347 u32 seq, end_seq;
5348 bool fragstolen;
5349
5350 tcp_save_lrcv_flowlabel(sk, skb);
5351 tcp_data_ecn_check(sk, skb);
5352
5353 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
5354 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
5355 READ_ONCE(sk->sk_data_ready)(sk);
5356 tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM);
5357 return;
5358 }
5359
5360 tcp_measure_rcv_mss(sk, skb);
5361 /* Disable header prediction. */
5362 tp->pred_flags = 0;
5363 inet_csk_schedule_ack(sk);
5364
5365 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
5366 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
5367 seq = TCP_SKB_CB(skb)->seq;
5368 end_seq = TCP_SKB_CB(skb)->end_seq;
5369
5370 p = &tp->out_of_order_queue.rb_node;
5371 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5372 /* Initial out of order segment, build 1 SACK. */
5373 if (tcp_is_sack(tp)) {
5374 tp->rx_opt.num_sacks = 1;
5375 tp->selective_acks[0].start_seq = seq;
5376 tp->selective_acks[0].end_seq = end_seq;
5377 }
5378 rb_link_node(&skb->rbnode, NULL, p);
5379 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
5380 tp->ooo_last_skb = skb;
5381 goto end;
5382 }
5383
5384 /* In the typical case, we are adding an skb to the end of the list.
5385 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
5386 */
5387 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
5388 skb, &fragstolen)) {
5389 coalesce_done:
5390 /* For non-SACK flows, do not grow the window, to force DUPACK
5391 * and trigger fast retransmit.
5392 */
5393 if (tcp_is_sack(tp))
5394 tcp_grow_window(sk, skb, true);
5395 kfree_skb_partial(skb, fragstolen);
5396 skb = NULL;
5397 goto add_sack;
5398 }
5399 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
5400 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
5401 parent = &tp->ooo_last_skb->rbnode;
5402 p = &parent->rb_right;
5403 goto insert;
5404 }
5405
5406 /* Find place to insert this segment. Handle overlaps on the way. */
5407 parent = NULL;
5408 while (*p) {
5409 parent = *p;
5410 skb1 = rb_to_skb(parent);
5411 if (before(seq, TCP_SKB_CB(skb1)->seq)) {
5412 p = &parent->rb_left;
5413 continue;
5414 }
5415 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
5416 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5417 /* All the bits are present. Drop. */
5418 NET_INC_STATS(sock_net(sk),
5419 LINUX_MIB_TCPOFOMERGE);
5420 tcp_drop_reason(sk, skb,
5421 SKB_DROP_REASON_TCP_OFOMERGE);
5422 skb = NULL;
5423 tcp_dsack_set(sk, seq, end_seq);
5424 goto add_sack;
5425 }
5426 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
5427 /* Partial overlap. */
5428 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
5429 } else {
5430 /* skb's seq == skb1's seq and skb covers skb1.
5431 * Replace skb1 with skb.
5432 */
5433 rb_replace_node(&skb1->rbnode, &skb->rbnode,
5434 &tp->out_of_order_queue);
5435 tcp_dsack_extend(sk,
5436 TCP_SKB_CB(skb1)->seq,
5437 TCP_SKB_CB(skb1)->end_seq);
5438 NET_INC_STATS(sock_net(sk),
5439 LINUX_MIB_TCPOFOMERGE);
5440 tcp_drop_reason(sk, skb1,
5441 SKB_DROP_REASON_TCP_OFOMERGE);
5442 goto merge_right;
5443 }
5444 } else if (tcp_ooo_try_coalesce(sk, skb1,
5445 skb, &fragstolen)) {
5446 goto coalesce_done;
5447 }
5448 p = &parent->rb_right;
5449 }
5450 insert:
5451 /* Insert segment into RB tree. */
5452 rb_link_node(&skb->rbnode, parent, p);
5453 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
5454
5455 merge_right:
5456 /* Remove other segments covered by skb. */
5457 while ((skb1 = skb_rb_next(skb)) != NULL) {
5458 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
5459 break;
5460 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5461 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
5462 end_seq);
5463 break;
5464 }
5465 rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
5466 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
5467 TCP_SKB_CB(skb1)->end_seq);
5468 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
5469 tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE);
5470 }
5471 /* If there is no skb after us, we are the last_skb ! */
5472 if (!skb1)
5473 tp->ooo_last_skb = skb;
5474
5475 add_sack:
5476 if (tcp_is_sack(tp))
5477 tcp_sack_new_ofo_skb(sk, seq, end_seq);
5478 end:
5479 if (skb) {
5480 /* For non-SACK flows, do not grow the window, to force DUPACK
5481 * and trigger fast retransmit.
5482 */
5483 if (tcp_is_sack(tp))
5484 tcp_grow_window(sk, skb, false);
5485 skb_condense(skb);
5486 skb_set_owner_r(skb, sk);
5487 }
5488 /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
5489 if (sk->sk_socket)
5490 tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
5491 }
5492
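/* Queue an in-order skb on sk_receive_queue, coalescing it into the
 * tail skb when possible.  Returns 1 if @skb was merged and the caller
 * must free it, 0 if it was linked into the queue.
 */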
5493 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
5494 bool *fragstolen)
5495 {
5496 int eaten;
5497 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
5498
5499 eaten = (tail &&
5500 tcp_try_coalesce(sk, tail,
5501 skb, fragstolen)) ? 1 : 0;
5502 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
5503 if (!eaten) {
5504 tcp_add_receive_queue(sk, skb);
5505 skb_set_owner_r(skb, sk);
5506 }
5507 return eaten;
5508 }
5509
5510 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
5511 {
5512 struct sk_buff *skb;
5513 int err = -ENOMEM;
5514 int data_len = 0;
5515 bool fragstolen;
5516
5517 if (size == 0)
5518 return 0;
5519
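/* Split the payload: up to MAX_SKB_FRAGS whole pages go into page
 * frags (data_len); the sub-page remainder stays in the linear area.
 */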
5520 if (size > PAGE_SIZE) {
5521 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
5522
5523 data_len = npages << PAGE_SHIFT;
5524 size = data_len + (size & ~PAGE_MASK);
5525 }
5526 skb = alloc_skb_with_frags(size - data_len, data_len,
5527 PAGE_ALLOC_COSTLY_ORDER,
5528 &err, sk->sk_allocation);
5529 if (!skb)
5530 goto err;
5531
5532 skb_put(skb, size - data_len);
5533 skb->data_len = data_len;
5534 skb->len = size;
5535
5536 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5537 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5538 goto err_free;
5539 }
5540
5541 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
5542 if (err)
5543 goto err_free;
5544
5545 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
5546 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
5547 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
5548
5549 if (tcp_queue_rcv(sk, skb, &fragstolen)) {
5550 WARN_ON_ONCE(fragstolen); /* should not happen */
5551 __kfree_skb(skb);
5552 }
5553 return size;
5554
5555 err_free:
5556 kfree_skb(skb);
5557 err:
5558 return err;
5559
5560 }
5561
5562 void tcp_data_ready(struct sock *sk)
5563 {
5564 if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE))
5565 READ_ONCE(sk->sk_data_ready)(sk);
5566 }
5567
5568 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
5569 {
5570 struct tcp_sock *tp = tcp_sk(sk);
5571 enum skb_drop_reason reason;
5572 bool fragstolen;
5573 int eaten;
5574
5575 /* If a subflow has been reset, the packet should not continue
5576 * to be processed, drop the packet.
5577 */
5578 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
5579 __kfree_skb(skb);
5580 return;
5581 }
5582
5583 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
5584 __kfree_skb(skb);
5585 return;
5586 }
5587 tcp_cleanup_skb(skb);
5588 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
5589
5590 reason = SKB_DROP_REASON_NOT_SPECIFIED;
5591 tp->rx_opt.dsack = 0;
5592
5593 /* Queue data for delivery to the user.
5594 * Packets in sequence go to the receive queue.
5595 * Out of sequence packets to the out_of_order_queue.
5596 */
5597 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
5598 if (tcp_receive_window(tp) == 0) {
5599 /* Some stacks are known to send bare FIN packets
5600 * in a loop even if we send RWIN 0 in our ACK.
5601 * Accepting this FIN does not hurt memory pressure
5602 * because the FIN flag will simply be merged to the
5603 * receive queue tail skb in most cases.
5604 */
5605 if (!skb->len &&
5606 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
5607 goto queue_and_out;
5608
5609 reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5610 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5611 goto out_of_window;
5612 }
5613
5614 /* Ok. In sequence. In window. */
5615 queue_and_out:
5616 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5617 /* TODO: maybe ratelimit these WIN 0 ACK ? */
5618 inet_csk(sk)->icsk_ack.pending |=
5619 (ICSK_ACK_NOMEM | ICSK_ACK_NOW);
5620 inet_csk_schedule_ack(sk);
5621 READ_ONCE(sk->sk_data_ready)(sk);
5622
5623 if (skb_queue_len(&sk->sk_receive_queue) && skb->len) {
5624 reason = SKB_DROP_REASON_PROTO_MEM;
5625 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5626 goto drop;
5627 }
5628 sk_forced_mem_schedule(sk, skb->truesize);
5629 }
5630
5631 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
5632 if (skb->len)
5633 tcp_event_data_recv(sk, skb);
5634 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
5635 tcp_fin(sk);
5636
5637 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5638 tcp_ofo_queue(sk);
5639
5640 /* RFC 5681 4.2: SHOULD send an immediate ACK when a
5641 * gap in the queue is filled.
5642 */
5643 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5644 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
5645 }
5646
5647 if (tp->rx_opt.num_sacks)
5648 tcp_sack_remove(tp);
5649
5650 tcp_fast_path_check(sk);
5651
5652 if (eaten > 0)
5653 kfree_skb_partial(skb, fragstolen);
5654 if (!sock_flag(sk, SOCK_DEAD))
5655 tcp_data_ready(sk);
5656 return;
5657 }
5658
5659 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
5660 tcp_rcv_spurious_retrans(sk, skb);
5661 /* A retransmit, 2nd most common case. Force an immediate ack. */
5662 reason = SKB_DROP_REASON_TCP_OLD_DATA;
5663 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
5664 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
5665
5666 out_of_window:
5667 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5668 inet_csk_schedule_ack(sk);
5669 drop:
5670 tcp_drop_reason(sk, skb, reason);
5671 return;
5672 }
5673
5674 /* Out of window, e.g. a zero window probe. */
5675 if (!before(TCP_SKB_CB(skb)->seq,
5676 tp->rcv_nxt + tcp_receive_window(tp))) {
5677 reason = SKB_DROP_REASON_TCP_OVERWINDOW;
5678 NET_INC_STATS(sock_net(sk), LINUX_MIB_BEYOND_WINDOW);
5679 goto out_of_window;
5680 }
5681
5682 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5683 /* Partial packet, seq < rcv_nxt < end_seq */
5684 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
5685
5686 /* If the window is closed, drop the tail of the packet. But only after
5687 * remembering the D-SACK for its head, set up on the previous line.
5688 */
5689 if (!tcp_receive_window(tp)) {
5690 reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5691 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5692 goto out_of_window;
5693 }
5694 goto queue_and_out;
5695 }
5696
5697 tcp_data_queue_ofo(sk, skb);
5698 }
5699
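/* Return the skb following @skb, either in the linked @list (receive
 * queue) or, when @list is NULL, in the out-of-order rbtree.
 */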
5700 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
5701 {
5702 if (list)
5703 return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5704
5705 return skb_rb_next(skb);
5706 }
5707
5708 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5709 struct sk_buff_head *list,
5710 struct rb_root *root)
5711 {
5712 struct sk_buff *next = tcp_skb_next(skb, list);
5713
5714 if (list)
5715 __skb_unlink(skb, list);
5716 else
5717 rb_erase(&skb->rbnode, root);
5718
5719 __kfree_skb(skb);
5720 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
5721
5722 return next;
5723 }
5724
5725 /* Collapse contiguous sequence of skbs head..tail with
5726 * sequence numbers start..end.
5727 *
5728 * If tail is NULL, this means until the end of the queue.
5729 *
5730 * Segments with FIN/SYN are not collapsed (only because this
5731 * simplifies code)
5732 */
5733 static void
5734 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
5735 struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
5736 {
5737 struct sk_buff *skb = head, *n;
5738 struct sk_buff_head tmp;
5739 bool end_of_skbs;
5740
5741 /* First, check that queue is collapsible and find
5742 * the point where collapsing can be useful.
5743 */
5744 restart:
5745 for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
5746 n = tcp_skb_next(skb, list);
5747
5748 if (!skb_frags_readable(skb))
5749 goto skip_this;
5750
5751 /* No new bits? That is possible in the ofo queue. */
5752 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5753 skb = tcp_collapse_one(sk, skb, list, root);
5754 if (!skb)
5755 break;
5756 goto restart;
5757 }
5758
5759 /* The first skb to collapse is:
5760 * - not SYN/FIN and
5761 * - bloated or contains data before "start" or
5762 * overlaps the next one and mptcp allows collapsing.
5763 */
5764 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
5765 (tcp_win_from_space(sk, skb->truesize) > skb->len ||
5766 before(TCP_SKB_CB(skb)->seq, start))) {
5767 end_of_skbs = false;
5768 break;
5769 }
5770
5771 if (n && n != tail && skb_frags_readable(n) &&
5772 tcp_skb_can_collapse_rx(skb, n) &&
5773 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
5774 end_of_skbs = false;
5775 break;
5776 }
5777
5778 skip_this:
5779 /* Decided to skip this, advance start seq. */
5780 start = TCP_SKB_CB(skb)->end_seq;
5781 }
5782 if (end_of_skbs ||
5783 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
5784 !skb_frags_readable(skb))
5785 return;
5786
5787 __skb_queue_head_init(&tmp);
5788
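/* Rebuild [start, end) into compact, freshly allocated skbs, copying
 * payload out of the bloated originals and releasing them as we go.
 */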
5789 while (before(start, end)) {
5790 int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
5791 struct sk_buff *nskb;
5792
5793 nskb = alloc_skb(copy, GFP_ATOMIC);
5794 if (!nskb)
5795 break;
5796
5797 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
5798 skb_copy_decrypted(nskb, skb);
5799 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
5800 if (list)
5801 __skb_queue_before(list, skb, nskb);
5802 else
5803 __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
5804 skb_set_owner_r(nskb, sk);
5805 mptcp_skb_ext_move(nskb, skb);
5806
5807 /* Copy data, releasing collapsed skbs. */
5808 while (copy > 0) {
5809 int offset = start - TCP_SKB_CB(skb)->seq;
5810 int size = TCP_SKB_CB(skb)->end_seq - start;
5811
5812 BUG_ON(offset < 0);
5813 if (size > 0) {
5814 size = min(copy, size);
5815 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
5816 BUG();
5817 TCP_SKB_CB(nskb)->end_seq += size;
5818 copy -= size;
5819 start += size;
5820 }
5821 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5822 skb = tcp_collapse_one(sk, skb, list, root);
5823 if (!skb ||
5824 skb == tail ||
5825 !tcp_skb_can_collapse_rx(nskb, skb) ||
5826 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
5827 !skb_frags_readable(skb))
5828 goto end;
5829 }
5830 }
5831 }
5832 end:
5833 skb_queue_walk_safe(&tmp, skb, n)
5834 tcp_rbtree_insert(root, skb);
5835 }
5836
5837 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
5838 * and tcp_collapse() them until all the queue is collapsed.
5839 */
5840 static void tcp_collapse_ofo_queue(struct sock *sk)
5841 {
5842 struct tcp_sock *tp = tcp_sk(sk);
5843 u32 range_truesize, sum_tiny = 0;
5844 struct sk_buff *skb, *head;
5845 u32 start, end;
5846
5847 skb = skb_rb_first(&tp->out_of_order_queue);
5848 new_range:
5849 if (!skb) {
5850 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5851 return;
5852 }
5853 start = TCP_SKB_CB(skb)->seq;
5854 end = TCP_SKB_CB(skb)->end_seq;
5855 range_truesize = skb->truesize;
5856
5857 for (head = skb;;) {
5858 skb = skb_rb_next(skb);
5859
5860 /* Range is terminated when we see a gap or when
5861 * we are at the queue end.
5862 */
5863 if (!skb ||
5864 after(TCP_SKB_CB(skb)->seq, end) ||
5865 before(TCP_SKB_CB(skb)->end_seq, start)) {
5866 /* Do not attempt collapsing tiny skbs */
5867 if (range_truesize != head->truesize ||
5868 end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
5869 tcp_collapse(sk, NULL, &tp->out_of_order_queue,
5870 head, skb, start, end);
5871 } else {
5872 sum_tiny += range_truesize;
5873 if (sum_tiny > sk->sk_rcvbuf >> 3)
5874 return;
5875 }
5876 goto new_range;
5877 }
5878
5879 range_truesize += skb->truesize;
5880 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
5881 start = TCP_SKB_CB(skb)->seq;
5882 if (after(TCP_SKB_CB(skb)->end_seq, end))
5883 end = TCP_SKB_CB(skb)->end_seq;
5884 }
5885 }
5886
5887 /*
5888 * Clean the out-of-order queue to make room.
5889 * We drop packets with high sequence numbers in order to:
5890 * 1) Give holes a chance to be filled.
5891 * This means we do not drop packets from the ooo queue if their sequence
5892 * is before the incoming packet's sequence.
5893 * 2) Not add too-big latencies if thousands of packets sit there.
5894 * (But if the application shrinks SO_RCVBUF, we could still end up
5895 * freeing the whole queue here.)
5896 * 3) Drop at least 12.5 % of sk_rcvbuf to mitigate malicious attacks.
5897 *
5898 * Return true if queue has shrunk.
5899 */
5900 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
5901 {
5902 struct tcp_sock *tp = tcp_sk(sk);
5903 struct rb_node *node, *prev;
5904 bool pruned = false;
5905 int goal;
5906
5907 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5908 return false;
5909
5910 goal = sk->sk_rcvbuf >> 3;
5911 node = &tp->ooo_last_skb->rbnode;
5912
5913 do {
5914 struct sk_buff *skb = rb_to_skb(node);
5915
5916 /* If incoming skb would land last in ofo queue, stop pruning. */
5917 if (after(TCP_SKB_CB(in_skb)->seq, TCP_SKB_CB(skb)->seq))
5918 break;
5919 pruned = true;
5920 prev = rb_prev(node);
5921 rb_erase(node, &tp->out_of_order_queue);
5922 goal -= skb->truesize;
5923 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
5924 tp->ooo_last_skb = rb_to_skb(prev);
5925 if (!prev || goal <= 0) {
5926 if (tcp_can_ingest(sk, in_skb) &&
5927 !tcp_under_memory_pressure(sk))
5928 break;
5929 goal = sk->sk_rcvbuf >> 3;
5930 }
5931 node = prev;
5932 } while (node);
5933
5934 if (pruned) {
5935 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
5936 /* Reset SACK state. A conforming SACK implementation will
5937 * do the same at a timeout based retransmit. When a connection
5938 * is in a sad state like this, we care only about integrity
5939 * of the connection not performance.
5940 */
5941 if (tp->rx_opt.sack_ok)
5942 tcp_sack_reset(&tp->rx_opt);
5943 }
5944 return pruned;
5945 }
5946
5947 /* Reduce allocated memory if we can, trying to get
5948 * the socket within its memory limits again.
5949 *
5950 * Return less than zero if we should start dropping frames
5951 * until the socket-owning process reads some of the data
5952 * to stabilize the situation.
5953 */
5954 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
5955 {
5956 struct tcp_sock *tp = tcp_sk(sk);
5957
5958 /* Do nothing if our queues are empty. */
5959 if (!atomic_read(&sk->sk_rmem_alloc))
5960 return -1;
5961
5962 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
5963
5964 if (!tcp_can_ingest(sk, in_skb))
5965 tcp_clamp_window(sk);
5966 else if (tcp_under_memory_pressure(sk))
5967 tcp_adjust_rcv_ssthresh(sk);
5968
5969 if (tcp_can_ingest(sk, in_skb))
5970 return 0;
5971
5972 tcp_collapse_ofo_queue(sk);
5973 if (!skb_queue_empty(&sk->sk_receive_queue))
5974 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
5975 skb_peek(&sk->sk_receive_queue),
5976 NULL,
5977 tp->copied_seq, tp->rcv_nxt);
5978
5979 if (tcp_can_ingest(sk, in_skb))
5980 return 0;
5981
5982 /* Collapsing did not help, destructive actions follow.
5983 * This must never occur. */
5984
5985 tcp_prune_ofo_queue(sk, in_skb);
5986
5987 if (tcp_can_ingest(sk, in_skb))
5988 return 0;
5989
5990 /* If we are really being abused, tell the caller to silently
5991 * drop receive data on the floor. It will get retransmitted
5992 * and hopefully then we'll have sufficient space.
5993 */
5994 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
5995
5996 /* Massive buffer overcommit. */
5997 tp->pred_flags = 0;
5998 return -1;
5999 }
6000
6001 static bool tcp_should_expand_sndbuf(struct sock *sk)
6002 {
6003 const struct tcp_sock *tp = tcp_sk(sk);
6004
6005 /* If the user specified a specific send buffer setting, do
6006 * not modify it.
6007 */
6008 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
6009 return false;
6010
6011 /* If we are under global TCP memory pressure, do not expand. */
6012 if (tcp_under_memory_pressure(sk)) {
6013 int unused_mem = sk_unused_reserved_mem(sk);
6014
6015 /* Adjust sndbuf according to reserved mem. But make sure
6016 * it never goes below SOCK_MIN_SNDBUF.
6017 * See sk_stream_moderate_sndbuf() for more details.
6018 */
6019 if (unused_mem > SOCK_MIN_SNDBUF)
6020 WRITE_ONCE(sk->sk_sndbuf, unused_mem);
6021
6022 return false;
6023 }
6024
6025 /* If we are under soft global TCP memory pressure, do not expand. */
6026 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
6027 return false;
6028
6029 /* If we filled the congestion window, do not expand. */
6030 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
6031 return false;
6032
6033 return true;
6034 }
6035
6036 static void tcp_new_space(struct sock *sk)
6037 {
6038 struct tcp_sock *tp = tcp_sk(sk);
6039
6040 if (tcp_should_expand_sndbuf(sk)) {
6041 tcp_sndbuf_expand(sk);
6042 tp->snd_cwnd_stamp = tcp_jiffies32;
6043 }
6044
6045 INDIRECT_CALL_1(READ_ONCE(sk->sk_write_space),
6046 sk_stream_write_space,
6047 sk);
6048 }
6049
6050 /* Caller made space either from:
6051 * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
6052 * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
6053 *
6054 * We might be able to generate EPOLLOUT to the application if:
6055 * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
6056 * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
6057 * small enough that tcp_stream_memory_free() decides it
6058 * is time to generate EPOLLOUT.
6059 */
6060 void __tcp_check_space(struct sock *sk)
6061 {
6062 tcp_new_space(sk);
6063 if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
6064 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
6065 }
6066
6067 static inline void tcp_data_snd_check(struct sock *sk)
6068 {
6069 tcp_push_pending_frames(sk);
6070 tcp_check_space(sk);
6071 }
6072
6073 /*
6074 * Check if sending an ack is needed.
6075 */
6076 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
6077 {
6078 struct tcp_sock *tp = tcp_sk(sk);
6079 struct net *net = sock_net(sk);
6080 unsigned long rtt;
6081 u64 delay;
6082
6083 /* More than one full frame received... */
6084 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
6085 /* ... and right edge of window advances far enough.
6086 * (tcp_recvmsg() will send ACK otherwise).
6087 * If the application uses SO_RCVLOWAT, we want to send an ack now if
6088 * we have not received enough bytes to satisfy the condition.
6089 */
6090 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
6091 __tcp_select_window(sk) >= tp->rcv_wnd)) ||
6092 /* We ACK each frame or... */
6093 tcp_in_quickack_mode(sk) ||
6094 /* Protocol state mandates a one-time immediate ACK */
6095 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
6096 /* If we are running from __release_sock() in user context,
6097 * defer the ack until tcp_release_cb().
6098 */
6099 if (sock_owned_by_user_nocheck(sk) &&
6100 READ_ONCE(net->ipv4.sysctl_tcp_backlog_ack_defer)) {
6101 set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags);
6102 return;
6103 }
6104 send_now:
6105 tcp_send_ack(sk);
6106 return;
6107 }
6108
6109 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
6110 tcp_send_delayed_ack(sk);
6111 return;
6112 }
6113
6114 if (!tcp_is_sack(tp) ||
6115 tp->compressed_ack >= READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_nr))
6116 goto send_now;
6117
6118 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
6119 tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
6120 tp->dup_ack_counter = 0;
6121 }
6122 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
6123 tp->dup_ack_counter++;
6124 goto send_now;
6125 }
6126 tp->compressed_ack++;
6127 if (hrtimer_is_queued(&tp->compressed_ack_timer))
6128 return;
6129
6130 /* Compressed ack timer: comp_sack_rtt_percent of the rtt,
6131 * but no more than tcp_comp_sack_delay_ns.
6132 */
6133
6134 rtt = tp->rcv_rtt_est.rtt_us;
6135 if (tp->srtt_us && tp->srtt_us < rtt)
6136 rtt = tp->srtt_us;
6137
6138 /* delay = (rtt >> 3) * NSEC_PER_USEC * comp_sack_rtt_percent / 100
6139 * ->
6140 * delay = rtt * 1.25 * comp_sack_rtt_percent
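 *
 * Example: an RTT of 1 ms is stored as rtt == 8000 (usec << 3);
 * with comp_sack_rtt_percent == 33, delay = 8000 * 1.25 * 33 ns
 * = 330 usec, i.e. 33% of the measured RTT.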
6141 */
6142 delay = (u64)(rtt + (rtt >> 2)) *
6143 READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_rtt_percent);
6144
6145 delay = min(delay, READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_delay_ns));
6146
6147 sock_hold(sk);
6148 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
6149 READ_ONCE(net->ipv4.sysctl_tcp_comp_sack_slack_ns),
6150 HRTIMER_MODE_REL_PINNED_SOFT);
6151 }
6152
6153 static inline void tcp_ack_snd_check(struct sock *sk)
6154 {
6155 if (!inet_csk_ack_scheduled(sk)) {
6156 /* We sent a data segment already. */
6157 return;
6158 }
6159 __tcp_ack_snd_check(sk, 1);
6160 }
6161
6162 /*
6163 * This routine is only called when we have urgent data
6164 * signaled. It's the 'slow' part of tcp_urg. It could be
6165 * moved inline now as tcp_urg is only called from one
6166 * place. We handle URGent data wrongly; we have to, as
6167 * BSD still doesn't use the correction from RFC 961.
6168 * For 1003.1g we should support a new option TCP_STDURG to permit
6169 * either form (or just set the sysctl tcp_stdurg).
6170 */
6171
6172 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
6173 {
6174 struct tcp_sock *tp = tcp_sk(sk);
6175 u32 ptr = ntohs(th->urg_ptr);
6176
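/* BSD-style urgent pointers point one byte _past_ the urgent byte;
 * unless tcp_stdurg is set, compensate before turning the
 * segment-relative pointer into an absolute sequence number.
 */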
6177 if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
6178 ptr--;
6179 ptr += ntohl(th->seq);
6180
6181 /* Ignore urgent data that we've already seen and read. */
6182 if (after(tp->copied_seq, ptr))
6183 return;
6184
6185 /* Do not replay urg ptr.
6186 *
6187 * NOTE: interesting situation not covered by specs.
6188 * Misbehaving sender may send urg ptr, pointing to segment,
6189 * which we already have in the ofo queue. We are not able to fetch
6190 * such data and will stay in TCP_URG_NOTYET until it is eaten
6191 * by recvmsg(). It seems we are not obliged to handle such wicked
6192 * situations. But it is worth thinking about the possibility of
6193 * DoSes using some hypothetical application-level deadlock.
6194 */
6195 if (before(ptr, tp->rcv_nxt))
6196 return;
6197
6198 /* Do we already have a newer (or duplicate) urgent pointer? */
6199 if (tp->urg_data && !after(ptr, tp->urg_seq))
6200 return;
6201
6202 /* Tell the world about our new urgent pointer. */
6203 sk_send_sigurg(sk);
6204
6205 /* We may be adding urgent data when the last byte read was
6206 * urgent. To do this requires some care. We cannot just ignore
6207 * tp->copied_seq since we would read the last urgent byte again
6208 * as data, nor can we alter copied_seq until this data arrives
6209 * or we break the semantics of SIOCATMARK (and thus sockatmark())
6210 *
6211 * NOTE. Double Dutch. Rendering to plain English: the author of the comment
6212 * above did something like send("A", MSG_OOB); send("B", MSG_OOB);
6213 * and expected both A and B to disappear from the stream. This is _wrong_.
6214 * Though this happens in BSD with high probability, it is incidental.
6215 * Any application relying on it is buggy. Note also that the fix "works"
6216 * only in this artificial test: insert some normal data between A and B and we
6217 * will diverge from BSD again. Verdict: it is better to remove it, to trap
6218 * buggy users.
6219 */
6220 if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
6221 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
6222 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
6223 tp->copied_seq++;
6224 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
6225 __skb_unlink(skb, &sk->sk_receive_queue);
6226 __kfree_skb(skb);
6227 }
6228 }
6229
6230 WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
6231 WRITE_ONCE(tp->urg_seq, ptr);
6232
6233 /* Disable header prediction. */
6234 tp->pred_flags = 0;
6235 }
6236
6237 /* This is the 'fast' part of urgent handling. */
6238 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
6239 {
6240 struct tcp_sock *tp = tcp_sk(sk);
6241
6242 /* Check if we get a new urgent pointer - normally not. */
6243 if (unlikely(th->urg))
6244 tcp_check_urg(sk, th);
6245
6246 /* Do we wait for any urgent data? - normally not... */
6247 if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
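/* Translate the absolute urg_seq into an offset into this
 * skb: skb->data still includes the TCP header here, and a
 * SYN consumes one sequence number but no payload byte.
 */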
6248 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
6249 th->syn;
6250
6251 /* Is the urgent pointer pointing into this packet? */
6252 if (ptr < skb->len) {
6253 u8 tmp;
6254 if (skb_copy_bits(skb, ptr, &tmp, 1))
6255 BUG();
6256 WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
6257 if (!sock_flag(sk, SOCK_DEAD))
6258 READ_ONCE(sk->sk_data_ready)(sk);
6259 }
6260 }
6261 }
6262
6263 /* Accept RST for rcv_nxt - 1 after a FIN.
6264 * When tcp connections are abruptly terminated from Mac OSX (via ^C), a
6265 * FIN is sent followed by a RST packet. The RST is sent with the same
6266 * sequence number as the FIN, and thus according to RFC 5961 a challenge
6267 * ACK should be sent. However, Mac OSX rate limits replies to challenge
6268 * ACKs on the closed socket. In addition middleboxes can drop either the
6269 * challenge ACK or a subsequent RST.
6270 */
6271 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
6272 {
6273 const struct tcp_sock *tp = tcp_sk(sk);
6274
6275 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
6276 (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
6277 TCPF_CLOSING));
6278 }
6279
6280 /* Does PAWS and seqno based validation of an incoming segment; flags
6281 * play a significant role here.
6282 */
6283 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
6284 const struct tcphdr *th, int syn_inerr)
6285 {
6286 struct tcp_sock *tp = tcp_sk(sk);
6287 bool accecn_reflector = false;
6288 SKB_DR(reason);
6289
6290 /* RFC1323: H1. Apply PAWS check first. */
6291 if (!tcp_fast_parse_options(sock_net(sk), skb, th, tp) ||
6292 !tp->rx_opt.saw_tstamp ||
6293 tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW))
6294 goto step1;
6295
6296 reason = tcp_disordered_ack_check(sk, skb);
6297 if (!reason)
6298 goto step1;
6299 /* Reset is accepted even if it did not pass PAWS. */
6300 if (th->rst)
6301 goto step1;
6302 if (unlikely(th->syn))
6303 goto syn_challenge;
6304
6305 /* Old ACKs are common: increment PAWS_OLD_ACK
6306 * and do not send a dupack.
6307 */
6308 if (reason == SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK) {
6309 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWS_OLD_ACK);
6310 goto discard;
6311 }
6312 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
6313 if (!tcp_oow_rate_limited(sock_net(sk), skb,
6314 LINUX_MIB_TCPACKSKIPPEDPAWS,
6315 &tp->last_oow_ack_time))
6316 tcp_send_dupack(sk, skb);
6317 goto discard;
6318
6319 step1:
6320 /* Step 1: check sequence number */
6321 reason = tcp_sequence(sk, TCP_SKB_CB(skb)->seq,
6322 TCP_SKB_CB(skb)->end_seq, th);
6323 if (reason) {
6324 /* RFC793, page 37: "In all states except SYN-SENT, all reset
6325 * (RST) segments are validated by checking their SEQ-fields."
6326 * And page 69: "If an incoming segment is not acceptable,
6327 * an acknowledgment should be sent in reply (unless the RST
6328 * bit is set, if so drop the segment and return)".
6329 */
6330 if (!th->rst) {
6331 if (th->syn)
6332 goto syn_challenge;
6333
6334 if (reason == SKB_DROP_REASON_TCP_INVALID_SEQUENCE ||
6335 reason == SKB_DROP_REASON_TCP_INVALID_END_SEQUENCE)
6336 NET_INC_STATS(sock_net(sk),
6337 LINUX_MIB_BEYOND_WINDOW);
6338 if (!tcp_oow_rate_limited(sock_net(sk), skb,
6339 LINUX_MIB_TCPACKSKIPPEDSEQ,
6340 &tp->last_oow_ack_time))
6341 tcp_send_dupack(sk, skb);
6342 } else if (tcp_reset_check(sk, skb)) {
6343 goto reset;
6344 }
6345 goto discard;
6346 }
6347
6348 /* Step 2: check RST bit */
6349 if (th->rst) {
6350 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
6351 * FIN and SACK too if available):
6352 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
6353 * the right-most SACK block,
6354 * then
6355 * RESET the connection
6356 * else
6357 * Send a challenge ACK
6358 */
6359 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
6360 tcp_reset_check(sk, skb))
6361 goto reset;
6362
6363 if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
6364 struct tcp_sack_block *sp = &tp->selective_acks[0];
6365 int max_sack = sp[0].end_seq;
6366 int this_sack;
6367
6368 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
6369 ++this_sack) {
6370 max_sack = after(sp[this_sack].end_seq,
6371 max_sack) ?
6372 sp[this_sack].end_seq : max_sack;
6373 }
6374
6375 if (TCP_SKB_CB(skb)->seq == max_sack)
6376 goto reset;
6377 }
6378
6379 /* Disable TFO if RST is out-of-order
6380 * and no data has been received
6381 * for the current active TFO socket
6382 */
6383 if (tp->syn_fastopen && !tp->data_segs_in &&
6384 sk->sk_state == TCP_ESTABLISHED)
6385 tcp_fastopen_active_disable(sk);
6386 tcp_send_challenge_ack(sk, false);
6387 SKB_DR_SET(reason, TCP_RESET);
6388 goto discard;
6389 }
6390
6391 /* step 3: check security and precedence [ignored] */
6392
6393 /* step 4: Check for a SYN
6394 * RFC 5961 4.2 : Send a challenge ack
6395 */
6396 if (th->syn) {
6397 if (tcp_ecn_mode_accecn(tp)) {
6398 accecn_reflector = true;
6399 tp->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
6400 INET_ECN_MASK;
6401 if (tp->rx_opt.accecn &&
6402 tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
6403 u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
6404
6405 tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
6406 tcp_accecn_opt_demand_min(sk, 1);
6407 }
6408 }
6409 if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
6410 TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
6411 TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
6412 TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
6413 goto pass;
6414 syn_challenge:
6415 if (syn_inerr)
6416 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6417 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
6418 tcp_send_challenge_ack(sk, accecn_reflector);
6419 SKB_DR_SET(reason, TCP_INVALID_SYN);
6420 goto discard;
6421 }
6422
6423 pass:
6424 bpf_skops_parse_hdr(sk, skb);
6425
6426 return true;
6427
6428 discard:
6429 tcp_drop_reason(sk, skb, reason);
6430 return false;
6431
6432 reset:
6433 tcp_reset(sk, skb);
6434 __kfree_skb(skb);
6435 return false;
6436 }
6437
6438 /*
6439 * TCP receive function for the ESTABLISHED state.
6440 *
6441 * It is split into a fast path and a slow path. The fast path is
6442 * disabled when:
6443 * - A zero window was announced by us - zero window probing
6444 * is only handled properly in the slow path.
6445 * - Out of order segments arrived.
6446 * - Urgent data is expected.
6447 * - There is no buffer space left
6448 * - Unexpected TCP flags/window values/header lengths are received
6449 * (detected by checking the TCP header against pred_flags)
6450 * - Data is sent in both directions. Fast path only supports pure senders
6451 * or pure receivers (this means either the sequence number or the ack
6452 * value must stay constant)
6453 * - Unexpected TCP option.
6454 *
6455 * When these conditions are not satisfied it drops into a standard
6456 * receive procedure patterned after RFC793 to handle all cases.
6457 * The first three cases are guaranteed by proper pred_flags setting,
6458 * the rest is checked inline. Fast processing is turned on in
6459 * tcp_data_queue when everything is OK.
6460 */
6461 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
6462 {
6463 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
6464 const struct tcphdr *th = (const struct tcphdr *)skb->data;
6465 struct tcp_sock *tp = tcp_sk(sk);
6466 unsigned int len = skb->len;
6467
6468 /* TCP congestion window tracking */
6469 trace_tcp_probe(sk, skb);
6470
6471 tcp_mstamp_refresh(tp);
6472 if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
6473 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
6474 /*
6475 * Header prediction.
6476 * The code loosely follows the one in the famous
6477 * "30 instruction TCP receive" Van Jacobson mail.
6478 *
6479 * Van's trick is to deposit buffers into socket queue
6480 * on a device interrupt, to call tcp_recv function
6481 * on the receive process context and checksum and copy
6482 * the buffer to user space. smart...
6483 *
6484 * Our current scheme is not silly either but we take the
6485 * extra cost of the net_bh soft interrupt processing...
6486 * We do checksum and copy also but from device to kernel.
6487 */
6488
6489 tp->rx_opt.saw_tstamp = 0;
6490 tp->rx_opt.accecn = 0;
6491
6492 /* pred_flags is 0xS?10 << 16 + snd_wnd
6493 * if header_prediction is to be made
6494 * 'S' will always be tp->tcp_header_len >> 2
6495 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
6496 * turn it off (when there are holes in the receive
6497 * space for instance)
6498 * PSH flag is ignored.
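 *
 * Example: with timestamps enabled, tcp_header_len is 32 bytes,
 * so 'S' is 8 and pred_flags is 0x8010 << 16 + snd_wnd.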
6499 */
6500
6501 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
6502 TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
6503 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6504 int tcp_header_len = tp->tcp_header_len;
6505 s32 delta = 0;
6506 int flag = 0;
6507
6508 /* Timestamp header prediction: tcp_header_len
6509 * is automatically equal to th->doff*4 due to pred_flags
6510 * match.
6511 */
6512
6513 /* Check timestamp */
6514 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
6515 /* No? Slow path! */
6516 if (!tcp_parse_aligned_timestamp(tp, th))
6517 goto slow_path;
6518
6519 delta = tp->rx_opt.rcv_tsval -
6520 tp->rx_opt.ts_recent;
6521 /* If PAWS failed, check it more carefully in slow path */
6522 if (delta < 0)
6523 goto slow_path;
6524
6525 /* DO NOT update ts_recent here: if the checksum fails
6526 * and the timestamp was the corrupted part, we would
6527 * end up with a hung connection, since we would drop
6528 * all future packets due to the PAWS test.
6529 */
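	/* Editor's illustration: the s32 delta above is wraparound-safe.
	 * E.g. rcv_tsval = 0x00000001 against ts_recent = 0xfffffffe gives
	 * delta = 3, so a freshly wrapped timestamp still passes, while a
	 * genuinely old tsval yields a negative delta and falls back to the
	 * full PAWS check in the slow path.
	 */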
6530 }
6531
6532 if (len <= tcp_header_len) {
6533 /* Bulk data transfer: sender */
6534 if (len == tcp_header_len) {
6535 /* Predicted packet is in window by definition.
6536 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6537 * Hence, check seq<=rcv_wup reduces to:
6538 */
6539 if (tcp_header_len ==
6540 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6541 tp->rcv_nxt == tp->rcv_wup)
6542 flag |= __tcp_replace_ts_recent(tp,
6543 delta);
6544
6545 tcp_ecn_received_counters(sk, skb, 0);
6546
6547 /* We know that such packets are checksummed
6548 * on entry.
6549 */
6550 tcp_ack(sk, skb, flag);
6551 __kfree_skb(skb);
6552 tcp_data_snd_check(sk);
6553 /* When receiving a pure ACK in the fast path, update
6554 * the last TS ecr directly instead of calling
6555 * tcp_rcv_rtt_measure_ts().
6556 */
6557 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
6558 return;
6559 } else { /* Header too small */
6560 reason = SKB_DROP_REASON_PKT_TOO_SMALL;
6561 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6562 goto discard;
6563 }
6564 } else {
6565 int eaten = 0;
6566 bool fragstolen = false;
6567
6568 if (tcp_checksum_complete(skb))
6569 goto csum_error;
6570
6571 if (after(TCP_SKB_CB(skb)->end_seq,
6572 tp->rcv_nxt + tcp_receive_window(tp)))
6573 goto validate;
6574
6575 if ((int)skb->truesize > sk->sk_forward_alloc)
6576 goto step5;
6577
6578 /* Predicted packet is in window by definition.
6579 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6580 * Hence, check seq<=rcv_wup reduces to:
6581 */
6582 if (tcp_header_len ==
6583 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6584 tp->rcv_nxt == tp->rcv_wup)
6585 flag |= __tcp_replace_ts_recent(tp,
6586 delta);
6587
6588 tcp_rcv_rtt_measure_ts(sk, skb);
6589
6590 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
6591
6592 /* Bulk data transfer: receiver */
6593 tcp_cleanup_skb(skb);
6594 __skb_pull(skb, tcp_header_len);
6595 tcp_ecn_received_counters(sk, skb,
6596 len - tcp_header_len);
6597 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
6598
6599 tcp_event_data_recv(sk, skb);
6600
6601 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
6602 /* Well, only one small jumplet in fast path... */
6603 tcp_ack(sk, skb, flag | FLAG_DATA);
6604 tcp_data_snd_check(sk);
6605 if (!inet_csk_ack_scheduled(sk))
6606 goto no_ack;
6607 } else {
6608 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
6609 }
6610
6611 __tcp_ack_snd_check(sk, 0);
6612 no_ack:
6613 if (eaten)
6614 kfree_skb_partial(skb, fragstolen);
6615 tcp_data_ready(sk);
6616 return;
6617 }
6618 }
6619
6620 slow_path:
6621 if (len < (th->doff << 2) || tcp_checksum_complete(skb))
6622 goto csum_error;
6623
6624 if (!th->ack && !th->rst && !th->syn) {
6625 reason = SKB_DROP_REASON_TCP_FLAGS;
6626 goto discard;
6627 }
6628
6629 /*
6630 * Standard slow path.
6631 */
6632 validate:
6633 if (!tcp_validate_incoming(sk, skb, th, 1))
6634 return;
6635
6636 step5:
6637 tcp_ecn_received_counters_payload(sk, skb);
6638
6639 reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT);
6640 if ((int)reason < 0) {
6641 reason = -reason;
6642 goto discard;
6643 }
6644 tcp_rcv_rtt_measure_ts(sk, skb);
6645
6646 /* Process urgent data. */
6647 tcp_urg(sk, skb, th);
6648
6649 /* step 7: process the segment text */
6650 tcp_data_queue(sk, skb);
6651
6652 tcp_data_snd_check(sk);
6653 tcp_ack_snd_check(sk);
6654 return;
6655
6656 csum_error:
6657 reason = SKB_DROP_REASON_TCP_CSUM;
6658 trace_tcp_bad_csum(skb);
6659 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
6660 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6661
6662 discard:
6663 tcp_drop_reason(sk, skb, reason);
6664 }
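
/* Editor's sketch (assumption, mirroring the before()/after() helpers in
 * include/net/tcp.h): the wraparound-safe sequence comparisons used above,
 * e.g. in the ack_seq and receive-window checks of the fast path.
 */
static inline bool example_seq_before(u32 seq1, u32 seq2)
{
	/* true iff seq1 precedes seq2 modulo 2^32 */
	return (s32)(seq1 - seq2) < 0;
}
#define example_seq_after(seq2, seq1)	example_seq_before(seq1, seq2)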
6665
6666 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
6667 {
6668 struct inet_connection_sock *icsk = inet_csk(sk);
6669 struct tcp_sock *tp = tcp_sk(sk);
6670
6671 tcp_mtup_init(sk);
6672 icsk->icsk_af_ops->rebuild_header(sk);
6673 tcp_init_metrics(sk);
6674
6675 /* Initialize the congestion window to start the transfer.
6676 * Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
6677 * retransmitted. In light of RFC6298's more aggressive 1-sec
6678 * initRTO, we only reset cwnd when more than one SYN/SYN-ACK
6679 * retransmission has occurred.
6680 */
6681 if (tp->total_retrans > 1 && tp->undo_marker)
6682 tcp_snd_cwnd_set(tp, 1);
6683 else
6684 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
6685 tp->snd_cwnd_stamp = tcp_jiffies32;
6686
6687 bpf_skops_established(sk, bpf_op, skb);
6688 /* Initialize congestion control unless BPF initialized it already: */
6689 if (!icsk->icsk_ca_initialized)
6690 tcp_init_congestion_control(sk);
6691 tcp_init_buffer_space(sk);
6692 }
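
/* Editor's sketch (assumption, modeled on tcp_init_cwnd() in
 * include/net/tcp.h): how the initial congestion window chosen above is
 * derived when no repeated SYN/SYN-ACK retransmission forces it to 1.
 */
static inline u32 example_init_cwnd(u32 route_initcwnd, u32 cwnd_clamp)
{
	/* prefer a route-supplied initcwnd metric; else RFC 6928's 10 */
	u32 cwnd = route_initcwnd ?: TCP_INIT_CWND;

	return min(cwnd, cwnd_clamp);
}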
6693
6694 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
6695 {
6696 struct tcp_sock *tp = tcp_sk(sk);
6697 struct inet_connection_sock *icsk = inet_csk(sk);
6698
6699 tcp_ao_finish_connect(sk, skb);
6700 tcp_set_state(sk, TCP_ESTABLISHED);
6701 icsk->icsk_ack.lrcvtime = tcp_jiffies32;
6702
6703 if (skb) {
6704 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
6705 security_inet_conn_established(sk, skb);
6706 sk_mark_napi_id(sk, skb);
6707 }
6708
6709 tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, skb);
6710
6711 /* Prevent spurious tcp_cwnd_restart() on first data
6712 * packet.
6713 */
6714 tp->lsndtime = tcp_jiffies32;
6715
6716 if (sock_flag(sk, SOCK_KEEPOPEN))
6717 tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));
6718
6719 if (!tp->rx_opt.snd_wscale)
6720 __tcp_fast_path_on(tp, tp->snd_wnd);
6721 else
6722 tp->pred_flags = 0;
6723 }
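
/* Editor's sketch (assumption, modeled on tcp_fast_path_on() in
 * include/net/tcp.h): with window scaling, the prediction word must hold
 * the window as it appears on the wire, so the scaled send window is
 * shifted back down before being folded into pred_flags. That is why the
 * unscaled case above can call __tcp_fast_path_on() directly.
 */
static inline void example_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}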
6724
6725 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
6726 struct tcp_fastopen_cookie *cookie)
6727 {
6728 struct tcp_sock *tp = tcp_sk(sk);
6729 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
6730 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
6731 bool syn_drop = false;
6732
6733 if (mss == READ_ONCE(tp->rx_opt.user_mss)) {
6734 struct tcp_options_received opt;
6735
6736 /* Get original SYNACK MSS value if user MSS sets mss_clamp */
6737 tcp_clear_options(&opt);
6738 opt.user_mss = opt.mss_clamp = 0;
6739 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
6740 mss = opt.mss_clamp;
6741 }
6742
6743 if (!tp->syn_fastopen) {
6744 /* Ignore an unsolicited cookie */
6745 cookie->len = -1;
6746 } else if (tp->total_retrans) {
6747 /* SYN timed out and the SYN-ACK neither has a cookie nor
6748 * acknowledges data. Presumably the remote received only
6749 * the retransmitted (regular) SYNs: either the original
6750 * SYN-data or the corresponding SYN-ACK was dropped.
6751 */
6752 syn_drop = (cookie->len < 0 && data);
6753 } else if (cookie->len < 0 && !tp->syn_data) {
6754 /* We requested a cookie but didn't get it. If we did not use
6755 * the (old) exp opt format, then try it next time (try_exp=1).
6756 * Otherwise we go back to using the RFC7413 opt (try_exp=2).
6757 */
6758 try_exp = tp->syn_fastopen_exp ? 2 : 1;
6759 }
6760
6761 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
6762
6763 if (data) { /* Retransmit unacked data in SYN */
6764 if (tp->total_retrans)
6765 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED;
6766 else
6767 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
6768 skb_rbtree_walk_from(data)
6769 tcp_mark_skb_lost(sk, data);
6770 tcp_non_congestion_loss_retransmit(sk);
6771 NET_INC_STATS(sock_net(sk),
6772 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
6773 return true;
6774 }
6775 tp->syn_data_acked = tp->syn_data;
6776 if (tp->syn_data_acked) {
6777 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
6778 /* SYN-data is counted as two separate packets in tcp_ack() */
6779 if (tp->delivered > 1)
6780 --tp->delivered;
6781 }
6782
6783 tcp_fastopen_add_skb(sk, synack);
6784
6785 return false;
6786 }
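
/* Editor's sketch (hypothetical helper, condensing the cookie-cache
 * decision above): what try_exp value is recorded for the next Fast Open
 * attempt. The syn_drop case is tracked separately.
 */
static inline u16 example_tfo_try_exp(bool syn_fastopen, bool syn_data,
				      bool used_exp_opt, int cookie_len,
				      u32 total_retrans)
{
	if (!syn_fastopen || total_retrans)
		return 0;	/* no cookie requested, or the SYN timed out */
	if (cookie_len < 0 && !syn_data)
		return used_exp_opt ? 2 : 1;	/* flip the option format */
	return 0;
}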
6787
6788 static void smc_check_reset_syn(struct tcp_sock *tp)
6789 {
6790 #if IS_ENABLED(CONFIG_SMC)
6791 if (static_branch_unlikely(&tcp_have_smc)) {
6792 if (tp->syn_smc && !tp->rx_opt.smc_ok)
6793 tp->syn_smc = 0;
6794 }
6795 #endif
6796 }
6797
6798 static void tcp_try_undo_spurious_syn(struct sock *sk)
6799 {
6800 struct tcp_sock *tp = tcp_sk(sk);
6801 u32 syn_stamp;
6802
6803 /* undo_marker is set when SYN or SYNACK times out. The timeout is
6804 * spurious if the ACK's timestamp option echo value matches the
6805 * original SYN timestamp.
6806 */
6807 syn_stamp = tp->retrans_stamp;
6808 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp &&
6809 syn_stamp == tp->rx_opt.rcv_tsecr)
6810 tp->undo_marker = 0;
6811 }
6812
6813 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
6814 const struct tcphdr *th)
6815 {
6816 struct inet_connection_sock *icsk = inet_csk(sk);
6817 struct tcp_sock *tp = tcp_sk(sk);
6818 struct tcp_fastopen_cookie foc = { .len = -1 };
6819 int saved_clamp = tp->rx_opt.mss_clamp;
6820 bool fastopen_fail;
6821 SKB_DR(reason);
6822
6823 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
6824 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
6825 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
6826
6827 if (th->ack) {
6828 /* rfc793:
6829 * "If the state is SYN-SENT then
6830 * first check the ACK bit
6831 * If the ACK bit is set
6832 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
6833 * a reset (unless the RST bit is set, if so drop
6834 * the segment and return)"
6835 */
6836 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
6837 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6838 /* Previous FIN/ACK or RST/ACK might be ignored. */
6839 if (icsk->icsk_retransmits == 0)
6840 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
6841 TCP_TIMEOUT_MIN, false);
6842 SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
6843 goto reset_and_undo;
6844 }
6845
6846 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
6847 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
6848 tcp_time_stamp_ts(tp))) {
6849 NET_INC_STATS(sock_net(sk),
6850 LINUX_MIB_PAWSACTIVEREJECTED);
6851 SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6852 goto reset_and_undo;
6853 }
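
		/* Editor's illustration: between() accepts only a tsecr
		 * inside [retrans_stamp, now]. An ACK echoing a timestamp we
		 * never sent in this connection attempt - e.g. a stale
		 * segment from an earlier incarnation - fails here and takes
		 * the reset_and_undo path.
		 */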
6854
6855 /* Now ACK is acceptable.
6856 *
6857 * "If the RST bit is set
6858 * If the ACK was acceptable then signal the user "error:
6859 * connection reset", drop the segment, enter CLOSED state,
6860 * delete TCB, and return."
6861 */
6862
6863 if (th->rst) {
6864 tcp_reset(sk, skb);
6865 consume:
6866 __kfree_skb(skb);
6867 return 0;
6868 }
6869
6870 /* rfc793:
6871 * "fifth, if neither of the SYN or RST bits is set then
6872 * drop the segment and return."
6873 *
6874 * See note below!
6875 * --ANK(990513)
6876 */
6877 if (!th->syn) {
6878 SKB_DR_SET(reason, TCP_FLAGS);
6879 goto discard_and_undo;
6880 }
6881 /* rfc793:
6882 * "If the SYN bit is on ...
6883 * are acceptable then ...
6884 * (our SYN has been ACKed), change the connection
6885 * state to ESTABLISHED..."
6886 */
6887
6888 if (tcp_ecn_mode_any(tp))
6889 tcp_ecn_rcv_synack(sk, skb, th,
6890 TCP_SKB_CB(skb)->ip_dsfield);
6891
6892 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
6893 tcp_try_undo_spurious_syn(sk);
6894 tcp_ack(sk, skb, FLAG_SLOWPATH);
6895
6896 /* Ok.. it's good. Set up sequence numbers and
6897 * move to established.
6898 */
6899 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
6900 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
6901 tp->rcv_mwnd_seq = tp->rcv_wup + tp->rcv_wnd;
6902
6903 /* RFC1323: The window in SYN & SYN/ACK segments is
6904 * never scaled.
6905 */
6906 tp->snd_wnd = ntohs(th->window);
6907
6908 if (!tp->rx_opt.wscale_ok) {
6909 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
6910 WRITE_ONCE(tp->window_clamp,
6911 min(tp->window_clamp, 65535U));
6912 }
6913
6914 if (tp->rx_opt.saw_tstamp) {
6915 tp->rx_opt.tstamp_ok = 1;
6916 tp->tcp_header_len =
6917 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
6918 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6919 tcp_store_ts_recent(tp);
6920 } else {
6921 tp->tcp_header_len = sizeof(struct tcphdr);
6922 }
6923
6924 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6925 tcp_initialize_rcv_mss(sk);
6926
6927 /* Remember, tcp_poll() does not lock socket!
6928 * Change state from SYN-SENT only after copied_seq
6929 * is initialized. */
6930 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6931
6932 smc_check_reset_syn(tp);
6933
6934 smp_mb();
6935
6936 tcp_finish_connect(sk, skb);
6937
6938 fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
6939 tcp_rcv_fastopen_synack(sk, skb, &foc);
6940
6941 if (!sock_flag(sk, SOCK_DEAD)) {
6942 sk->sk_state_change(sk);
6943 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
6944 }
6945 if (fastopen_fail)
6946 return -1;
6947 if (sk->sk_write_pending ||
6948 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) ||
6949 inet_csk_in_pingpong_mode(sk)) {
6950 /* Save one ACK. Data will be ready after
6951 * several ticks, if write_pending is set.
6952 *
6953 * It may be deleted, but with this feature tcpdumps
6954 * look so _wonderfully_ clever, that I was not able
6955 * to stand against the temptation 8) --ANK
6956 */
6957 inet_csk_schedule_ack(sk);
6958 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6959 tcp_reset_xmit_timer(sk, ICSK_TIME_DACK,
6960 TCP_DELACK_MAX, false);
6961 goto consume;
6962 }
6963 tcp_send_ack_reflect_ect(sk, tcp_ecn_mode_accecn(tp));
6964 return -1;
6965 }
6966
6967 /* No ACK in the segment */
6968
6969 if (th->rst) {
6970 /* rfc793:
6971 * "If the RST bit is set
6972 *
6973 * Otherwise (no ACK) drop the segment and return."
6974 */
6975 SKB_DR_SET(reason, TCP_RESET);
6976 goto discard_and_undo;
6977 }
6978
6979 /* PAWS check. */
6980 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
6981 tcp_paws_reject(&tp->rx_opt, 0)) {
6982 SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6983 goto discard_and_undo;
6984 }
6985 if (th->syn) {
6986 /* We see a SYN without an ACK. It is an attempt at
6987 * simultaneous connect with crossed SYNs.
6988 * In particular, it can be a connection to self.
6989 */
6990 #ifdef CONFIG_TCP_AO
6991 struct tcp_ao_info *ao;
6992
6993 ao = rcu_dereference_protected(tp->ao_info,
6994 lockdep_sock_is_held(sk));
6995 if (ao) {
6996 WRITE_ONCE(ao->risn, th->seq);
6997 ao->rcv_sne = 0;
6998 }
6999 #endif
7000 tcp_set_state(sk, TCP_SYN_RECV);
7001
7002 if (tp->rx_opt.saw_tstamp) {
7003 tp->rx_opt.tstamp_ok = 1;
7004 tcp_store_ts_recent(tp);
7005 tp->tcp_header_len =
7006 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
7007 } else {
7008 tp->tcp_header_len = sizeof(struct tcphdr);
7009 }
7010
7011 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
7012 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
7013 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
7014 tp->rcv_mwnd_seq = tp->rcv_wup + tp->rcv_wnd;
7015
7016 /* RFC1323: The window in SYN & SYN/ACK segments is
7017 * never scaled.
7018 */
7019 tp->snd_wnd = ntohs(th->window);
7020 tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
7021 tp->max_window = tp->snd_wnd;
7022
7023 tcp_ecn_rcv_syn(sk, th, skb);
7024
7025 tcp_mtup_init(sk);
7026 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
7027 tcp_initialize_rcv_mss(sk);
7028
7029 tcp_send_synack(sk);
7030 #if 0
7031 /* Note, we could accept data and URG from this segment.
7032 * There is no obstacle to making this work (except that we must
7033 * either change tcp_recvmsg() to prevent it from returning data
7034 * before the 3WHS completes per RFC793, or employ TCP Fast Open).
7035 *
7036 * However, if we sometimes ignore data in ACKless segments,
7037 * we have no reason to accept it at other times.
7038 * Also, the code doing this in step6 of tcp_rcv_state_process
7039 * seems not flawless. So, discard the packet for sanity.
7040 * Uncomment this return to process the data.
7041 */
7042 return -1;
7043 #else
7044 goto consume;
7045 #endif
7046 }
7047 /* "fifth, if neither of the SYN or RST bits is set then
7048 * drop the segment and return."
7049 */
7050
7051 discard_and_undo:
7052 tcp_clear_options(&tp->rx_opt);
7053 tp->rx_opt.mss_clamp = saved_clamp;
7054 tcp_drop_reason(sk, skb, reason);
7055 return 0;
7056
7057 reset_and_undo:
7058 tcp_clear_options(&tp->rx_opt);
7059 tp->rx_opt.mss_clamp = saved_clamp;
7060 /* we can reuse/return @reason to its caller to handle the exception */
7061 return reason;
7062 }
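
/* Editor's sketch (illustration of the RFC 793 test near the top of
 * tcp_rcv_synsent_state_process() above): in SYN-SENT an ACK is
 * acceptable iff SND.UNA < SEG.ACK <= SND.NXT, using wraparound-safe
 * compares.
 */
static inline bool example_synsent_ack_ok(u32 ack_seq, u32 snd_una,
					  u32 snd_nxt)
{
	return after(ack_seq, snd_una) && !after(ack_seq, snd_nxt);
}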
7063
7064 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
7065 {
7066 struct tcp_sock *tp = tcp_sk(sk);
7067 struct request_sock *req;
7068
7069 /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
7070 * undo. If peer SACKs triggered fast recovery, we can't undo here.
7071 */
7072 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
7073 tcp_try_undo_recovery(sk);
7074
7075 tcp_update_rto_time(tp);
7076 WRITE_ONCE(inet_csk(sk)->icsk_retransmits, 0);
7077 /* In tcp_fastopen_synack_timer() on the first SYNACK RTO we set
7078 * retrans_stamp but don't enter CA_Loss, so in case that happened we
7079 * need to zero retrans_stamp here to prevent spurious
7080 * retransmits_timed_out(). However, if the ACK of our SYNACK caused us
7081 * to enter CA_Recovery then we need to leave retrans_stamp as it was
7082 * set entering CA_Recovery, for correct retransmits_timed_out() and
7083 * undo behavior.
7084 */
7085 tcp_retrans_stamp_cleanup(sk);
7086
7087 /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
7088 * we no longer need req so release it.
7089 */
7090 req = rcu_dereference_protected(tp->fastopen_rsk,
7091 lockdep_sock_is_held(sk));
7092 reqsk_fastopen_remove(sk, req, false);
7093
7094 /* Re-arm the timer because data may have been sent out.
7095 * This is similar to the regular data transmission case
7096 * when new data has just been ack'ed.
7097 *
7098 * (TFO) - we could try to be more aggressive and
7099 * retransmit any data sooner based on when it
7100 * was sent out.
7101 */
7102 tcp_rearm_rto(sk);
7103 }
7104
7105 /*
7106 * This function implements the receiving procedure of RFC 793 for
7107 * all states except ESTABLISHED and TIME_WAIT.
7108 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
7109 * address independent.
7110 */
7111
7112 enum skb_drop_reason
7113 tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
7114 {
7115 struct tcp_sock *tp = tcp_sk(sk);
7116 struct inet_connection_sock *icsk = inet_csk(sk);
7117 const struct tcphdr *th = tcp_hdr(skb);
7118 struct request_sock *req;
7119 int queued = 0;
7120 SKB_DR(reason);
7121
7122 switch (sk->sk_state) {
7123 case TCP_CLOSE:
7124 SKB_DR_SET(reason, TCP_CLOSE);
7125 goto discard;
7126
7127 case TCP_LISTEN:
7128 if (th->ack)
7129 return SKB_DROP_REASON_TCP_FLAGS;
7130
7131 if (th->rst) {
7132 SKB_DR_SET(reason, TCP_RESET);
7133 goto discard;
7134 }
7135 if (th->syn) {
7136 if (th->fin) {
7137 SKB_DR_SET(reason, TCP_FLAGS);
7138 goto discard;
7139 }
7140 /* It is possible that we process SYN packets from the backlog,
7141 * so we need to make sure BH is disabled and the RCU read lock is held here.
7142 */
7143 rcu_read_lock();
7144 local_bh_disable();
7145 icsk->icsk_af_ops->conn_request(sk, skb);
7146 local_bh_enable();
7147 rcu_read_unlock();
7148
7149 consume_skb(skb);
7150 return 0;
7151 }
7152 SKB_DR_SET(reason, TCP_FLAGS);
7153 goto discard;
7154
7155 case TCP_SYN_SENT:
7156 tp->rx_opt.saw_tstamp = 0;
7157 tcp_mstamp_refresh(tp);
7158 queued = tcp_rcv_synsent_state_process(sk, skb, th);
7159 if (queued >= 0)
7160 return queued;
7161
7162 /* Do step6 onward by hand. */
7163 tcp_urg(sk, skb, th);
7164 __kfree_skb(skb);
7165 tcp_data_snd_check(sk);
7166 return 0;
7167 }
7168
7169 tcp_mstamp_refresh(tp);
7170 tp->rx_opt.saw_tstamp = 0;
7171 req = rcu_dereference_protected(tp->fastopen_rsk,
7172 lockdep_sock_is_held(sk));
7173 if (req) {
7174 bool req_stolen;
7175
7176 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
7177 sk->sk_state != TCP_FIN_WAIT1);
7178
7179 SKB_DR_SET(reason, TCP_FASTOPEN);
7180 if (!tcp_check_req(sk, skb, req, true, &req_stolen, &reason))
7181 goto discard;
7182 }
7183
7184 if (!th->ack && !th->rst && !th->syn) {
7185 SKB_DR_SET(reason, TCP_FLAGS);
7186 goto discard;
7187 }
7188 if (!tcp_validate_incoming(sk, skb, th, 0))
7189 return 0;
7190
7191 /* step 5: check the ACK field */
7192 reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
7193 FLAG_UPDATE_TS_RECENT |
7194 FLAG_NO_CHALLENGE_ACK);
7195
7196 if ((int)reason <= 0) {
7197 if (sk->sk_state == TCP_SYN_RECV) {
7198 /* send one RST */
7199 if (!reason)
7200 return SKB_DROP_REASON_TCP_OLD_ACK;
7201 return -reason;
7202 }
7203 /* accept old ack during closing */
7204 if ((int)reason < 0) {
7205 tcp_send_challenge_ack(sk, false);
7206 reason = -reason;
7207 goto discard;
7208 }
7209 }
7210 SKB_DR_SET(reason, NOT_SPECIFIED);
7211 switch (sk->sk_state) {
7212 case TCP_SYN_RECV:
7213 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
7214 if (!tp->srtt_us)
7215 tcp_synack_rtt_meas(sk, req);
7216
7217 if (tp->rx_opt.tstamp_ok)
7218 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
7219
7220 if (req) {
7221 tcp_rcv_synrecv_state_fastopen(sk);
7222 } else {
7223 tcp_try_undo_spurious_syn(sk);
7224 tp->retrans_stamp = 0;
7225 tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,
7226 skb);
7227 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
7228 }
7229 tcp_ao_established(sk);
7230 smp_mb();
7231 tcp_set_state(sk, TCP_ESTABLISHED);
7232 sk->sk_state_change(sk);
7233
7234 /* Note that this wakeup is only for the marginal crossed-SYN case.
7235 * Passively opened sockets are not woken up, because
7236 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
7237 */
7238 if (sk->sk_socket)
7239 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
7240
7241 tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
7242 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
7243 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
7244
7245 if (!inet_csk(sk)->icsk_ca_ops->cong_control)
7246 tcp_update_pacing_rate(sk);
7247
7248 /* Prevent spurious tcp_cwnd_restart() on first data packet */
7249 tp->lsndtime = tcp_jiffies32;
7250
7251 tcp_initialize_rcv_mss(sk);
7252 if (tcp_ecn_mode_accecn(tp))
7253 tcp_accecn_third_ack(sk, skb, tp->syn_ect_snt);
7254 tcp_fast_path_on(tp);
7255 if (sk->sk_shutdown & SEND_SHUTDOWN)
7256 tcp_shutdown(sk, SEND_SHUTDOWN);
7257
7258 break;
7259
7260 case TCP_FIN_WAIT1: {
7261 int tmo;
7262
7263 if (req)
7264 tcp_rcv_synrecv_state_fastopen(sk);
7265
7266 if (tp->snd_una != tp->write_seq)
7267 break;
7268
7269 tcp_set_state(sk, TCP_FIN_WAIT2);
7270 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
7271
7272 sk_dst_confirm(sk);
7273
7274 if (!sock_flag(sk, SOCK_DEAD)) {
7275 /* Wake up lingering close() */
7276 sk->sk_state_change(sk);
7277 break;
7278 }
7279
7280 if (READ_ONCE(tp->linger2) < 0) {
7281 tcp_done(sk);
7282 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
7283 return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
7284 }
7285 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
7286 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
7287 /* Receive out of order FIN after close() */
7288 if (tp->syn_fastopen && th->fin)
7289 tcp_fastopen_active_disable(sk);
7290 tcp_done(sk);
7291 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
7292 return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
7293 }
7294
7295 tmo = tcp_fin_time(sk);
7296 if (tmo > TCP_TIMEWAIT_LEN) {
7297 tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
7298 } else if (th->fin || sock_owned_by_user(sk)) {
7299 /* Bad case. We could lose such a FIN otherwise.
7300 * It is not a big problem, but it looks confusing
7301 * and is not so rare an event. We can still lose it now,
7302 * if it spins in bh_lock_sock(), but that is a really
7303 * marginal case.
7304 */
7305 tcp_reset_keepalive_timer(sk, tmo);
7306 } else {
7307 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
7308 goto consume;
7309 }
7310 break;
7311 }
7312
7313 case TCP_CLOSING:
7314 if (tp->snd_una == tp->write_seq) {
7315 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
7316 goto consume;
7317 }
7318 break;
7319
7320 case TCP_LAST_ACK:
7321 if (tp->snd_una == tp->write_seq) {
7322 tcp_update_metrics(sk);
7323 tcp_done(sk);
7324 goto consume;
7325 }
7326 break;
7327 }
7328
7329 /* step 6: check the URG bit */
7330 tcp_urg(sk, skb, th);
7331
7332 /* step 7: process the segment text */
7333 switch (sk->sk_state) {
7334 case TCP_CLOSE_WAIT:
7335 case TCP_CLOSING:
7336 case TCP_LAST_ACK:
7337 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
7338 /* If a subflow has been reset, the packet should not
7339 * continue to be processed, drop the packet.
7340 */
7341 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
7342 goto discard;
7343 break;
7344 }
7345 fallthrough;
7346 case TCP_FIN_WAIT1:
7347 case TCP_FIN_WAIT2:
7348 /* RFC 793 says to queue data in these states,
7349 * RFC 1122 says we MUST send a reset.
7350 * BSD 4.4 also does reset.
7351 */
7352 if (sk->sk_shutdown & RCV_SHUTDOWN) {
7353 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
7354 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
7355 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
7356 tcp_reset(sk, skb);
7357 return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
7358 }
7359 }
7360 fallthrough;
7361 case TCP_ESTABLISHED:
7362 tcp_data_queue(sk, skb);
7363 queued = 1;
7364 break;
7365 }
7366
7367 /* tcp_data could move socket to TIME-WAIT */
7368 if (sk->sk_state != TCP_CLOSE) {
7369 tcp_data_snd_check(sk);
7370 tcp_ack_snd_check(sk);
7371 }
7372
7373 if (!queued) {
7374 discard:
7375 tcp_drop_reason(sk, skb, reason);
7376 }
7377 return 0;
7378
7379 consume:
7380 __kfree_skb(skb);
7381 return 0;
7382 }
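
/* Editor's sketch (hypothetical, condensing the TCP_FIN_WAIT1 branch
 * above): how the FIN-WAIT-2 timeout is dispatched once our FIN is fully
 * acknowledged and the socket is orphaned.
 */
static inline void example_fin_wait2_dispatch(struct sock *sk, int tmo,
					      bool fin, bool owned)
{
	if (tmo > TCP_TIMEWAIT_LEN)	/* linger on the keepalive timer first */
		tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
	else if (fin || owned)		/* don't risk losing a FIN */
		tcp_reset_keepalive_timer(sk, tmo);
	else				/* hand off to the timewait machinery */
		tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
}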
7383
7384 static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
7385 {
7386 struct inet_request_sock *ireq = inet_rsk(req);
7387
7388 if (family == AF_INET)
7389 net_dbg_ratelimited("drop open request from %pI4/%u\n",
7390 &ireq->ir_rmt_addr, port);
7391 #if IS_ENABLED(CONFIG_IPV6)
7392 else if (family == AF_INET6)
7393 net_dbg_ratelimited("drop open request from %pI6/%u\n",
7394 &ireq->ir_v6_rmt_addr, port);
7395 #endif
7396 }
7397
7398 /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
7399 *
7400 * If we receive a SYN packet with these bits set, it means a
7401 * network is playing bad games with TOS bits. In order to
7402 * avoid possible false congestion notifications, we disable
7403 * TCP ECN negotiation.
7404 *
7405 * Exception: tcp_ca wants ECN. This is required for DCTCP
7406 * congestion control: Linux DCTCP asserts ECT on all packets,
7407 * including SYN, which is the optimal solution; however,
7408 * others, such as FreeBSD, do not.
7409 *
7410 * Exception: At least one of the reserved bits of the TCP header (th->res1) is
7411 * set, indicating the use of a future TCP extension (such as AccECN). See
7412 * RFC8311 §4.3 which updates RFC3168 to allow the development of such
7413 * extensions.
7414 */
7415 static void tcp_ecn_create_request(struct request_sock *req,
7416 const struct sk_buff *skb,
7417 const struct sock *listen_sk,
7418 const struct dst_entry *dst)
7419 {
7420 const struct tcphdr *th = tcp_hdr(skb);
7421 const struct net *net = sock_net(listen_sk);
7422 bool th_ecn = th->ece && th->cwr;
7423 bool ect, ecn_ok;
7424 u32 ecn_ok_dst;
7425
7426 if (tcp_accecn_syn_requested(th) &&
7427 (READ_ONCE(net->ipv4.sysctl_tcp_ecn) >= 3 ||
7428 tcp_ca_needs_accecn(listen_sk))) {
7429 inet_rsk(req)->ecn_ok = 1;
7430 tcp_rsk(req)->accecn_ok = 1;
7431 tcp_rsk(req)->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
7432 INET_ECN_MASK;
7433 return;
7434 }
7435
7436 if (!th_ecn)
7437 return;
7438
7439 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
7440 ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
7441 ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;
7442
7443 if (((!ect || th->res1 || th->ae) && ecn_ok) ||
7444 tcp_ca_needs_ecn(listen_sk) ||
7445 (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
7446 tcp_bpf_ca_needs_ecn((struct sock *)req))
7447 inet_rsk(req)->ecn_ok = 1;
7448 }
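
/* Editor's sketch (partial, for illustration; it omits the tcp_ca and BPF
 * exceptions handled above): the classic RFC3168 half of the negotiation.
 * ECE+CWR marks an ECN-setup SYN; an ECT or reserved/AE bit is tolerated
 * only because RFC8311 relaxed RFC3168 for future extensions.
 */
static inline bool example_classic_ecn_ok(bool ece, bool cwr, bool ect,
					  bool res1, bool ae,
					  bool sysctl_ecn, bool dst_ecn)
{
	if (!(ece && cwr))
		return false;	/* not an ECN-setup SYN */
	return (!ect || res1 || ae) && (sysctl_ecn || dst_ecn);
}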
7449
7450 static void tcp_openreq_init(struct request_sock *req,
7451 const struct tcp_options_received *rx_opt,
7452 struct sk_buff *skb, const struct sock *sk)
7453 {
7454 struct inet_request_sock *ireq = inet_rsk(req);
7455
7456 req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */
7457 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
7458 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
7459 tcp_rsk(req)->snt_synack = 0;
7460 tcp_rsk(req)->snt_tsval_first = 0;
7461 tcp_rsk(req)->last_oow_ack_time = 0;
7462 tcp_rsk(req)->accecn_ok = 0;
7463 tcp_rsk(req)->saw_accecn_opt = TCP_ACCECN_OPT_NOT_SEEN;
7464 tcp_rsk(req)->accecn_fail_mode = 0;
7465 tcp_rsk(req)->syn_ect_rcv = 0;
7466 tcp_rsk(req)->syn_ect_snt = 0;
7467 req->mss = rx_opt->mss_clamp;
7468 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
7469 ireq->tstamp_ok = rx_opt->tstamp_ok;
7470 ireq->sack_ok = rx_opt->sack_ok;
7471 ireq->snd_wscale = rx_opt->snd_wscale;
7472 ireq->wscale_ok = rx_opt->wscale_ok;
7473 ireq->acked = 0;
7474 ireq->ecn_ok = 0;
7475 ireq->ir_rmt_port = tcp_hdr(skb)->source;
7476 ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
7477 ireq->ir_mark = inet_request_mark(sk, skb);
7478 #if IS_ENABLED(CONFIG_SMC)
7479 ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested &&
7480 tcp_sk(sk)->smc_hs_congested(sk));
7481 #endif
7482 }
7483
7484 /*
7485 * Return true if a syncookie should be sent
7486 */
7487 static bool tcp_syn_flood_action(struct sock *sk, const char *proto)
7488 {
7489 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
7490 const char *msg = "Dropping request";
7491 struct net *net = sock_net(sk);
7492 bool want_cookie = false;
7493 u8 syncookies;
7494
7495 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7496
7497 #ifdef CONFIG_SYN_COOKIES
7498 if (syncookies) {
7499 msg = "Sending cookies";
7500 want_cookie = true;
7501 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
7502 } else
7503 #endif
7504 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
7505
7506 if (syncookies != 2 && !READ_ONCE(queue->synflood_warned)) {
7507 WRITE_ONCE(queue->synflood_warned, 1);
7508 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_family == AF_INET6) {
7509 net_info_ratelimited("%s: Possible SYN flooding on port [%pI6c]:%u. %s.\n",
7510 proto, inet6_rcv_saddr(sk),
7511 sk->sk_num, msg);
7512 } else {
7513 net_info_ratelimited("%s: Possible SYN flooding on port %pI4:%u. %s.\n",
7514 proto, &sk->sk_rcv_saddr,
7515 sk->sk_num, msg);
7516 }
7517 }
7518
7519 return want_cookie;
7520 }
7521
7522 static void tcp_reqsk_record_syn(const struct sock *sk,
7523 struct request_sock *req,
7524 const struct sk_buff *skb)
7525 {
7526 if (tcp_sk(sk)->save_syn) {
7527 u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
7528 struct saved_syn *saved_syn;
7529 u32 mac_hdrlen;
7530 void *base;
7531
7532 if (tcp_sk(sk)->save_syn == 2) { /* Save full header. */
7533 base = skb_mac_header(skb);
7534 mac_hdrlen = skb_mac_header_len(skb);
7535 len += mac_hdrlen;
7536 } else {
7537 base = skb_network_header(skb);
7538 mac_hdrlen = 0;
7539 }
7540
7541 saved_syn = kmalloc_flex(*saved_syn, data, len, GFP_ATOMIC);
7542 if (saved_syn) {
7543 saved_syn->mac_hdrlen = mac_hdrlen;
7544 saved_syn->network_hdrlen = skb_network_header_len(skb);
7545 saved_syn->tcp_hdrlen = tcp_hdrlen(skb);
7546 memcpy(saved_syn->data, base, len);
7547 req->saved_syn = saved_syn;
7548 }
7549 }
7550 }
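
/* Editor's note (usage sketch, assuming the standard TCP_SAVE_SYN and
 * TCP_SAVED_SYN socket options): save_syn is set from userspace on the
 * listener; a value of 2 additionally keeps the MAC header, matching the
 * branch above.
 *
 *	int val = 1;	// or 2 to also save the MAC header
 *	setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &val, sizeof(val));
 *	// after accept(), read it back once with
 *	// getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, buf, &len);
 */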
7551
7552 /* If a SYN cookie is required and supported, returns a clamped MSS value to be
7553 * used for SYN cookie generation.
7554 */
7555 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
7556 const struct tcp_request_sock_ops *af_ops,
7557 struct sock *sk, struct tcphdr *th)
7558 {
7559 struct tcp_sock *tp = tcp_sk(sk);
7560 u16 mss;
7561
7562 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
7563 !inet_csk_reqsk_queue_is_full(sk))
7564 return 0;
7565
7566 if (!tcp_syn_flood_action(sk, rsk_ops->slab_name))
7567 return 0;
7568
7569 if (sk_acceptq_is_full(sk)) {
7570 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7571 return 0;
7572 }
7573
7574 mss = tcp_parse_mss_option(th, READ_ONCE(tp->rx_opt.user_mss));
7575 if (!mss)
7576 mss = af_ops->mss_clamp;
7577
7578 return mss;
7579 }
7580
7581 int tcp_conn_request(struct request_sock_ops *rsk_ops,
7582 const struct tcp_request_sock_ops *af_ops,
7583 struct sock *sk, struct sk_buff *skb)
7584 {
7585 struct tcp_fastopen_cookie foc = { .len = -1 };
7586 struct tcp_options_received tmp_opt;
7587 const struct tcp_sock *tp = tcp_sk(sk);
7588 struct net *net = sock_net(sk);
7589 struct sock *fastopen_sk = NULL;
7590 union tcp_seq_and_ts_off st;
7591 struct request_sock *req;
7592 bool want_cookie = false;
7593 struct dst_entry *dst;
7594 struct flowi fl;
7595 u8 syncookies;
7596 u32 isn;
7597
7598 #ifdef CONFIG_TCP_AO
7599 const struct tcp_ao_hdr *aoh;
7600 #endif
7601
7602 isn = __this_cpu_read(tcp_tw_isn);
7603 if (isn) {
7604 /* TW buckets are converted to open requests without
7605 * limitation: they conserve resources and the peer is
7606 * evidently a real one.
7607 */
7608 __this_cpu_write(tcp_tw_isn, 0);
7609 } else {
7610 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7611
7612 if (syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) {
7613 want_cookie = tcp_syn_flood_action(sk,
7614 rsk_ops->slab_name);
7615 if (!want_cookie)
7616 goto drop;
7617 }
7618 }
7619
7620 if (sk_acceptq_is_full(sk)) {
7621 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7622 goto drop;
7623 }
7624
7625 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
7626 if (!req)
7627 goto drop;
7628
7629 req->syncookie = want_cookie;
7630 tcp_rsk(req)->af_specific = af_ops;
7631 tcp_rsk(req)->ts_off = 0;
7632 tcp_rsk(req)->req_usec_ts = false;
7633 #if IS_ENABLED(CONFIG_MPTCP)
7634 tcp_rsk(req)->is_mptcp = 0;
7635 #endif
7636
7637 tcp_clear_options(&tmp_opt);
7638 tmp_opt.mss_clamp = af_ops->mss_clamp;
7639 tmp_opt.user_mss = READ_ONCE(tp->rx_opt.user_mss);
7640 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
7641 want_cookie ? NULL : &foc);
7642
7643 if (want_cookie && !tmp_opt.saw_tstamp)
7644 tcp_clear_options(&tmp_opt);
7645
7646 if (IS_ENABLED(CONFIG_SMC) && want_cookie)
7647 tmp_opt.smc_ok = 0;
7648
7649 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
7650 tcp_openreq_init(req, &tmp_opt, skb, sk);
7651 inet_rsk(req)->no_srccheck = inet_test_bit(TRANSPARENT, sk);
7652
7653 /* Note: tcp_v6_init_req() might override ir_iif for link locals */
7654 inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
7655
7656 dst = af_ops->route_req(sk, skb, &fl, req, isn);
7657 if (!dst)
7658 goto drop_and_free;
7659
7660 if (tmp_opt.tstamp_ok || (!want_cookie && !isn))
7661 st = INDIRECT_CALL_INET(af_ops->init_seq_and_ts_off,
7662 tcp_v6_init_seq_and_ts_off,
7663 tcp_v4_init_seq_and_ts_off,
7664 net, skb);
7665
7666 if (tmp_opt.tstamp_ok) {
7667 tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
7668 tcp_rsk(req)->ts_off = st.ts_off;
7669 }
7670 if (!want_cookie && !isn) {
7671 int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
7672
7673 /* Kill the following clause if you dislike this heuristic. */
7674 if (!syncookies &&
7675 (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
7676 (max_syn_backlog >> 2)) &&
7677 !tcp_peer_is_proven(req, dst)) {
7678 /* Without syncookies, the last quarter of the
7679 * backlog is reserved for destinations
7680 * proven to be alive.
7681 * It means that we keep communicating
7682 * with destinations already remembered
7683 * at the moment the SYN flood began.
7684 */
7685 pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
7686 rsk_ops->family);
7687 goto drop_and_release;
7688 }
7689
7690 isn = st.seq;
7691 }
7692
7693 tcp_ecn_create_request(req, skb, sk, dst);
7694
7695 if (want_cookie) {
7696 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
7697 if (!tmp_opt.tstamp_ok)
7698 inet_rsk(req)->ecn_ok = 0;
7699 }
7700
7701 #ifdef CONFIG_TCP_AO
7702 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
7703 goto drop_and_release; /* Invalid TCP options */
7704 if (aoh) {
7705 tcp_rsk(req)->used_tcp_ao = true;
7706 tcp_rsk(req)->ao_rcv_next = aoh->keyid;
7707 tcp_rsk(req)->ao_keyid = aoh->rnext_keyid;
7708
7709 } else {
7710 tcp_rsk(req)->used_tcp_ao = false;
7711 }
7712 #endif
7713 tcp_rsk(req)->snt_isn = isn;
7714 tcp_rsk(req)->txhash = net_tx_rndhash();
7715 tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
7716 tcp_openreq_init_rwin(req, sk, dst);
7717 sk_rx_queue_set(req_to_sk(req), skb);
7718 if (!want_cookie) {
7719 tcp_reqsk_record_syn(sk, req, skb);
7720 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
7721 }
7722 if (fastopen_sk) {
7723 af_ops->send_synack(fastopen_sk, dst, &fl, req,
7724 &foc, TCP_SYNACK_FASTOPEN, skb);
7725 /* Add the child socket directly into the accept queue */
7726 if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
7727 bh_unlock_sock(fastopen_sk);
7728 sock_put(fastopen_sk);
7729 goto drop_and_free;
7730 }
7731 READ_ONCE(sk->sk_data_ready)(sk);
7732 bh_unlock_sock(fastopen_sk);
7733 sock_put(fastopen_sk);
7734 } else {
7735 tcp_rsk(req)->tfo_listener = false;
7736 if (!want_cookie &&
7737 unlikely(!inet_csk_reqsk_queue_hash_add(sk, req))) {
7738 reqsk_free(req);
7739 dst_release(dst);
7740 return 0;
7741 }
7742 af_ops->send_synack(sk, dst, &fl, req, &foc,
7743 !want_cookie ? TCP_SYNACK_NORMAL :
7744 TCP_SYNACK_COOKIE,
7745 skb);
7746 if (want_cookie) {
7747 reqsk_free(req);
7748 return 0;
7749 }
7750 }
7751 reqsk_put(req);
7752 return 0;
7753
7754 drop_and_release:
7755 dst_release(dst);
7756 drop_and_free:
7757 __reqsk_free(req);
7758 drop:
7759 tcp_listendrop(sk);
7760 return 0;
7761 }
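
/* Editor's sketch (hypothetical, condensing the entry checks of
 * tcp_conn_request() above, assuming CONFIG_SYN_COOKIES=y): when an
 * incoming SYN is answered with a syncookie instead of a queued request.
 */
static inline bool example_want_syncookie(u8 sysctl_syncookies,
					  bool reqsk_queue_full)
{
	if (!sysctl_syncookies)
		return false;			/* overflowing SYNs are dropped */
	return sysctl_syncookies == 2 ||	/* cookies unconditionally */
	       reqsk_queue_full;		/* classic: only under pressure */
}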
7762