/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/bits.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

static inline void tcp_orphan_count_inc(void)
{
	this_cpu_inc(tcp_orphan_count);
}

static inline void tcp_orphan_count_dec(void)
{
	this_cpu_dec(tcp_orphan_count);
}

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal value of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* Default sending frequency of accurate ECN option per RTT */
#define TCP_ACCECN_OPTION_BEACON	3

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX_SEC	120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
#define TCP_RTO_MIN	((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock smaller than 1 MHz.
 * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_ACCECN0		172	/* 0xAC: Accurate ECN Order 0 */
#define TCPOPT_ACCECN1		174	/* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_ACCECN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
#define TCPOLEN_ACCECN_PERFIELD		3

/* Maximum number of byte counters in AccECN option + size */
#define TCP_ACCECN_NUMFIELDS		3
#define TCP_ACCECN_MAXSIZE		(TCPOLEN_ACCECN_BASE + \
					 TCPOLEN_ACCECN_PERFIELD * \
					 TCP_ACCECN_NUMFIELDS)
#define TCP_ACCECN_SAFETY_SHIFT		1 /* SAFETY_FACTOR in accecn draft */
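
/* Illustrative arithmetic (editorial note, not a new definition): with the
 * values above, TCP_ACCECN_MAXSIZE works out to 2 + 3 * 3 = 11 bytes, i.e.
 * a full AccECN option carrying all three byte counters consumes at most 11
 * of the 40 bytes of TCP option space (MAX_TCP_OPTION_SPACE).
 */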

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sk_enabled(sk) &&
	    mem_cgroup_sk_under_memory_pressure(sk))
		return true;

	if (sk->sk_bypass_prot_mem)
		return false;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
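
/* Illustrative sketch of the wraparound semantics above (hypothetical
 * sequence numbers, not used anywhere in the stack):
 *
 *	before(0xfffffff0U, 0x00000010U)	-> true, 0xfffffff0 was sent
 *						   first even though it is
 *						   numerically larger
 *	after(0x00000010U, 0xfffffff0U)		-> true, same pair reversed
 *	between(0x5U, 0xfffffff0U, 0x10U)	-> true, 0x5 lies inside a
 *						   window that wraps past 0
 */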

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

void tcp_tsq_work_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);

void tcp_release_cb(struct sock *sk);

static inline bool tcp_release_cb_cond(struct sock *sk)
{
#ifdef CONFIG_INET
	if (likely(sk->sk_prot->release_cb == tcp_release_cb)) {
		if (unlikely(smp_load_acquire(&sk->sk_tsq_flags) & TCP_DEFERRED_ALL))
			tcp_release_cb(sk);
		return true;
	}
#endif
	return false;
}

void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_MODE_RFC3168	BIT(0)
#define	TCP_ECN_QUEUE_CWR	BIT(1)
#define	TCP_ECN_DEMAND_CWR	BIT(2)
#define	TCP_ECN_SEEN		BIT(3)
#define	TCP_ECN_MODE_ACCECN	BIT(4)

#define	TCP_ECN_DISABLED	0
#define	TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define	TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	return tp->ecn_flags & TCP_ECN_MODE_ANY;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
	tp->ecn_flags |= mode;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3,
	TCP_TW_ACK_OOW = 4
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn,
					      enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_pacing_rate(struct sock *sk);
void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_set_rcvbuf(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);

static inline void
tcp_update_recv_tstamps(struct sk_buff *skb,
			struct scm_timestamping_internal *tss)
{
	tss->ts[0] = skb->tstamp;
	tss->ts[2] = skb_hwtstamps(skb)->hwtstamp;
}

void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req,
				  void (*opt_child_init)(struct sock *newsk,
							 const struct sock *sk));
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
	TCP_SYNACK_RETRANS,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
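
/* Worked example of the validity window (illustrative, assuming HZ == 1000):
 * TCP_SYNCOOKIE_PERIOD is 60 * 1000 = 60000 jiffies and TCP_SYNCOOKIE_VALID
 * is 2 * 60000 = 120000 jiffies, so a cookie is accepted for at most two
 * counter periods (120 seconds), and for as little as one period if the
 * counter increments right after the cookie was generated.
 */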

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void __tcp_check_space(struct sock *sk);
static inline void tcp_check_space(struct sock *sk)
{
	/* pairs with tcp_poll() */
	smp_mb();

	if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
		__tcp_check_space(sk);
}
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
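
/* Worked example (hypothetical numbers): if the peer ever advertised
 * max_window = 64000 bytes (larger than TCP_MSS_DEFAULT), cutoff is 32000;
 * a 48000 byte TSO packet is then bounded to 32000, while a 20000 byte one
 * is returned unchanged. With a tiny max_window of 500, cutoff is 500 and
 * oversized packets are clamped to it, though never below 68 bytes minus
 * the TCP header length thanks to the max_t() above.
 */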

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
void tcp_rate_check_app_limited(struct sock *sk);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
			sk_read_actor_t recv_actor, bool noack,
			u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline unsigned int tcp_rto_max(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}

static inline void tcp_bound_rto(struct sock *sk)
{
	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout,
				    tcp_rto_max(req->rsk_listener));
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
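
/* Worked example (hypothetical values): with rcv_wup = 1000, rcv_wnd = 500
 * and rcv_nxt = 1400, the advertised window still has 100 bytes left; if the
 * peer overshoots and rcv_nxt reaches 1600, the subtraction goes negative
 * and the helper above reports 0 rather than a huge unsigned value.
 */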

/* Compute the maximum receive window we ever advertised.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_max_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_mwnd_seq - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Check if we need to update the maximum receive window sequence number */
static inline void tcp_update_max_rcv_wnd_seq(struct tcp_sock *tp)
{
	u32 wre = tp->rcv_wup + tp->rcv_wnd;

	if (after(wre, tp->rcv_mwnd_seq))
		tp->rcv_mwnd_seq = wre;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or the other
 * resolution, as the route attribute could change anytime.
 * Each flow must stick to its initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN BIT(0)
#define TCPHDR_SYN BIT(1)
#define TCPHDR_RST BIT(2)
#define TCPHDR_PSH BIT(3)
#define TCPHDR_ACK BIT(4)
#define TCPHDR_URG BIT(5)
#define TCPHDR_ECE BIT(6)
#define TCPHDR_CWR BIT(7)
#define TCPHDR_AE  BIT(8)
#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
			     TCPHDR_FLAGS_MASK)

#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
#define TCPHDR_SYNACK_ACCECN	(TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)
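
/* Illustrative encoding (follows from the BIT() definitions above): a SYN
 * sent with RFC3168 ECN setup carries SYN | ECE | CWR, i.e. TCPHDR_SYN_ECN
 * is 0x02 | 0x40 | 0x80 == 0xc2 in the flags byte, while TCPHDR_AE sits in
 * bit 8 and therefore only appears in the 16-bit views such as
 * tcp_flags_ntohs() or tcp_skb_cb.tcp_flags, not in tcp_flag_byte().
 */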

#define TCP_ACCECN_CEP_ACE_MASK 0x7
#define TCP_ACCECN_ACE_MAX_DELTA 6

/* To avoid/detect middlebox interference, not all counters start at 0.
 * See draft-ietf-tcpm-accurate-ecn for the latest values.
 */
#define TCP_ACCECN_CEP_INIT_OFFSET 5
#define TCP_ACCECN_E1B_INIT_OFFSET 1
#define TCP_ACCECN_E0B_INIT_OFFSET 1
#define TCP_ACCECN_CEB_INIT_OFFSET 0

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits			*/
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note :
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
#define TSTAMP_ACK_SK	0x1
#define TSTAMP_ACK_BPF	0x2
	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:4;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));

#endif

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED	BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	BIT(1)
/* Require successfully negotiated AccECN capability */
#define TCP_CONG_NEEDS_ACCECN	BIT(2)
/* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
#define TCP_CONG_ECT_1_NEGOTIATION	BIT(3)
/* Cannot fallback to RFC3168 during AccECN negotiation */
#define TCP_CONG_NO_FALLBACK_RFC3168	BIT(4)
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
			 TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
			 TCP_CONG_NO_FALLBACK_RFC3168)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* A congestion control (CC) must provide one of either:
	 *
	 * (a) a cong_avoid function, if the CC wants to use the core TCP
	 *     stack's default functionality to implement a "classic"
	 *     (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
	 *     idle periods, pacing rate computations, etc.
	 *
	 * (b) a cong_control function, if the CC wants custom behavior and
	 *     complete control of all congestion control behaviors.
	 */
	/* (a) "classic" response: calculate new cwnd.
	 */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* (b) "custom" response: call when packets are delivered to update
	 * cwnd and pacing rate, after all the ca_state processing.
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when CA_EVENT_TX_START cwnd event occurs (optional) */
	void (*cwnd_event_tx_start)(struct sock *sk);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs (optional) */
	u32 (*min_tso_segs)(struct sock *sk);

	/* new value of cwnd after loss (required) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
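
/* Illustrative sketch of how a congestion control module would plug into the
 * ops above (module-side code, not part of this header; the "example" name is
 * made up and the Reno helpers are reused only to show a minimal "classic"
 * (a)-style CC):
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 * A CC providing cong_control instead (the (b) style) would leave cong_avoid
 * unset and take over cwnd and pacing-rate decisions itself.
 */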

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline bool tcp_ca_needs_accecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
}

static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
}

static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (event == CA_EVENT_TX_START) {
		if (icsk->icsk_ca_ops->cwnd_event_tx_start)
			icsk->icsk_ca_ops->cwnd_event_tx_start(sk);
		return;
	}
	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);


static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
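
/* Worked example of the equation above (hypothetical counters): with
 * packets_out = 10, sacked_out = 3, lost_out = 2 and retrans_out = 1,
 * tcp_left_out() is 5 and tcp_packets_in_flight() is 10 - 5 + 1 = 6
 * segments still believed to be in the network.
 */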
1505
1506 #define TCP_INFINITE_SSTHRESH 0x7fffffff
1507
tcp_snd_cwnd(const struct tcp_sock * tp)1508 static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1509 {
1510 return tp->snd_cwnd;
1511 }
1512
tcp_snd_cwnd_set(struct tcp_sock * tp,u32 val)1513 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1514 {
1515 WARN_ON_ONCE((int)val <= 0);
1516 tp->snd_cwnd = val;
1517 }
1518
tcp_in_slow_start(const struct tcp_sock * tp)1519 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1520 {
1521 return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1522 }
1523
tcp_in_initial_slowstart(const struct tcp_sock * tp)1524 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1525 {
1526 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1527 }
1528
tcp_in_cwnd_reduction(const struct sock * sk)1529 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1530 {
1531 return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1532 (1 << inet_csk(sk)->icsk_ca_state);
1533 }
1534
1535 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1536 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1537 * ssthresh.
1538 */
tcp_current_ssthresh(const struct sock * sk)1539 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1540 {
1541 const struct tcp_sock *tp = tcp_sk(sk);
1542
1543 if (tcp_in_cwnd_reduction(sk))
1544 return tp->snd_ssthresh;
1545 else
1546 return max(tp->snd_ssthresh,
1547 ((tcp_snd_cwnd(tp) >> 1) +
1548 (tcp_snd_cwnd(tp) >> 2)));
1549 }
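/* Illustrative arithmetic for the shift expression above (assumed cwnd):
 *
 *	cwnd = 40  ->  (40 >> 1) + (40 >> 2) = 20 + 10 = 30
 *
 * i.e. outside cwnd reduction the helper reports max(snd_ssthresh, ~3/4 of cwnd).
 */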
1550
1551 /* Intentionally a macro, so the WARN_ON location reported is the caller's */
1552 #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
1553
1554 void tcp_enter_cwr(struct sock *sk);
1555 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1556
1557 /* The maximum number of MSS of available cwnd for which TSO defers
1558 * sending if not using sysctl_tcp_tso_win_divisor.
1559 */
tcp_max_tso_deferred_mss(const struct tcp_sock * tp)1560 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1561 {
1562 return 3;
1563 }
1564
1565 /* Returns end sequence number of the receiver's advertised window */
tcp_wnd_end(const struct tcp_sock * tp)1566 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1567 {
1568 return tp->snd_una + tp->snd_wnd;
1569 }
1570
1571 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1572 * flexible approach. The RFC suggests cwnd should not be raised unless
1573 * it was fully used previously. And that's exactly what we do in
1574 * congestion avoidance mode. But in slow start we allow cwnd to grow
1575 * as long as the application has used half the cwnd.
1576 * Example :
1577 * cwnd is 10 (IW10), but application sends 9 frames.
1578 * We allow cwnd to reach 18 when all frames are ACKed.
1579 * This check is safe because it's as aggressive as slow start which already
1580 * risks 100% overshoot. The advantage is that we discourage applications from
1581 * sending filler packets or extra data just to artificially blow up cwnd usage,
1582 * while allowing an application-limited process to probe bandwidth more aggressively.
1583 */
tcp_is_cwnd_limited(const struct sock * sk)1584 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1585 {
1586 const struct tcp_sock *tp = tcp_sk(sk);
1587
1588 if (tp->is_cwnd_limited)
1589 return true;
1590
1591 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
1592 if (tcp_in_slow_start(tp))
1593 return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1594
1595 return false;
1596 }
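/* Hypothetical walk-through of the slow-start branch above, reusing the
 * IW10 example from the comment (max_packets_out = 9):
 *
 *	tcp_snd_cwnd(tp) = 10  ->  10 < 2 * 9  ->  cwnd-limited, may grow
 *	tcp_snd_cwnd(tp) = 18  ->  18 < 18 is false  ->  growth stops
 */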
1597
1598 /* BBR congestion control needs pacing.
1599 * Same remark for SO_MAX_PACING_RATE.
1600 * The sch_fq packet scheduler handles pacing efficiently,
1601 * but it is not always installed/used.
1602 * Return true if the TCP stack should pace packets itself.
1603 */
tcp_needs_internal_pacing(const struct sock * sk)1604 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1605 {
1606 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1607 }
1608
1609 /* Estimates how many jiffies from now the next packet for this flow can be sent.
1610 * Scheduling a retransmit timer earlier than that would be silly.
1611 */
tcp_pacing_delay(const struct sock * sk)1612 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1613 {
1614 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1615
1616 return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1617 }
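/* Illustrative only, with assumed timestamps:
 *	tcp_wstamp_ns   = 5,000,000 ns  (earliest allowed departure)
 *	tcp_clock_cache = 3,000,000 ns  (cached "now")
 * delay = 2,000,000 ns, so the helper returns ~2 ms worth of jiffies;
 * a non-positive delay returns 0 (no pacing delay pending).
 */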
1618
tcp_reset_xmit_timer(struct sock * sk,const int what,unsigned long when,bool pace_delay)1619 static inline void tcp_reset_xmit_timer(struct sock *sk,
1620 const int what,
1621 unsigned long when,
1622 bool pace_delay)
1623 {
1624 if (pace_delay)
1625 when += tcp_pacing_delay(sk);
1626 inet_csk_reset_xmit_timer(sk, what, when,
1627 tcp_rto_max(sk));
1628 }
1629
1630 /* Something is really bad: we could not queue an additional packet,
1631 * because the qdisc is full, the receiver sent a zero window, or we are paced.
1632 * We do not want to add fuel to the fire, or abort too early,
1633 * so make sure the timer we arm now is at least 200ms in the future,
1634 * regardless of the current icsk_rto value (as it could be ~2ms).
1635 */
tcp_probe0_base(const struct sock * sk)1636 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1637 {
1638 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1639 }
1640
1641 /* Variant of inet_csk_rto_backoff() used for zero window probes */
tcp_probe0_when(const struct sock * sk,unsigned long max_when)1642 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1643 unsigned long max_when)
1644 {
1645 u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1646 inet_csk(sk)->icsk_backoff);
1647 u64 when = (u64)tcp_probe0_base(sk) << backoff;
1648
1649 return (unsigned long)min_t(u64, when, max_when);
1650 }
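/* Sketch with assumed values: icsk_backoff = 3 and tcp_probe0_base() = 200 ms
 * give when = 200 ms << 3 = 1600 ms, then capped at max_when. The min_t()
 * on backoff bounds the shift at ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1.
 */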
1651
tcp_check_probe_timer(struct sock * sk)1652 static inline void tcp_check_probe_timer(struct sock *sk)
1653 {
1654 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1655 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1656 tcp_probe0_base(sk), true);
1657 }
1658
tcp_init_wl(struct tcp_sock * tp,u32 seq)1659 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1660 {
1661 tp->snd_wl1 = seq;
1662 }
1663
tcp_update_wl(struct tcp_sock * tp,u32 seq)1664 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1665 {
1666 tp->snd_wl1 = seq;
1667 }
1668
1669 /*
1670 * Calculate(/check) TCP checksum
1671 */
tcp_v4_check(int len,__be32 saddr,__be32 daddr,__wsum base)1672 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1673 __be32 daddr, __wsum base)
1674 {
1675 return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1676 }
1677
tcp_checksum_complete(struct sk_buff * skb)1678 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1679 {
1680 return !skb_csum_unnecessary(skb) &&
1681 __skb_checksum_complete(skb);
1682 }
1683
1684 enum skb_drop_reason tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1685
1686 static inline enum skb_drop_reason
tcp_filter(struct sock * sk,struct sk_buff * skb)1687 tcp_filter(struct sock *sk, struct sk_buff *skb)
1688 {
1689 const struct tcphdr *th = (const struct tcphdr *)skb->data;
1690
1691 return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th));
1692 }
1693
1694 void tcp_set_state(struct sock *sk, int state);
1695 void tcp_done(struct sock *sk);
1696 int tcp_abort(struct sock *sk, int err);
1697
tcp_sack_reset(struct tcp_options_received * rx_opt)1698 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1699 {
1700 rx_opt->dsack = 0;
1701 rx_opt->num_sacks = 0;
1702 }
1703
1704 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1705
tcp_slow_start_after_idle_check(struct sock * sk)1706 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1707 {
1708 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1709 struct tcp_sock *tp = tcp_sk(sk);
1710 s32 delta;
1711
1712 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1713 tp->packets_out || ca_ops->cong_control)
1714 return;
1715 delta = tcp_jiffies32 - tp->lsndtime;
1716 if (delta > inet_csk(sk)->icsk_rto)
1717 tcp_cwnd_restart(sk, delta);
1718 }
1719
1720 /* Determine a window scaling and initial window to offer. */
1721 void tcp_select_initial_window(const struct sock *sk, int __space,
1722 __u32 mss, __u32 *rcv_wnd,
1723 __u32 *window_clamp, int wscale_ok,
1724 __u8 *rcv_wscale, __u32 init_rcv_wnd);
1725
__tcp_win_from_space(u8 scaling_ratio,int space)1726 static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1727 {
1728 s64 scaled_space = (s64)space * scaling_ratio;
1729
1730 return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1731 }
1732
tcp_win_from_space(const struct sock * sk,int space)1733 static inline int tcp_win_from_space(const struct sock *sk, int space)
1734 {
1735 return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1736 }
1737
1738 /* inverse of __tcp_win_from_space() */
__tcp_space_from_win(u8 scaling_ratio,int win)1739 static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1740 {
1741 u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1742
1743 do_div(val, scaling_ratio);
1744 return val;
1745 }
1746
tcp_space_from_win(const struct sock * sk,int win)1747 static inline int tcp_space_from_win(const struct sock *sk, int win)
1748 {
1749 return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1750 }
1751
1752 /* Assume a 50% default for skb->len/skb->truesize ratio.
1753 * This may be adjusted later in tcp_measure_rcv_mss().
1754 */
1755 #define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
1756
tcp_scaling_ratio_init(struct sock * sk)1757 static inline void tcp_scaling_ratio_init(struct sock *sk)
1758 {
1759 tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1760 }
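/* Illustrative only: with the default scaling_ratio set above (a 50%
 * len/truesize assumption), __tcp_win_from_space() advertises half of the
 * buffer space, e.g. space = 128 KB -> win ~= 64 KB, and
 * __tcp_space_from_win() inverts that mapping (64 KB -> ~128 KB).
 */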
1761
1762 /* Note: caller must be prepared to deal with negative returns */
tcp_space(const struct sock * sk)1763 static inline int tcp_space(const struct sock *sk)
1764 {
1765 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1766 READ_ONCE(sk->sk_backlog.len) -
1767 atomic_read(&sk->sk_rmem_alloc));
1768 }
1769
tcp_full_space(const struct sock * sk)1770 static inline int tcp_full_space(const struct sock *sk)
1771 {
1772 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1773 }
1774
__tcp_adjust_rcv_ssthresh(struct sock * sk,u32 new_ssthresh)1775 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1776 {
1777 int unused_mem = sk_unused_reserved_mem(sk);
1778 struct tcp_sock *tp = tcp_sk(sk);
1779
1780 tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1781 if (unused_mem)
1782 tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1783 tcp_win_from_space(sk, unused_mem));
1784 }
1785
tcp_adjust_rcv_ssthresh(struct sock * sk)1786 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1787 {
1788 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1789 }
1790
1791 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1792 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1793
1794
1795 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1796 * If 87.5% (7/8) of the space has been consumed, we want to override the
1797 * SO_RCVLOWAT constraint, since we are receiving skbs with a too-small
1798 * len/truesize ratio.
1799 */
tcp_rmem_pressure(const struct sock * sk)1800 static inline bool tcp_rmem_pressure(const struct sock *sk)
1801 {
1802 int rcvbuf, threshold;
1803
1804 if (tcp_under_memory_pressure(sk))
1805 return true;
1806
1807 rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1808 threshold = rcvbuf - (rcvbuf >> 3);
1809
1810 return atomic_read(&sk->sk_rmem_alloc) > threshold;
1811 }
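/* Sketch with an assumed sk_rcvbuf of 64 KB: threshold = 64 KB - 8 KB = 56 KB,
 * so pressure is reported once more than 7/8 of the buffer is allocated,
 * or earlier if the socket is under global TCP memory pressure.
 */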
1812
tcp_epollin_ready(const struct sock * sk,int target)1813 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1814 {
1815 const struct tcp_sock *tp = tcp_sk(sk);
1816 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1817
1818 if (avail <= 0)
1819 return false;
1820
1821 return (avail >= target) || tcp_rmem_pressure(sk) ||
1822 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1823 }
1824
1825 extern void tcp_openreq_init_rwin(struct request_sock *req,
1826 const struct sock *sk_listener,
1827 const struct dst_entry *dst);
1828
1829 void tcp_enter_memory_pressure(struct sock *sk);
1830 void tcp_leave_memory_pressure(struct sock *sk);
1831
keepalive_intvl_when(const struct tcp_sock * tp)1832 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1833 {
1834 struct net *net = sock_net((struct sock *)tp);
1835 int val;
1836
1837 /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1838 * and do_tcp_setsockopt().
1839 */
1840 val = READ_ONCE(tp->keepalive_intvl);
1841
1842 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1843 }
1844
keepalive_time_when(const struct tcp_sock * tp)1845 static inline int keepalive_time_when(const struct tcp_sock *tp)
1846 {
1847 struct net *net = sock_net((struct sock *)tp);
1848 int val;
1849
1850 /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1851 val = READ_ONCE(tp->keepalive_time);
1852
1853 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1854 }
1855
keepalive_probes(const struct tcp_sock * tp)1856 static inline int keepalive_probes(const struct tcp_sock *tp)
1857 {
1858 struct net *net = sock_net((struct sock *)tp);
1859 int val;
1860
1861 /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1862 * and do_tcp_setsockopt().
1863 */
1864 val = READ_ONCE(tp->keepalive_probes);
1865
1866 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1867 }
1868
keepalive_time_elapsed(const struct tcp_sock * tp)1869 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1870 {
1871 const struct inet_connection_sock *icsk = &tp->inet_conn;
1872
1873 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1874 tcp_jiffies32 - tp->rcv_tstamp);
1875 }
1876
tcp_fin_time(const struct sock * sk)1877 static inline int tcp_fin_time(const struct sock *sk)
1878 {
1879 int fin_timeout = tcp_sk(sk)->linger2 ? :
1880 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1881 const int rto = inet_csk(sk)->icsk_rto;
1882
1883 if (fin_timeout < (rto << 2) - (rto >> 1))
1884 fin_timeout = (rto << 2) - (rto >> 1);
1885
1886 return fin_timeout;
1887 }
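/* Illustrative bound: (rto << 2) - (rto >> 1) equals 3.5 * RTO, so with an
 * assumed rto of 200 ms the effective FIN timeout is never below 700 ms,
 * whatever linger2 or the tcp_fin_timeout sysctl request.
 */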
1888
tcp_paws_check(const struct tcp_options_received * rx_opt,int paws_win)1889 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1890 int paws_win)
1891 {
1892 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1893 return true;
1894 if (unlikely(!time_before32(ktime_get_seconds(),
1895 rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1896 return true;
1897 /*
1898 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1899 * while subsequent TCP messages carry valid values. Ignore a 0 value,
1900 * or else a 'negative' tsval might forbid us from accepting their packets.
1901 */
1902 if (!rx_opt->ts_recent)
1903 return true;
1904 return false;
1905 }
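/* Illustrative PAWS arithmetic (assumed values): with ts_recent = 1000 and
 * an incoming rcv_tsval = 995, (s32)(1000 - 995) = 5 is not <= 0, so the
 * first test fails for paws_win = 0; the segment is still accepted if
 * ts_recent_stamp is older than TCP_PAWS_WRAP or if ts_recent is 0.
 */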
1906
tcp_paws_reject(const struct tcp_options_received * rx_opt,int rst)1907 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1908 int rst)
1909 {
1910 if (tcp_paws_check(rx_opt, 0))
1911 return false;
1912
1913 /* RST segments are not recommended to carry a timestamp,
1914 and, if they do, it is recommended to ignore PAWS because
1915 "their cleanup function should take precedence over timestamps."
1916 Certainly, this is a mistake. It is necessary to understand the reason
1917 for this constraint before relaxing it: if the peer reboots, its clock may go
1918 out-of-sync and half-open connections will not be reset.
1919 Actually, the problem would not exist if all
1920 the implementations followed the draft about maintaining clock state
1921 across reboots. Linux-2.2 DOES NOT!
1922
1923 However, we can relax the time bounds for RST segments to MSL.
1924 */
1925 if (rst && !time_before32(ktime_get_seconds(),
1926 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1927 return false;
1928 return true;
1929 }
1930
__tcp_fast_path_on(struct tcp_sock * tp,u32 snd_wnd)1931 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
1932 {
1933 u32 ace;
1934
1935 /* mptcp hooks are only on the slow path */
1936 if (sk_is_mptcp((struct sock *)tp))
1937 return;
1938
1939 ace = tcp_ecn_mode_accecn(tp) ?
1940 ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
1941 TCP_ACCECN_CEP_ACE_MASK) : 0;
1942
1943 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1944 (ace << 22) |
1945 ntohl(TCP_FLAG_ACK) |
1946 snd_wnd);
1947 }
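/* Layout sketch of pred_flags as built above (host-order view before the
 * htonl()): data offset (tcp_header_len / 4) in the top four bits, the
 * AccECN ACE counter across the AE/CWR/ECE flag bits, the ACK flag, and
 * snd_wnd in the low 16 bits. E.g. an assumed 32-byte header with ace == 0:
 *
 *	(32 << 26) | ntohl(TCP_FLAG_ACK) | snd_wnd == 0x80100000 | snd_wnd
 */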
1948
tcp_fast_path_on(struct tcp_sock * tp)1949 static inline void tcp_fast_path_on(struct tcp_sock *tp)
1950 {
1951 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
1952 }
1953
tcp_fast_path_check(struct sock * sk)1954 static inline void tcp_fast_path_check(struct sock *sk)
1955 {
1956 struct tcp_sock *tp = tcp_sk(sk);
1957
1958 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
1959 tp->rcv_wnd &&
1960 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1961 !tp->urg_data)
1962 tcp_fast_path_on(tp);
1963 }
1964
1965 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1966 int mib_idx, u32 *last_oow_ack_time);
1967
tcp_mib_init(struct net * net)1968 static inline void tcp_mib_init(struct net *net)
1969 {
1970 /* See RFC 2012 */
1971 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1972 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1973 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1974 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1975 }
1976
1977 /* from STCP */
tcp_clear_all_retrans_hints(struct tcp_sock * tp)1978 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1979 {
1980 tp->retransmit_skb_hint = NULL;
1981 }
1982
1983 #define tcp_md5_addr tcp_ao_addr
1984
1985 /* - key database */
1986 struct tcp_md5sig_key {
1987 struct hlist_node node;
1988 u8 keylen;
1989 u8 family; /* AF_INET or AF_INET6 */
1990 u8 prefixlen;
1991 u8 flags;
1992 union tcp_md5_addr addr;
1993 int l3index; /* set if key added with L3 scope */
1994 u8 key[TCP_MD5SIG_MAXKEYLEN];
1995 struct rcu_head rcu;
1996 };
1997
1998 /* - sock block */
1999 struct tcp_md5sig_info {
2000 struct hlist_head head;
2001 struct rcu_head rcu;
2002 };
2003
2004 /* - pseudo header */
2005 struct tcp4_pseudohdr {
2006 __be32 saddr;
2007 __be32 daddr;
2008 __u8 pad;
2009 __u8 protocol;
2010 __be16 len;
2011 };
2012
2013 struct tcp6_pseudohdr {
2014 struct in6_addr saddr;
2015 struct in6_addr daddr;
2016 __be32 len;
2017 __be32 protocol; /* including padding */
2018 };
2019
2020 /**
2021 * struct tcp_sigpool - per-CPU pool of ahash_requests
2022 * @scratch: per-CPU temporary area that can be used between
2023 * tcp_sigpool_start() and tcp_sigpool_end() to perform
2024 * a crypto request
2025 * @req: pre-allocated ahash request
2026 */
2027 struct tcp_sigpool {
2028 void *scratch;
2029 struct ahash_request *req;
2030 };
2031
2032 int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
2033 void tcp_sigpool_get(unsigned int id);
2034 void tcp_sigpool_release(unsigned int id);
2035 int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
2036 const struct sk_buff *skb,
2037 unsigned int header_len);
2038
2039 /**
2040 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
2041 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
2042 * @c: returned tcp_sigpool for usage (uninitialized on failure)
2043 *
2044 * Returns: 0 on success, error otherwise.
2045 */
2046 int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
2047 /**
2048 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
2049 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
2050 */
2051 void tcp_sigpool_end(struct tcp_sigpool *c);
2052 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
2053 /* - functions */
2054 void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
2055 const struct sock *sk, const struct sk_buff *skb);
2056 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
2057 int family, u8 prefixlen, int l3index, u8 flags,
2058 const u8 *newkey, u8 newkeylen);
2059 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
2060 int family, u8 prefixlen, int l3index,
2061 struct tcp_md5sig_key *key);
2062
2063 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
2064 int family, u8 prefixlen, int l3index, u8 flags);
2065 void tcp_clear_md5_list(struct sock *sk);
2066 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
2067 const struct sock *addr_sk);
2068
2069 #ifdef CONFIG_TCP_MD5SIG
2070 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
2071 const union tcp_md5_addr *addr,
2072 int family, bool any_l3index);
2073 static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock * sk,int l3index,const union tcp_md5_addr * addr,int family)2074 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2075 const union tcp_md5_addr *addr, int family)
2076 {
2077 if (!static_branch_unlikely(&tcp_md5_needed.key))
2078 return NULL;
2079 return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
2080 }
2081
2082 static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock * sk,const union tcp_md5_addr * addr,int family)2083 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2084 const union tcp_md5_addr *addr, int family)
2085 {
2086 if (!static_branch_unlikely(&tcp_md5_needed.key))
2087 return NULL;
2088 return __tcp_md5_do_lookup(sk, 0, addr, family, true);
2089 }
2090
2091 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
2092 void tcp_md5_destruct_sock(struct sock *sk);
2093 #else
2094 static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock * sk,int l3index,const union tcp_md5_addr * addr,int family)2095 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2096 const union tcp_md5_addr *addr, int family)
2097 {
2098 return NULL;
2099 }
2100
2101 static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock * sk,const union tcp_md5_addr * addr,int family)2102 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2103 const union tcp_md5_addr *addr, int family)
2104 {
2105 return NULL;
2106 }
2107
2108 #define tcp_twsk_md5_key(twsk) NULL
tcp_md5_destruct_sock(struct sock * sk)2109 static inline void tcp_md5_destruct_sock(struct sock *sk)
2110 {
2111 }
2112 #endif
2113
2114 struct md5_ctx;
2115 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
2116 unsigned int header_len);
2117 void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
2118
2119 /* From tcp_fastopen.c */
2120 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2121 struct tcp_fastopen_cookie *cookie);
2122 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2123 struct tcp_fastopen_cookie *cookie, bool syn_lost,
2124 u16 try_exp);
2125 struct tcp_fastopen_request {
2126 /* Fast Open cookie. Size 0 means a cookie request */
2127 struct tcp_fastopen_cookie cookie;
2128 struct msghdr *data; /* data in MSG_FASTOPEN */
2129 size_t size;
2130 int copied; /* queued in tcp_connect() */
2131 struct ubuf_info *uarg;
2132 };
2133 void tcp_free_fastopen_req(struct tcp_sock *tp);
2134 void tcp_fastopen_destroy_cipher(struct sock *sk);
2135 void tcp_fastopen_ctx_destroy(struct net *net);
2136 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
2137 void *primary_key, void *backup_key);
2138 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
2139 u64 *key);
2140 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
2141 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
2142 struct request_sock *req,
2143 struct tcp_fastopen_cookie *foc,
2144 const struct dst_entry *dst);
2145 void tcp_fastopen_init_key_once(struct net *net);
2146 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
2147 struct tcp_fastopen_cookie *cookie);
2148 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
2149 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
2150 #define TCP_FASTOPEN_KEY_MAX 2
2151 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
2152 (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
2153
2154 /* Fastopen key context */
2155 struct tcp_fastopen_context {
2156 siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
2157 int num;
2158 struct rcu_head rcu;
2159 };
2160
2161 void tcp_fastopen_active_disable(struct sock *sk);
2162 bool tcp_fastopen_active_should_disable(struct sock *sk);
2163 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
2164 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
2165
2166 /* Caller needs to wrap with rcu_read_(un)lock() */
2167 static inline
tcp_fastopen_get_ctx(const struct sock * sk)2168 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
2169 {
2170 struct tcp_fastopen_context *ctx;
2171
2172 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
2173 if (!ctx)
2174 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
2175 return ctx;
2176 }
2177
2178 static inline
tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie * foc,const struct tcp_fastopen_cookie * orig)2179 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
2180 const struct tcp_fastopen_cookie *orig)
2181 {
2182 if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
2183 orig->len == foc->len &&
2184 !memcmp(orig->val, foc->val, foc->len))
2185 return true;
2186 return false;
2187 }
2188
2189 static inline
tcp_fastopen_context_len(const struct tcp_fastopen_context * ctx)2190 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
2191 {
2192 return ctx->num;
2193 }
2194
2195 /* Latencies incurred by various limits for a sender. They are
2196 * chronograph-like stats that are mutually exclusive.
2197 */
2198 enum tcp_chrono {
2199 TCP_CHRONO_UNSPEC,
2200 TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
2201 TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
2202 TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
2203 __TCP_CHRONO_MAX,
2204 };
2205
tcp_chrono_set(struct tcp_sock * tp,const enum tcp_chrono new)2206 static inline void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2207 {
2208 const u32 now = tcp_jiffies32;
2209 enum tcp_chrono old = tp->chrono_type;
2210
2211 if (old > TCP_CHRONO_UNSPEC)
2212 tp->chrono_stat[old - 1] += now - tp->chrono_start;
2213 tp->chrono_start = now;
2214 tp->chrono_type = new;
2215 }
2216
tcp_chrono_start(struct sock * sk,const enum tcp_chrono type)2217 static inline void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2218 {
2219 struct tcp_sock *tp = tcp_sk(sk);
2220
2221 /* If there are multiple conditions worthy of tracking in a
2222 * chronograph then the highest priority enum takes precedence
2223 * over the other conditions, so that if something "more interesting"
2224 * starts happening, we stop the previous chrono and start a new one.
2225 */
2226 if (type > tp->chrono_type)
2227 tcp_chrono_set(tp, type);
2228 }
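/* Sketch of the priority rule above (hypothetical sequence): while the
 * flow is accounted as TCP_CHRONO_BUSY and the send buffer then fills up,
 *	tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
 * switches the active chrono (SNDBUF_LIMITED > BUSY) and tcp_chrono_set()
 * folds the elapsed BUSY time into chrono_stat[].
 */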
2229
2230 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2231
2232 /* This helper is needed because skb->tcp_tsorted_anchor uses
2233 * the same memory storage as skb->destructor/_skb_refdst
2234 */
tcp_skb_tsorted_anchor_cleanup(struct sk_buff * skb)2235 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2236 {
2237 skb->destructor = NULL;
2238 skb->_skb_refdst = 0UL;
2239 }
2240
2241 #define tcp_skb_tsorted_save(skb) { \
2242 unsigned long _save = skb->_skb_refdst; \
2243 skb->_skb_refdst = 0UL;
2244
2245 #define tcp_skb_tsorted_restore(skb) \
2246 skb->_skb_refdst = _save; \
2247 }
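/* Usage sketch (hypothetical caller) for the pair of macros above, which
 * bracket a region where _skb_refdst is parked so the tsorted anchor can
 * be used safely:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		... code that may touch skb->tcp_tsorted_anchor ...
 *	} tcp_skb_tsorted_restore(skb);
 */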
2248
2249 void tcp_write_queue_purge(struct sock *sk);
2250
tcp_rtx_queue_head(const struct sock * sk)2251 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2252 {
2253 return skb_rb_first(&sk->tcp_rtx_queue);
2254 }
2255
tcp_rtx_queue_tail(const struct sock * sk)2256 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2257 {
2258 return skb_rb_last(&sk->tcp_rtx_queue);
2259 }
2260
tcp_write_queue_tail(const struct sock * sk)2261 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2262 {
2263 return skb_peek_tail(&sk->sk_write_queue);
2264 }
2265
2266 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \
2267 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2268
tcp_send_head(const struct sock * sk)2269 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2270 {
2271 return skb_peek(&sk->sk_write_queue);
2272 }
2273
tcp_skb_is_last(const struct sock * sk,const struct sk_buff * skb)2274 static inline bool tcp_skb_is_last(const struct sock *sk,
2275 const struct sk_buff *skb)
2276 {
2277 return skb_queue_is_last(&sk->sk_write_queue, skb);
2278 }
2279
2280 /**
2281 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2282 * @sk: socket
2283 *
2284 * Since the write queue can have a temporary empty skb in it,
2285 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2286 */
tcp_write_queue_empty(const struct sock * sk)2287 static inline bool tcp_write_queue_empty(const struct sock *sk)
2288 {
2289 const struct tcp_sock *tp = tcp_sk(sk);
2290
2291 return tp->write_seq == tp->snd_nxt;
2292 }
2293
tcp_rtx_queue_empty(const struct sock * sk)2294 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2295 {
2296 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2297 }
2298
tcp_rtx_and_write_queues_empty(const struct sock * sk)2299 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2300 {
2301 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2302 }
2303
tcp_add_write_queue_tail(struct sock * sk,struct sk_buff * skb)2304 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2305 {
2306 __skb_queue_tail(&sk->sk_write_queue, skb);
2307
2308 /* Queue it, remembering where we must start sending. */
2309 if (sk->sk_write_queue.next == skb)
2310 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2311 }
2312
2313 /* Insert new before skb on the write queue of sk. */
tcp_insert_write_queue_before(struct sk_buff * new,struct sk_buff * skb,struct sock * sk)2314 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2315 struct sk_buff *skb,
2316 struct sock *sk)
2317 {
2318 __skb_queue_before(&sk->sk_write_queue, skb, new);
2319 }
2320
tcp_unlink_write_queue(struct sk_buff * skb,struct sock * sk)2321 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2322 {
2323 tcp_skb_tsorted_anchor_cleanup(skb);
2324 __skb_unlink(skb, &sk->sk_write_queue);
2325 }
2326
2327 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2328
tcp_rtx_queue_unlink(struct sk_buff * skb,struct sock * sk)2329 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2330 {
2331 tcp_skb_tsorted_anchor_cleanup(skb);
2332 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2333 }
2334
tcp_rtx_queue_unlink_and_free(struct sk_buff * skb,struct sock * sk)2335 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2336 {
2337 list_del(&skb->tcp_tsorted_anchor);
2338 tcp_rtx_queue_unlink(skb, sk);
2339 tcp_wmem_free_skb(sk, skb);
2340 }
2341
tcp_write_collapse_fence(struct sock * sk)2342 static inline void tcp_write_collapse_fence(struct sock *sk)
2343 {
2344 struct sk_buff *skb = tcp_write_queue_tail(sk);
2345
2346 if (skb)
2347 TCP_SKB_CB(skb)->eor = 1;
2348 }
2349
tcp_push_pending_frames(struct sock * sk)2350 static inline void tcp_push_pending_frames(struct sock *sk)
2351 {
2352 if (tcp_send_head(sk)) {
2353 struct tcp_sock *tp = tcp_sk(sk);
2354
2355 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2356 }
2357 }
2358
2359 /* Start sequence of the skb just after the highest SACKed skb;
2360 * valid only if sacked_out > 0 or if the caller has otherwise
2361 * ensured validity.
2362 */
tcp_highest_sack_seq(struct tcp_sock * tp)2363 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2364 {
2365 if (!tp->sacked_out)
2366 return tp->snd_una;
2367
2368 if (tp->highest_sack == NULL)
2369 return tp->snd_nxt;
2370
2371 return TCP_SKB_CB(tp->highest_sack)->seq;
2372 }
2373
tcp_advance_highest_sack(struct sock * sk,struct sk_buff * skb)2374 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2375 {
2376 tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2377 }
2378
tcp_highest_sack(struct sock * sk)2379 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2380 {
2381 return tcp_sk(sk)->highest_sack;
2382 }
2383
tcp_highest_sack_reset(struct sock * sk)2384 static inline void tcp_highest_sack_reset(struct sock *sk)
2385 {
2386 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2387 }
2388
2389 /* Called when old skb is about to be deleted and replaced by new skb */
tcp_highest_sack_replace(struct sock * sk,struct sk_buff * old,struct sk_buff * new)2390 static inline void tcp_highest_sack_replace(struct sock *sk,
2391 struct sk_buff *old,
2392 struct sk_buff *new)
2393 {
2394 if (old == tcp_highest_sack(sk))
2395 tcp_sk(sk)->highest_sack = new;
2396 }
2397
2398 /* This helper checks if socket has IP_TRANSPARENT set */
inet_sk_transparent(const struct sock * sk)2399 static inline bool inet_sk_transparent(const struct sock *sk)
2400 {
2401 switch (sk->sk_state) {
2402 case TCP_TIME_WAIT:
2403 return inet_twsk(sk)->tw_transparent;
2404 case TCP_NEW_SYN_RECV:
2405 return inet_rsk(inet_reqsk(sk))->no_srccheck;
2406 }
2407 return inet_test_bit(TRANSPARENT, sk);
2408 }
2409
2410 /* Determines whether this is a thin stream (which may suffer from
2411 * increased latency). Used to trigger latency-reducing mechanisms.
2412 */
tcp_stream_is_thin(struct tcp_sock * tp)2413 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2414 {
2415 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2416 }
2417
2418 /* /proc */
2419 enum tcp_seq_states {
2420 TCP_SEQ_STATE_LISTENING,
2421 TCP_SEQ_STATE_ESTABLISHED,
2422 };
2423
2424 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2425 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2426 void tcp_seq_stop(struct seq_file *seq, void *v);
2427
2428 struct tcp_seq_afinfo {
2429 sa_family_t family;
2430 };
2431
2432 struct tcp_iter_state {
2433 struct seq_net_private p;
2434 enum tcp_seq_states state;
2435 struct sock *syn_wait_sk;
2436 int bucket, offset, sbucket, num;
2437 loff_t last_pos;
2438 };
2439
2440 extern struct request_sock_ops tcp_request_sock_ops;
2441 extern struct request_sock_ops tcp6_request_sock_ops;
2442
2443 void tcp_v4_destroy_sock(struct sock *sk);
2444
2445 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2446 netdev_features_t features);
2447 struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2448 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2449 struct tcphdr *th);
2450 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2451 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2452 #ifdef CONFIG_INET
2453 void tcp_gro_complete(struct sk_buff *skb);
2454 #else
tcp_gro_complete(struct sk_buff * skb)2455 static inline void tcp_gro_complete(struct sk_buff *skb) { }
2456 #endif
2457
__tcp_v4_send_check(struct sk_buff * skb,__be32 saddr,__be32 daddr)2458 static inline void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
2459 __be32 daddr)
2460 {
2461 struct tcphdr *th = tcp_hdr(skb);
2462
2463 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
2464 skb->csum_start = skb_transport_header(skb) - skb->head;
2465 skb->csum_offset = offsetof(struct tcphdr, check);
2466 }
2467
tcp_notsent_lowat(const struct tcp_sock * tp)2468 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2469 {
2470 struct net *net = sock_net((struct sock *)tp);
2471 u32 val;
2472
2473 val = READ_ONCE(tp->notsent_lowat);
2474
2475 return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2476 }
2477
2478 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2479
2480 #ifdef CONFIG_PROC_FS
2481 int tcp4_proc_init(void);
2482 void tcp4_proc_exit(void);
2483 #endif
2484
2485 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2486 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2487 const struct tcp_request_sock_ops *af_ops,
2488 struct sock *sk, struct sk_buff *skb);
2489
2490 /* TCP af-specific functions */
2491 struct tcp_sock_af_ops {
2492 #ifdef CONFIG_TCP_MD5SIG
2493 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2494 const struct sock *addr_sk);
2495 void (*calc_md5_hash)(char *location,
2496 const struct tcp_md5sig_key *md5,
2497 const struct sock *sk,
2498 const struct sk_buff *skb);
2499 int (*md5_parse)(struct sock *sk,
2500 int optname,
2501 sockptr_t optval,
2502 int optlen);
2503 #endif
2504 #ifdef CONFIG_TCP_AO
2505 int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2506 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2507 struct sock *addr_sk,
2508 int sndid, int rcvid);
2509 int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2510 const struct sock *sk,
2511 __be32 sisn, __be32 disn, bool send);
2512 int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2513 const struct sock *sk, const struct sk_buff *skb,
2514 const u8 *tkey, int hash_offset, u32 sne);
2515 #endif
2516 };
2517
2518 struct tcp_request_sock_ops {
2519 u16 mss_clamp;
2520 #ifdef CONFIG_TCP_MD5SIG
2521 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2522 const struct sock *addr_sk);
2523 void (*calc_md5_hash) (char *location,
2524 const struct tcp_md5sig_key *md5,
2525 const struct sock *sk,
2526 const struct sk_buff *skb);
2527 #endif
2528 #ifdef CONFIG_TCP_AO
2529 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2530 struct request_sock *req,
2531 int sndid, int rcvid);
2532 int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2533 int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2534 struct request_sock *req, const struct sk_buff *skb,
2535 int hash_offset, u32 sne);
2536 #endif
2537 #ifdef CONFIG_SYN_COOKIES
2538 __u32 (*cookie_init_seq)(const struct sk_buff *skb,
2539 __u16 *mss);
2540 #endif
2541 struct dst_entry *(*route_req)(const struct sock *sk,
2542 struct sk_buff *skb,
2543 struct flowi *fl,
2544 struct request_sock *req,
2545 u32 tw_isn);
2546 union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
2547 const struct net *net,
2548 const struct sk_buff *skb);
2549 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2550 struct flowi *fl, struct request_sock *req,
2551 struct tcp_fastopen_cookie *foc,
2552 enum tcp_synack_type synack_type,
2553 struct sk_buff *syn_skb);
2554 };
2555
2556 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2557 #if IS_ENABLED(CONFIG_IPV6)
2558 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2559 #endif
2560
2561 #ifdef CONFIG_SYN_COOKIES
cookie_init_sequence(const struct tcp_request_sock_ops * ops,const struct sock * sk,struct sk_buff * skb,__u16 * mss)2562 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2563 const struct sock *sk, struct sk_buff *skb,
2564 __u16 *mss)
2565 {
2566 tcp_synq_overflow(sk);
2567 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2568 return ops->cookie_init_seq(skb, mss);
2569 }
2570 #else
cookie_init_sequence(const struct tcp_request_sock_ops * ops,const struct sock * sk,struct sk_buff * skb,__u16 * mss)2571 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2572 const struct sock *sk, struct sk_buff *skb,
2573 __u16 *mss)
2574 {
2575 return 0;
2576 }
2577 #endif
2578
2579 struct tcp_key {
2580 union {
2581 struct {
2582 struct tcp_ao_key *ao_key;
2583 char *traffic_key;
2584 u32 sne;
2585 u8 rcv_next;
2586 };
2587 struct tcp_md5sig_key *md5_key;
2588 };
2589 enum {
2590 TCP_KEY_NONE = 0,
2591 TCP_KEY_MD5,
2592 TCP_KEY_AO,
2593 } type;
2594 };
2595
tcp_get_current_key(const struct sock * sk,struct tcp_key * out)2596 static inline void tcp_get_current_key(const struct sock *sk,
2597 struct tcp_key *out)
2598 {
2599 #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2600 const struct tcp_sock *tp = tcp_sk(sk);
2601 #endif
2602
2603 #ifdef CONFIG_TCP_AO
2604 if (static_branch_unlikely(&tcp_ao_needed.key)) {
2605 struct tcp_ao_info *ao;
2606
2607 ao = rcu_dereference_protected(tp->ao_info,
2608 lockdep_sock_is_held(sk));
2609 if (ao) {
2610 out->ao_key = READ_ONCE(ao->current_key);
2611 out->type = TCP_KEY_AO;
2612 return;
2613 }
2614 }
2615 #endif
2616 #ifdef CONFIG_TCP_MD5SIG
2617 if (static_branch_unlikely(&tcp_md5_needed.key) &&
2618 rcu_access_pointer(tp->md5sig_info)) {
2619 out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2620 if (out->md5_key) {
2621 out->type = TCP_KEY_MD5;
2622 return;
2623 }
2624 }
2625 #endif
2626 out->type = TCP_KEY_NONE;
2627 }
2628
tcp_key_is_md5(const struct tcp_key * key)2629 static inline bool tcp_key_is_md5(const struct tcp_key *key)
2630 {
2631 if (static_branch_tcp_md5())
2632 return key->type == TCP_KEY_MD5;
2633 return false;
2634 }
2635
tcp_key_is_ao(const struct tcp_key * key)2636 static inline bool tcp_key_is_ao(const struct tcp_key *key)
2637 {
2638 if (static_branch_tcp_ao())
2639 return key->type == TCP_KEY_AO;
2640 return false;
2641 }
2642
2643 int tcpv4_offload_init(void);
2644
2645 void tcp_v4_init(void);
2646 void tcp_init(void);
2647
2648 /* tcp_recovery.c */
2649 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2650 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2651 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2652 u32 reo_wnd);
2653 extern bool tcp_rack_mark_lost(struct sock *sk);
2654 extern void tcp_rack_reo_timeout(struct sock *sk);
2655
2656 /* tcp_plb.c */
2657
2658 /*
2659 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2660 * expects a cong_ratio, which represents the fraction of traffic that experienced
2661 * congestion over a single RTT. In order to avoid floating point operations,
2662 * this fraction should be scaled so that (1 << TCP_PLB_SCALE) represents 1.0, and passed in.
2663 */
2664 #define TCP_PLB_SCALE 8
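/* Illustrative only: if, hypothetically, 5 of the 10 packets delivered over
 * the last RTT were CE-marked, the caller would pass
 *	cong_ratio = 5 * (1 << TCP_PLB_SCALE) / 10 = 128
 * to tcp_plb_update_state(), i.e. a 50% congestion fraction with no
 * floating point involved.
 */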
2665
2666 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2667 struct tcp_plb_state {
2668 u8 consec_cong_rounds:5, /* consecutive congested rounds */
2669 unused:3;
2670 u32 pause_until; /* jiffies32 when PLB can resume rerouting */
2671 };
2672
tcp_plb_init(const struct sock * sk,struct tcp_plb_state * plb)2673 static inline void tcp_plb_init(const struct sock *sk,
2674 struct tcp_plb_state *plb)
2675 {
2676 plb->consec_cong_rounds = 0;
2677 plb->pause_until = 0;
2678 }
2679 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2680 const int cong_ratio);
2681 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2682 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2683
tcp_warn_once(const struct sock * sk,bool cond,const char * str)2684 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2685 {
2686 WARN_ONCE(cond,
2687 "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2688 str,
2689 tcp_snd_cwnd(tcp_sk(sk)),
2690 tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2691 tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2692 tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2693 inet_csk(sk)->icsk_ca_state,
2694 tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2695 inet_csk(sk)->icsk_pmtu_cookie);
2696 }
2697
2698 /* At how many usecs into the future should the RTO fire? */
tcp_rto_delta_us(const struct sock * sk)2699 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2700 {
2701 const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2702 u32 rto = inet_csk(sk)->icsk_rto;
2703
2704 if (likely(skb)) {
2705 u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2706
2707 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2708 } else {
2709 tcp_warn_once(sk, 1, "rtx queue empty: ");
2710 return jiffies_to_usecs(rto);
2711 }
2712
2713 }
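/* Sketch with assumed values: if icsk_rto corresponds to 200 ms and the
 * head of the rtx queue was (re)sent 150 ms ago, the helper returns about
 * +50,000 us; a negative result means the RTO is already overdue.
 */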
2714
2715 /*
2716 * Save and compile IPv4 options, return a pointer to them
2717 */
tcp_v4_save_options(struct net * net,struct sk_buff * skb)2718 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2719 struct sk_buff *skb)
2720 {
2721 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2722 struct ip_options_rcu *dopt = NULL;
2723
2724 if (opt->optlen) {
2725 int opt_size = sizeof(*dopt) + opt->optlen;
2726
2727 dopt = kmalloc(opt_size, GFP_ATOMIC);
2728 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2729 kfree(dopt);
2730 dopt = NULL;
2731 }
2732 }
2733 return dopt;
2734 }
2735
2736 /* locally generated TCP pure ACKs have skb->truesize == 2
2737 * (check tcp_send_ack() in net/ipv4/tcp_output.c).
2738 * This is much faster than dissecting the packet to find out.
2739 * (Think of GRE encapsulations, IPv4, IPv6, ...)
2740 */
skb_is_tcp_pure_ack(const struct sk_buff * skb)2741 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2742 {
2743 return skb->truesize == 2;
2744 }
2745
skb_set_tcp_pure_ack(struct sk_buff * skb)2746 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2747 {
2748 skb->truesize = 2;
2749 }
2750
tcp_inq(struct sock * sk)2751 static inline int tcp_inq(struct sock *sk)
2752 {
2753 struct tcp_sock *tp = tcp_sk(sk);
2754 int answ;
2755
2756 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2757 answ = 0;
2758 } else if (sock_flag(sk, SOCK_URGINLINE) ||
2759 !tp->urg_data ||
2760 before(tp->urg_seq, tp->copied_seq) ||
2761 !before(tp->urg_seq, tp->rcv_nxt)) {
2762
2763 answ = tp->rcv_nxt - tp->copied_seq;
2764
2765 /* Subtract 1, if FIN was received */
2766 if (answ && sock_flag(sk, SOCK_DONE))
2767 answ--;
2768 } else {
2769 answ = tp->urg_seq - tp->copied_seq;
2770 }
2771
2772 return answ;
2773 }
2774
2775 int tcp_peek_len(struct socket *sock);
2776
tcp_segs_in(struct tcp_sock * tp,const struct sk_buff * skb)2777 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2778 {
2779 u16 segs_in;
2780
2781 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2782
2783 /* We update these fields while other threads might
2784 * read them from tcp_get_info()
2785 */
2786 WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2787 if (skb->len > tcp_hdrlen(skb))
2788 WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2789 }
2790
2791 /*
2792 * TCP listen path runs lockless.
2793 * We forced "struct sock" to be const qualified to make sure
2794 * we don't modify one of its fields by mistake.
2795 * Here, we increment sk_drops which is an atomic_t, so we can safely
2796 * make sock writable again.
2797 */
tcp_listendrop(const struct sock * sk)2798 static inline void tcp_listendrop(const struct sock *sk)
2799 {
2800 sk_drops_inc((struct sock *)sk);
2801 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2802 }
2803
2804 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2805
2806 /*
2807 * Interface for adding Upper Level Protocols over TCP
2808 */
2809
2810 #define TCP_ULP_NAME_MAX 16
2811 #define TCP_ULP_MAX 128
2812 #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2813
2814 struct tcp_ulp_ops {
2815 struct list_head list;
2816
2817 /* initialize ulp */
2818 int (*init)(struct sock *sk);
2819 /* update ulp */
2820 void (*update)(struct sock *sk, struct proto *p,
2821 void (*write_space)(struct sock *sk));
2822 /* cleanup ulp */
2823 void (*release)(struct sock *sk);
2824 /* diagnostic */
2825 int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
2826 size_t (*get_info_size)(const struct sock *sk, bool net_admin);
2827 /* clone ulp */
2828 void (*clone)(const struct request_sock *req, struct sock *newsk,
2829 const gfp_t priority);
2830
2831 char name[TCP_ULP_NAME_MAX];
2832 struct module *owner;
2833 };
2834 int tcp_register_ulp(struct tcp_ulp_ops *type);
2835 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2836 int tcp_set_ulp(struct sock *sk, const char *name);
2837 void tcp_get_available_ulp(char *buf, size_t len);
2838 void tcp_cleanup_ulp(struct sock *sk);
2839 void tcp_update_ulp(struct sock *sk, struct proto *p,
2840 void (*write_space)(struct sock *sk));
2841
2842 #define MODULE_ALIAS_TCP_ULP(name) \
2843 MODULE_INFO(alias, name); \
2844 MODULE_INFO(alias, "tcp-ulp-" name)
2845
2846 #ifdef CONFIG_NET_SOCK_MSG
2847 struct sk_msg;
2848 struct sk_psock;
2849
2850 #ifdef CONFIG_BPF_SYSCALL
2851 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2852 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2853 #ifdef CONFIG_BPF_STREAM_PARSER
2854 struct strparser;
2855 int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
2856 sk_read_actor_t recv_actor);
2857 #endif /* CONFIG_BPF_STREAM_PARSER */
2858 #endif /* CONFIG_BPF_SYSCALL */
2859
2860 #ifdef CONFIG_INET
2861 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2862 #else
tcp_eat_skb(struct sock * sk,struct sk_buff * skb)2863 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2864 {
2865 }
2866 #endif
2867
2868 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2869 struct sk_msg *msg, u32 bytes, int flags);
2870 #endif /* CONFIG_NET_SOCK_MSG */
2871
2872 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
tcp_bpf_clone(const struct sock * sk,struct sock * newsk)2873 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2874 {
2875 }
2876 #endif
2877
2878 #ifdef CONFIG_CGROUP_BPF
bpf_skops_init_skb(struct bpf_sock_ops_kern * skops,struct sk_buff * skb,unsigned int end_offset)2879 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2880 struct sk_buff *skb,
2881 unsigned int end_offset)
2882 {
2883 skops->skb = skb;
2884 skops->skb_data_end = skb->data + end_offset;
2885 }
2886 #else
bpf_skops_init_skb(struct bpf_sock_ops_kern * skops,struct sk_buff * skb,unsigned int end_offset)2887 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2888 struct sk_buff *skb,
2889 unsigned int end_offset)
2890 {
2891 }
2892 #endif
2893
2894 /* Call BPF_SOCK_OPS program that returns an int. If the return value
2895 * is < 0, then the BPF op failed (for example if the loaded BPF
2896 * program does not support the chosen operation or there is no BPF
2897 * program loaded).
2898 */
2899 #ifdef CONFIG_BPF
tcp_call_bpf(struct sock * sk,int op,u32 nargs,u32 * args)2900 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2901 {
2902 struct bpf_sock_ops_kern sock_ops;
2903 int ret;
2904
2905 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2906 if (sk_fullsock(sk)) {
2907 sock_ops.is_fullsock = 1;
2908 sock_ops.is_locked_tcp_sock = 1;
2909 sock_owned_by_me(sk);
2910 }
2911
2912 sock_ops.sk = sk;
2913 sock_ops.op = op;
2914 if (nargs > 0)
2915 memcpy(sock_ops.args, args, nargs * sizeof(*args));
2916
2917 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2918 if (ret == 0)
2919 ret = sock_ops.reply;
2920 else
2921 ret = -1;
2922 return ret;
2923 }
2924
tcp_call_bpf_2arg(struct sock * sk,int op,u32 arg1,u32 arg2)2925 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2926 {
2927 u32 args[2] = {arg1, arg2};
2928
2929 return tcp_call_bpf(sk, op, 2, args);
2930 }
2931
tcp_call_bpf_3arg(struct sock * sk,int op,u32 arg1,u32 arg2,u32 arg3)2932 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2933 u32 arg3)
2934 {
2935 u32 args[3] = {arg1, arg2, arg3};
2936
2937 return tcp_call_bpf(sk, op, 3, args);
2938 }
2939
2940 #else
tcp_call_bpf(struct sock * sk,int op,u32 nargs,u32 * args)2941 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2942 {
2943 return -EPERM;
2944 }
2945
tcp_call_bpf_2arg(struct sock * sk,int op,u32 arg1,u32 arg2)2946 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2947 {
2948 return -EPERM;
2949 }
2950
tcp_call_bpf_3arg(struct sock * sk,int op,u32 arg1,u32 arg2,u32 arg3)2951 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2952 u32 arg3)
2953 {
2954 return -EPERM;
2955 }
2956
2957 #endif
2958
tcp_timeout_init(struct sock * sk)2959 static inline u32 tcp_timeout_init(struct sock *sk)
2960 {
2961 int timeout;
2962
2963 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2964
2965 if (timeout <= 0)
2966 timeout = TCP_TIMEOUT_INIT;
2967 return min_t(int, timeout, TCP_RTO_MAX);
2968 }
2969
tcp_rwnd_init_bpf(struct sock * sk)2970 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2971 {
2972 int rwnd;
2973
2974 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2975
2976 if (rwnd < 0)
2977 rwnd = 0;
2978 return rwnd;
2979 }
2980
tcp_bpf_ca_needs_ecn(struct sock * sk)2981 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2982 {
2983 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2984 }
2985
tcp_bpf_rtt(struct sock * sk,long mrtt,u32 srtt)2986 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2987 {
2988 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2989 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2990 }
2991
2992 #if IS_ENABLED(CONFIG_SMC)
2993 extern struct static_key_false tcp_have_smc;
2994 #endif
2995
2996 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2997 void clean_acked_data_enable(struct tcp_sock *tp,
2998 void (*cad)(struct sock *sk, u32 ack_seq));
2999 void clean_acked_data_disable(struct tcp_sock *tp);
3000 void clean_acked_data_flush(void);
3001 #endif
3002
3003 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
tcp_add_tx_delay(struct sk_buff * skb,const struct tcp_sock * tp)3004 static inline void tcp_add_tx_delay(struct sk_buff *skb,
3005 const struct tcp_sock *tp)
3006 {
3007 if (static_branch_unlikely(&tcp_tx_delay_enabled))
3008 skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
3009 }
3010
3011 /* Compute Earliest Departure Time for some control packets
3012 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
3013 */
tcp_transmit_time(const struct sock * sk)3014 static inline u64 tcp_transmit_time(const struct sock *sk)
3015 {
3016 if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
3017 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
3018 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
3019
3020 return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
3021 }
3022 return 0;
3023 }
3024
tcp_parse_auth_options(const struct tcphdr * th,const u8 ** md5_hash,const struct tcp_ao_hdr ** aoh)3025 static inline int tcp_parse_auth_options(const struct tcphdr *th,
3026 const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
3027 {
3028 const u8 *md5_tmp, *ao_tmp;
3029 int ret;
3030
3031 ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
3032 if (ret)
3033 return ret;
3034
3035 if (md5_hash)
3036 *md5_hash = md5_tmp;
3037
3038 if (aoh) {
3039 if (!ao_tmp)
3040 *aoh = NULL;
3041 else
3042 *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
3043 }
3044
3045 return 0;
3046 }
3047
tcp_ao_required(struct sock * sk,const void * saddr,int family,int l3index,bool stat_inc)3048 static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
3049 int family, int l3index, bool stat_inc)
3050 {
3051 #ifdef CONFIG_TCP_AO
3052 struct tcp_ao_info *ao_info;
3053 struct tcp_ao_key *ao_key;
3054
3055 if (!static_branch_unlikely(&tcp_ao_needed.key))
3056 return false;
3057
3058 ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
3059 lockdep_sock_is_held(sk));
3060 if (!ao_info)
3061 return false;
3062
3063 ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
3064 if (ao_info->ao_required || ao_key) {
3065 if (stat_inc) {
3066 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
3067 atomic64_inc(&ao_info->counters.ao_required);
3068 }
3069 return true;
3070 }
3071 #endif
3072 return false;
3073 }
3074
3075 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
3076 const struct request_sock *req, const struct sk_buff *skb,
3077 const void *saddr, const void *daddr,
3078 int family, int dif, int sdif);
3079
tcp_recv_should_stop(struct sock * sk)3080 static inline int tcp_recv_should_stop(struct sock *sk)
3081 {
3082 return sk->sk_err ||
3083 sk->sk_state == TCP_CLOSE ||
3084 (sk->sk_shutdown & RCV_SHUTDOWN) ||
3085 signal_pending(current);
3086 }
3087
3088 INDIRECT_CALLABLE_DECLARE(union tcp_seq_and_ts_off
3089 tcp_v4_init_seq_and_ts_off(const struct net *net,
3090 const struct sk_buff *skb));
3091 INDIRECT_CALLABLE_DECLARE(union tcp_seq_and_ts_off
3092 tcp_v6_init_seq_and_ts_off(const struct net *net,
3093 const struct sk_buff *skb));
3094 #endif /* _TCP_H */
3095