// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
#include <net/psp.h>

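/*
 * RFC 793 style acceptability test: a segment [seq, end_seq) is acceptable
 * if it overlaps the receive window [s_win, e_win), with the usual special
 * cases for zero-length segments sitting exactly on a window edge.
 */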
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

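/*
 * Decide whether to answer an out-of-window segment hitting a timewait
 * socket with an ACK. ACKs are rate-limited (see tcp_oow_rate_limited())
 * to avoid ACK storms; when rate-limited we simply drop the segment and
 * release the timewait reference here.
 */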
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note, we do not put the bucket,
                 * it will be released by caller.
                 */
                return TCP_TW_ACK_OOW;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

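/*
 * Advance tw_rcv_nxt. With TCP-AO enabled, a wrap of the receive sequence
 * space must also bump the receive SNE (sequence number extension) that is
 * mixed into AO signatures, so detect the wrap before storing the new value.
 */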
static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
                                u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
        struct tcp_ao_info *ao;

        ao = rcu_dereference(tcptw->ao_info);
        if (unlikely(ao && seq < rcv_nxt))
                WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
        WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   That is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, this means we would have to
 * spinlock it. I do not want to! The probability of misbehaviour is
 * ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the result.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th, u32 *tw_isn,
                           enum skb_drop_reason *drop_reason)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt);
        struct tcp_options_received tmp_opt;
        enum skb_drop_reason psp_drop;
        bool paws_reject = false;
        int ts_recent_stamp;

        /* Instead of dropping immediately, wait to see what value is
         * returned. We will accept a non psp-encapsulated syn in the
         * case where TCP_TW_SYN is returned.
         */
        psp_drop = psp_twsk_rx_policy_check(tw, skb);

        tmp_opt.saw_tstamp = 0;
        ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
        if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent);
                        tmp_opt.ts_recent_stamp = ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                if (psp_drop)
                        goto out_put;

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   rcv_nxt,
                                   rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT);
                twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq,
                                    rcv_nxt);

                if (tmp_opt.saw_tstamp) {
                        u64 ts = tcp_clock_ms();

                        WRITE_ONCE(tw->tw_entry_stamp, ts);
                        WRITE_ONCE(tcptw->tw_ts_recent_stamp,
                                   div_u64(ts, MSEC_PER_SEC));
                        WRITE_ONCE(tcptw->tw_ts_recent,
                                   tmp_opt.rcv_tsval);
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         * Now real TIME-WAIT state.
         *
         * RFC 1122:
         * "When a connection is [...] in TIME-WAIT state [...]
         * [a TCP] MAY accept a new SYN from the remote TCP to
         * reopen the connection directly, if it:
         *
         * (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         * (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* An in-window segment; it may only be a reset or a bare ACK. */

                if (psp_drop)
                        goto out_put;

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                } else {
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }

                if (tmp_opt.saw_tstamp) {
                        WRITE_ONCE(tcptw->tw_ts_recent,
                                   tmp_opt.rcv_tsval);
                        WRITE_ONCE(tcptw->tw_ts_recent_stamp,
                                   ktime_get_seconds());
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All such segments are ACKed immediately.

           The only exception is a new SYN. We accept it, if it is
           not an old duplicate and we are not in danger of being killed
           by delayed old duplicates. The RFC check, that it carries a
           newer sequence number, works at rates <40Mbit/sec.
           However, if PAWS works, it is reliable AND, even more,
           we may even relax the silly seq space cutoff.

           RED-PEN: we violate the main RFC requirement: if this SYN turns
           out to be an old duplicate (i.e. we receive an RST in reply to
           our SYN-ACK), we must return the socket to time-wait state. It
           is not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

                if (isn == 0)
                        isn++;
                *tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (psp_drop)
                goto out_put;

        if (paws_reject) {
                *drop_reason = SKB_DROP_REASON_TCP_RFC7323_TW_PAWS;
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWS_TW_REJECTED);
        }

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN it may be both an old duplicate
                 * and a new good SYN with a random sequence number <rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }

out_put:
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;

        /*
         * The timewait bucket does not have the key DB from the
         * sock structure. We just make a quick copy of the
         * md5 key being used (if indeed we are using one)
         * so the timewait ack generating code has the key.
         */
        tcptw->tw_md5_key = NULL;
        if (!static_branch_unlikely(&tcp_md5_needed.key))
                return;

        key = tp->af_specific->md5_lookup(sk, sk);
        if (key) {
                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                if (!tcptw->tw_md5_key)
                        return;
                if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
                        goto out_free;
        }
        return;
out_free:
        WARN_ON_ONCE(1);
        kfree(tcptw->tw_md5_key);
        tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_timewait_sock *tw;

        tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

                tw->tw_mark = sk->sk_mark;
                tw->tw_priority = READ_ONCE(sk->sk_priority);
                tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
                /* refreshed when we enter true TIME-WAIT state */
                tw->tw_entry_stamp = tcp_time_stamp_ms(tp);
                tcptw->tw_rcv_nxt = tp->rcv_nxt;
                tcptw->tw_snd_nxt = tp->snd_nxt;
                tcptw->tw_rcv_wnd = tcp_receive_window(tp);
                tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset = tp->tsoffset;
                tw->tw_usec_ts = tp->tcp_usec_ts;
                tcptw->tw_last_oow_ack_time = 0;
                tcptw->tw_tx_delay = tp->tcp_tx_delay;
                tw->tw_txhash = sk->sk_txhash;
                tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
                tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping;
#endif
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

                tcp_time_wait_init(sk, tcptw);
                tcp_ao_time_wait(tcptw, tp);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up. We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

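/*
 * Release the per-timewait state: the copied MD5 key (and its reference
 * on the tcp_md5_needed static key), any TCP-AO info, and the PSP
 * association tied to this timewait socket.
 */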
void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        if (static_branch_unlikely(&tcp_md5_needed.key)) {
                struct tcp_timewait_sock *twsk = tcp_twsk(sk);

                if (twsk->tw_md5_key) {
                        kfree(twsk->tw_md5_key);
                        static_branch_slow_dec_deferred(&tcp_md5_needed);
                }
        }
#endif
        tcp_ao_destroy_sock(sk, true);
        psp_twsk_assoc_free(inet_twsk(sk));
}

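/*
 * Called at netns teardown: kill the remaining timewait sockets. A netns
 * with its own ehash table is purged individually; all netns sharing the
 * global tcp_hashinfo are handled by a single purge pass.
 */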
void tcp_twsk_purge(struct list_head *net_exit_list)
{
        bool purged_once = false;
        struct net *net;

        list_for_each_entry(net, net_exit_list, exit_list) {
                if (net->ipv4.tcp_death_row.hashinfo->pernet) {
                        /* Even if tw_refcount == 1, we must clean up kernel reqsk */
                        inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
                } else if (!purged_once) {
                        inet_twsk_purge(&tcp_hashinfo);
                        purged_once = true;
                }
        }
}

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;

        /* tcp_full_space because it is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                                  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                                  &req->rsk_rcv_wnd,
                                  &req->rsk_window_clamp,
                                  ireq->wscale_ok,
                                  &rcv_wscale,
                                  rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}

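/*
 * Carry the ECN state negotiated on the request socket over to the child:
 * Accurate ECN (with its fail-mode flags and the ECT codepoints seen on the
 * SYN), classic RFC 3168 ECN if the peer and the congestion control allow
 * it, or ECN disabled otherwise.
 */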
static void tcp_ecn_openreq_child(struct sock *sk,
                                  const struct request_sock *req,
                                  const struct sk_buff *skb)
{
        const struct tcp_request_sock *treq = tcp_rsk(req);
        struct tcp_sock *tp = tcp_sk(sk);

        if (treq->accecn_ok) {
                tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
                tp->syn_ect_snt = treq->syn_ect_snt;
                tcp_accecn_third_ack(sk, skb, treq->syn_ect_snt);
                tp->saw_accecn_opt = treq->saw_accecn_opt;
                if (treq->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_SEND)
                        tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_SEND);
                if (treq->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_RECV)
                        tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV);
                tp->prev_ecnfield = treq->syn_ect_rcv;
                tp->accecn_opt_demand = 1;
                tcp_ecn_received_counters_payload(sk, skb);
        } else {
                if (inet_rsk(req)->ecn_ok && !tcp_ca_no_fallback_rfc3168(sk))
                        tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
                else
                        tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
        }
}

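/*
 * Pick the congestion control for the freshly accepted child: prefer an
 * algorithm pinned on the route (RTAX_CC_ALGO metric), otherwise keep the
 * listener's choice if it was set explicitly via setsockopt(), and fall
 * back to the system default if neither can be used.
 */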
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice made yet, assign current system default ca. */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}

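/*
 * SMC fallback check: if the listener advertised SMC but the SYN did not
 * carry the SMC indication, clear syn_smc on the child so it continues as
 * plain TCP.
 */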
static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        const struct tcp_sock *oldtp;
        struct tcp_sock *newtp;
        u32 seq;

        if (!newsk)
                return NULL;

        newicsk = inet_csk(newsk);
        newtp = tcp_sk(newsk);
        oldtp = tcp_sk(sk);

        smc_check_reset_syn_req(oldtp, req, newtp);

        /* Now setup tcp_sock */
        newtp->pred_flags = 0;

        seq = treq->rcv_isn + 1;
        newtp->rcv_wup = seq;
        WRITE_ONCE(newtp->copied_seq, seq);
        WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;

        seq = treq->snt_isn + 1;
        newtp->snd_sml = newtp->snd_una = seq;
        WRITE_ONCE(newtp->snd_nxt, seq);
        newtp->snd_up = seq;

        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

        tcp_init_wl(newtp, treq->rcv_isn);

        minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
        newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

        newtp->lsndtime = tcp_jiffies32;
        newsk->sk_txhash = READ_ONCE(treq->txhash);
        newtp->total_retrans = req->num_retrans;

        tcp_init_xmit_timers(newsk);
        WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

        if (sock_flag(newsk, SOCK_KEEPOPEN))
                tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp));

        newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
        newtp->rx_opt.sack_ok = ireq->sack_ok;
        newtp->window_clamp = req->rsk_window_clamp;
        newtp->rcv_ssthresh = req->rsk_rcv_wnd;
        newtp->rcv_wnd = req->rsk_rcv_wnd;
        newtp->rcv_mwnd_seq = newtp->rcv_wup + req->rsk_rcv_wnd;
        newtp->rx_opt.wscale_ok = ireq->wscale_ok;
        if (newtp->rx_opt.wscale_ok) {
                newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
        } else {
                newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                newtp->window_clamp = min(newtp->window_clamp, 65535U);
        }
        newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
        newtp->max_window = newtp->snd_wnd;

        if (newtp->rx_opt.tstamp_ok) {
                newtp->tcp_usec_ts = treq->req_usec_ts;
                newtp->rx_opt.ts_recent = req->ts_recent;
                newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
                newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
        } else {
                newtp->tcp_usec_ts = 0;
                newtp->rx_opt.ts_recent_stamp = 0;
                newtp->tcp_header_len = sizeof(struct tcphdr);
        }
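        /* The SYNACK had to be retransmitted: seed the RTO-recovery
         * bookkeeping (undo marker, retrans timestamp and elapsed RTO time)
         * so the child starts out knowing about the earlier timeouts.
         */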
        if (req->num_timeout) {
                newtp->total_rto = req->num_timeout;
                newtp->undo_marker = treq->snt_isn;
                if (newtp->tcp_usec_ts) {
                        newtp->retrans_stamp = treq->snt_synack;
                        newtp->total_rto_time = (u32)(tcp_clock_us() -
                                                      newtp->retrans_stamp) / USEC_PER_MSEC;
                } else {
                        newtp->retrans_stamp = div_u64(treq->snt_synack,
                                                       USEC_PER_SEC / TCP_TS_HZ);
                        newtp->total_rto_time = tcp_clock_ms() -
                                                newtp->retrans_stamp;
                }
                newtp->total_rto_recoveries = 1;
        }
        newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
        newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
        newtp->ao_info = NULL;

        if (tcp_rsk_used_ao(req)) {
                struct tcp_ao_key *ao_key;

                ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
                if (ao_key)
                        newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
        }
#endif
        if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newsk, req, skb);
        newtp->fastopen_req = NULL;
        RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

        newtp->bpf_chg_cc_inprogress = 0;
        tcp_bpf_clone(sk, newsk);

        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

        xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1);

        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen, bool *req_stolen,
                           enum skb_drop_reason *drop_reason)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool tsecr_reject = false;
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        tmp_opt.accecn = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr) {
                                if (inet_rsk(req)->tstamp_ok && !fastopen)
                                        tsecr_reject = !between(tmp_opt.rcv_tsecr,
                                                        tcp_rsk(req)->snt_tsval_first,
                                                        READ_ONCE(tcp_rsk(req)->snt_tsval_last));
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        }
                        /* We do not store the true stamp, but it is not
                         * required; it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = ktime_get_seconds() -
                                                  tcp_reqsk_timeout(req) / HZ;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send an ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 *  describe SYN-RECV state. All the description
                 *  is wrong, we cannot believe it and should
                 *  rely only on common sense and implementation
                 *  experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK, similar
                 * to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time)) {
                        if (tcp_rsk(req)->accecn_ok) {
                                u8 ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
                                             INET_ECN_MASK;

                                tcp_rsk(req)->syn_ect_rcv = ect_rcv;
                                if (tcp_accecn_ace(tcp_hdr(skb)) == 0x0)
                                        tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_RECV;
                        }
                        if (!tcp_rtx_synack(sk, req)) {
                                unsigned long expires = jiffies;

                                if (req->num_retrans > 1 && tcp_rsk(req)->accecn_ok)
                                        tcp_rsk(req)->accecn_fail_mode |= TCP_ACCECN_ACE_FAIL_SEND;

                                expires += tcp_reqsk_timeout(req);
                                if (!fastopen)
                                        mod_timer_pending(&req->rsk_timer,
                                                          expires);
                                else
                                        req->rsk_timer.expires = expires;
                        }
                }
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
           for state SYN-RECEIVED of RFC793.
           It is broken, however: it fails to work only
           when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party. We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid. Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           The malicious sender sends identical SYNs (and thus identical
           sequence numbers) to both A and B:

           A: gets SYN, seq=7
           B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

           A: sends SYN|ACK, seq=7, ack_seq=8
           B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, the ACK test passes. So
           does the sequence test, the SYN is truncated, and thus we
           consider it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
           this bare ACK. Otherwise, we create an established connection.
           Both ends (listening sockets) accept the new incoming connection
           and try to talk to each other. 8-)

           Note: This case is both harmless and rare. The probability is
           about the same as us discovering intelligent life on another
           planet tomorrow.

           But generally, we should (the RFC lies!) accept an ACK
           on a SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not either.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating the protocol. All the checks must be made
           before an attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* RFC793: "first check sequence number". */

        if (paws_reject || tsecr_reject ||
            !tcp_in_window(TCP_SKB_CB(skb)->seq,
                           TCP_SKB_CB(skb)->end_seq,
                           tcp_rsk(req)->rcv_nxt,
                           tcp_rsk(req)->rcv_nxt +
                           tcp_synack_window(req))) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject) {
                        SKB_DR_SET(*drop_reason, TCP_RFC7323_PAWS);
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                } else if (tsecr_reject) {
                        SKB_DR_SET(*drop_reason, TCP_RFC7323_TSECR);
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TSECRREJECTED);
                } else {
                        SKB_DR_SET(*drop_reason, TCP_OVERWINDOW);
                }
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                   at tcp_rsk(req)->rcv_isn + 1. */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *	   "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* ACK sequence verified above, just make sure ACK is
         * set. If ACK not set, just silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

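        /* Record the state of the first AccECN option seen on this request,
         * so the child knows whether the peer sent the option cleanly; if
         * it indicates failure, mark AccECN option reception as failed.
         */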
        if (tcp_rsk(req)->accecn_ok && tmp_opt.accecn &&
            tcp_rsk(req)->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
                u8 saw_opt = tcp_accecn_option_init(skb, tmp_opt.accecn);

                tcp_rsk(req)->saw_accecn_opt = saw_opt;
                if (tcp_rsk(req)->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN) {
                        u8 fail_mode = TCP_ACCECN_OPT_FAIL_RECV;

                        tcp_rsk(req)->accecn_fail_mode |= fail_mode;
                }
        }

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;

        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, the ACK is valid, create the big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE SOCKET TO
         * ESTABLISHED STATE. If it is dropped after the
         * socket is created, expect trouble.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req, NULL);
        if (!child)
                goto listen_overflow;

        if (own_req && tmp_opt.saw_tstamp &&
            !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval;

        if (own_req && rsk_drop_req(req)) {
                reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
                inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
                return child;
        }

        sock_rps_save_rxhash(child, skb);
        tcp_synack_rtt_meas(child, req);
        *req_stolen = !own_req;
        return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
        SKB_DR_SET(*drop_reason, TCP_LISTEN_OVERFLOW);
        if (sk != req->rsk_listener)
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

        if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary, to
                 * avoid becoming vulnerable to outside attacks aiming at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk, skb);
        }
        if (!fastopen) {
                bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

                if (unlinked)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
                *req_stolen = !unlinked;
        }
        return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
                                       struct sk_buff *skb)
        __releases(&((child)->sk_lock.slock))
{
        enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
        int state = child->sk_state;

        /* record sk_napi_id and sk_rx_queue_mapping of child. */
        sk_mark_napi_id_set(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                reason = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        READ_ONCE(parent->sk_data_ready)(parent);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the
                 * listening socket does not protect us any more.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return reason;
}