1 // SPDX-License-Identifier: GPL-2.0-only
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
33 * Cacophonix Gaul : draft-minshall-nagle-01
57 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
58 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
69 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
71 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
73 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
74 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
76 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
77 tp->highest_sack = skb; in tcp_event_new_data_sent()
79 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
80 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_event_new_data_sent()
91 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
99 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || in tcp_acceptable_seq()
100 (tp->rx_opt.wscale_ok && in tcp_acceptable_seq()
101 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) in tcp_acceptable_seq()
102 return tp->snd_nxt; in tcp_acceptable_seq()
108 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
111 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
125 int mss = tp->advmss; in tcp_advertise_mss()
132 tp->advmss = mss; in tcp_advertise_mss()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
156 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_restart()
157 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
170 tp->lsndtime = now; in tcp_event_data_sent()
175 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
184 if (unlikely(tp->compressed_ack)) { in tcp_event_ack_sent()
186 tp->compressed_ack); in tcp_event_ack_sent()
187 tp->compressed_ack = 0; in tcp_event_ack_sent()
188 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_event_ack_sent()
192 if (unlikely(rcv_nxt != tp->rcv_nxt)) in tcp_event_ack_sent()
224 * we will truncate our initial window offering to 32K-1 in tcp_select_initial_window()
229 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_initial_window()
240 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_select_initial_window()
243 *rcv_wscale = clamp_t(int, ilog2(space) - 15, in tcp_select_initial_window()
253 * value can be stuffed directly into th->window for an outgoing
260 u32 old_win = tp->rcv_wnd; in tcp_select_window()
267 if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) in tcp_select_window()
276 * window in time. --DaveM in tcp_select_window()
280 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { in tcp_select_window()
284 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
288 tp->rcv_wnd = new_win; in tcp_select_window()
289 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
294 if (!tp->rx_opt.rcv_wscale && in tcp_select_window()
295 READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_window()
298 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
301 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
305 tp->pred_flags = 0; in tcp_select_window()
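/* Editor's sketch (not kernel code): how the shift in tcp_select_window()
 * above lets the 16-bit th->window field advertise more than 64 KB. The
 * wscale value 7 is an arbitrary example. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rcv_wnd = 1u << 20;	/* 1 MiB receive window */
	unsigned int rcv_wscale = 7;	/* negotiated on the SYN */
	uint32_t capped = rcv_wnd;

	if (capped > (65535u << rcv_wscale))
		capped = 65535u << rcv_wscale;

	/* on-wire value; the peer reconstructs it by shifting back */
	uint16_t th_window = capped >> rcv_wscale;

	printf("advertise %u, peer sees %u bytes\n",
	       th_window, (uint32_t)th_window << rcv_wscale);
	return 0;
}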
315 /* Packet ECN state for a SYN-ACK */
320 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
321 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
322 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
333 bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 || in tcp_ecn_send_syn()
343 tp->ecn_flags = 0; in tcp_ecn_send_syn()
346 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
347 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
355 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)) in tcp_ecn_clear_syn()
356 /* tp->ecn_flags are cleared at a later point in time when in tcp_ecn_clear_syn()
359 TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR); in tcp_ecn_clear_syn()
365 if (inet_rsk(req)->ecn_ok) in tcp_ecn_make_synack()
366 th->ece = 1; in tcp_ecn_make_synack()
377 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
378 /* Not-retransmitted data segment: set ECT and inject CWR. */ in tcp_ecn_send()
379 if (skb->len != tcp_header_len && in tcp_ecn_send()
380 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
382 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
383 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
384 th->cwr = 1; in tcp_ecn_send()
385 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
391 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
392 th->ece = 1; in tcp_ecn_send()
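/* Editor's standalone mock of the sender-side ECN bookkeeping above:
 * CWR is a one-shot flag raised after a local cwnd reduction, while ECE
 * is echoed for as long as DEMAND_CWR is set. The flag names mirror the
 * kernel's TCP_ECN_* but this is not kernel code. */
#include <stdbool.h>

#define ECN_OK		0x1
#define ECN_QUEUE_CWR	0x2	/* must signal CWR once */
#define ECN_DEMAND_CWR	0x4	/* keep echoing ECE */

struct hdr_bits { bool ece, cwr; };

static struct hdr_bits ecn_fill_header(unsigned int *ecn_flags)
{
	struct hdr_bits h = { false, false };

	if (!(*ecn_flags & ECN_OK))
		return h;
	if (*ecn_flags & ECN_QUEUE_CWR) {
		*ecn_flags &= ~ECN_QUEUE_CWR;	/* one-shot */
		h.cwr = true;
	}
	if (*ecn_flags & ECN_DEMAND_CWR)
		h.ece = true;
	return h;
}

int main(void)
{
	unsigned int flags = ECN_OK | ECN_QUEUE_CWR;
	struct hdr_bits h = ecn_fill_header(&flags);

	return h.cwr && !h.ece ? 0 : 1;
}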
396 /* Constructs common control bits of non-data skb. If SYN/FIN is present,
401 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
403 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
407 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
410 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
415 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
460 if (unlikely(OPTION_MPTCP & opts->options)) in mptcp_options_write()
461 mptcp_write_options(th, ptr, tp, &opts->mptcp); in mptcp_options_write()
507 * Thus, "req" is passed here and the cgroup-bpf-progs in bpf_skops_hdr_opt_len()
512 * consistent between fastopen and non-fastopen on in bpf_skops_hdr_opt_len()
535 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len; in bpf_skops_hdr_opt_len()
537 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3; in bpf_skops_hdr_opt_len()
539 *remaining -= opts->bpf_opt_len; in bpf_skops_hdr_opt_len()
548 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len; in bpf_skops_write_hdr_opt()
571 first_opt_off = tcp_hdrlen(skb) - max_opt_len; in bpf_skops_write_hdr_opt()
579 nr_written = max_opt_len - sock_ops.remaining_opt_len; in bpf_skops_write_hdr_opt()
582 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP, in bpf_skops_write_hdr_opt()
583 max_opt_len - nr_written); in bpf_skops_write_hdr_opt()
610 u8 maclen = tcp_ao_maclen(key->ao_key); in process_tcp_ao_options()
616 (tcprsk->ao_keyid << 8) | in process_tcp_ao_options()
617 (tcprsk->ao_rcv_next)); in process_tcp_ao_options()
622 ao_info = rcu_dereference_check(tp->ao_info, in process_tcp_ao_options()
623 lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); in process_tcp_ao_options()
624 rnext_key = READ_ONCE(ao_info->rnext_key); in process_tcp_ao_options()
628 (tcp_ao_len(key->ao_key) << 16) | in process_tcp_ao_options()
629 (key->ao_key->sndid << 8) | in process_tcp_ao_options()
630 (rnext_key->rcvid)); in process_tcp_ao_options()
632 opts->hash_location = (__u8 *)ptr; in process_tcp_ao_options()
646 * Luckily we can at least blame others for their non-compliance, but from
647 * an inter-operability perspective it seems that we're somewhat stuck with
661 u16 options = opts->options; /* mungable copy */ in tcp_options_write()
667 opts->hash_location = (__u8 *)ptr; in tcp_options_write()
672 if (unlikely(opts->mss)) { in tcp_options_write()
675 opts->mss); in tcp_options_write()
691 *ptr++ = htonl(opts->tsval); in tcp_options_write()
692 *ptr++ = htonl(opts->tsecr); in tcp_options_write()
706 opts->ws); in tcp_options_write()
709 if (unlikely(opts->num_sack_blocks)) { in tcp_options_write()
710 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
711 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
717 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * in tcp_options_write()
720 for (this_sack = 0; this_sack < opts->num_sack_blocks; in tcp_options_write()
726 tp->rx_opt.dsack = 0; in tcp_options_write()
730 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; in tcp_options_write()
734 if (foc->exp) { in tcp_options_write()
735 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; in tcp_options_write()
740 len = TCPOLEN_FASTOPEN_BASE + foc->len; in tcp_options_write()
745 memcpy(p, foc->val, foc->len); in tcp_options_write()
747 p[foc->len] = TCPOPT_NOP; in tcp_options_write()
748 p[foc->len + 1] = TCPOPT_NOP; in tcp_options_write()
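/* Editor's sketch of the TLV layout written above: options are packed
 * as 32-bit words, e.g. the MSS option is kind 2, length 4, value in the
 * low 16 bits. The constants are the RFC 9293 ones, not pulled from
 * kernel headers. */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_MSS	2
#define TCPOLEN_MSS	4

static void write_mss_option(uint8_t *p, uint16_t mss)
{
	uint32_t word = htonl(((uint32_t)TCPOPT_MSS << 24) |
			      ((uint32_t)TCPOLEN_MSS << 16) | mss);

	memcpy(p, &word, sizeof(word));	/* kind | len | mss */
}

int main(void)
{
	uint8_t opt[4];

	write_mss_option(opt, 1460);
	return opt[0] == TCPOPT_MSS ? 0 : 1;
}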
764 if (tp->syn_smc) { in smc_set_option()
766 opts->options |= OPTION_SMC; in smc_set_option()
767 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option()
781 if (tp->syn_smc && ireq->smc_ok) { in smc_set_option_cond()
783 opts->options |= OPTION_SMC; in smc_set_option_cond()
784 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option_cond()
798 if (mptcp_synack_options(req, &size, &opts->mptcp)) { in mptcp_set_option_cond()
800 opts->options |= OPTION_MPTCP; in mptcp_set_option_cond()
801 *remaining -= size; in mptcp_set_option_cond()
816 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
822 opts->options |= OPTION_MD5; in tcp_syn_options()
823 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_syn_options()
825 timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps); in tcp_syn_options()
827 opts->options |= OPTION_AO; in tcp_syn_options()
828 remaining -= tcp_ao_len_aligned(key->ao_key); in tcp_syn_options()
834 * advertised. But we subtract them from tp->mss_cache so that in tcp_syn_options()
841 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
842 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_syn_options()
845 opts->options |= OPTION_TS; in tcp_syn_options()
846 opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset; in tcp_syn_options()
847 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
848 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_syn_options()
850 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { in tcp_syn_options()
851 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
852 opts->options |= OPTION_WSCALE; in tcp_syn_options()
853 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_syn_options()
855 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { in tcp_syn_options()
856 opts->options |= OPTION_SACK_ADVERTISE; in tcp_syn_options()
857 if (unlikely(!(OPTION_TS & opts->options))) in tcp_syn_options()
858 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_syn_options()
861 if (fastopen && fastopen->cookie.len >= 0) { in tcp_syn_options()
862 u32 need = fastopen->cookie.len; in tcp_syn_options()
864 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_syn_options()
868 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_syn_options()
869 opts->fastopen_cookie = &fastopen->cookie; in tcp_syn_options()
870 remaining -= need; in tcp_syn_options()
871 tp->syn_fastopen = 1; in tcp_syn_options()
872 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
881 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { in tcp_syn_options()
882 opts->options |= OPTION_MPTCP; in tcp_syn_options()
883 remaining -= size; in tcp_syn_options()
889 return MAX_TCP_OPTION_SPACE - remaining; in tcp_syn_options()
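/* Editor's back-of-envelope version of the budget accounting in
 * tcp_syn_options() above. The aligned lengths are the standard values;
 * note that SACK-permitted costs nothing extra when timestamps are on,
 * because it rides in the timestamp block's padding. */
#include <stdio.h>

enum {
	MAX_TCP_OPTION_SPACE	= 40,	/* 60-byte max header minus 20 */
	TCPOLEN_MSS_ALIGNED	= 4,
	TCPOLEN_TSTAMP_ALIGNED	= 12,
	TCPOLEN_WSCALE_ALIGNED	= 4,
};

int main(void)
{
	int remaining = MAX_TCP_OPTION_SPACE;

	remaining -= TCPOLEN_MSS_ALIGNED;	/* always on a SYN */
	remaining -= TCPOLEN_TSTAMP_ALIGNED;	/* timestamps enabled */
	remaining -= TCPOLEN_WSCALE_ALIGNED;	/* window scaling */

	printf("used %d, %d bytes left for MD5/AO/fastopen\n",
	       MAX_TCP_OPTION_SPACE - remaining, remaining);
	return 0;
}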
892 /* Set up TCP options for SYN-ACKs. */
906 opts->options |= OPTION_MD5; in tcp_synack_options()
907 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_synack_options()
915 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
917 opts->options |= OPTION_AO; in tcp_synack_options()
918 remaining -= tcp_ao_len_aligned(key->ao_key); in tcp_synack_options()
919 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
923 opts->mss = mss; in tcp_synack_options()
924 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_synack_options()
926 if (likely(ireq->wscale_ok)) { in tcp_synack_options()
927 opts->ws = ireq->rcv_wscale; in tcp_synack_options()
928 opts->options |= OPTION_WSCALE; in tcp_synack_options()
929 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_synack_options()
931 if (likely(ireq->tstamp_ok)) { in tcp_synack_options()
932 opts->options |= OPTION_TS; in tcp_synack_options()
933 opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) + in tcp_synack_options()
934 tcp_rsk(req)->ts_off; in tcp_synack_options()
935 opts->tsecr = READ_ONCE(req->ts_recent); in tcp_synack_options()
936 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_synack_options()
938 if (likely(ireq->sack_ok)) { in tcp_synack_options()
939 opts->options |= OPTION_SACK_ADVERTISE; in tcp_synack_options()
940 if (unlikely(!ireq->tstamp_ok)) in tcp_synack_options()
941 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_synack_options()
943 if (foc != NULL && foc->len >= 0) { in tcp_synack_options()
944 u32 need = foc->len; in tcp_synack_options()
946 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_synack_options()
950 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_synack_options()
951 opts->fastopen_cookie = foc; in tcp_synack_options()
952 remaining -= need; in tcp_synack_options()
963 return MAX_TCP_OPTION_SPACE - remaining; in tcp_synack_options()
977 opts->options = 0; in tcp_established_options()
981 opts->options |= OPTION_MD5; in tcp_established_options()
984 opts->options |= OPTION_AO; in tcp_established_options()
985 size += tcp_ao_len_aligned(key->ao_key); in tcp_established_options()
988 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
989 opts->options |= OPTION_TS; in tcp_established_options()
990 opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + in tcp_established_options()
991 tp->tsoffset : 0; in tcp_established_options()
992 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
1003 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1007 &opts->mptcp)) { in tcp_established_options()
1008 opts->options |= OPTION_MPTCP; in tcp_established_options()
1013 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
1015 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1020 opts->num_sack_blocks = in tcp_established_options()
1022 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / in tcp_established_options()
1026 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; in tcp_established_options()
1031 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
1035 size = MAX_TCP_OPTION_SPACE - remaining; in tcp_established_options()
1050 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
1064 if ((1 << sk->sk_state) & in tcp_tsq_write()
1069 if (tp->lost_out > tp->retrans_out && in tcp_tsq_write()
1075 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
1085 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
1092 * transferring tsq->head because tcp_wfree() might
1093 * interrupt us (non-NAPI drivers)
1105 list_splice_init(&tsq->head, &list); in tcp_tasklet_func()
1110 list_del(&tp->tsq_node); in tcp_tasklet_func()
1114 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tasklet_func()
1127 * tcp_release_cb - tcp release_sock() callback
1135 unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); in tcp_release_cb()
1143 } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); in tcp_release_cb()
1159 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1174 INIT_LIST_HEAD(&tsq->head); in tcp_tasklet_init()
1175 tasklet_setup(&tsq->tasklet, tcp_tasklet_func); in tcp_tasklet_init()
1186 struct sock *sk = skb->sk; in tcp_wfree()
1195 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
1200 * - less callbacks to tcp_write_xmit(), reducing stress (batches) in tcp_wfree()
1201 * - chance for incoming ACK (processed by another cpu maybe) in tcp_wfree()
1202 * to migrate this flow (skb->ooo_okay will be eventually set) in tcp_wfree()
1204 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
1207 oval = smp_load_acquire(&sk->sk_tsq_flags); in tcp_wfree()
1213 } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); in tcp_wfree()
1218 empty = list_empty(&tsq->head); in tcp_wfree()
1219 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
1221 tasklet_schedule(&tsq->tasklet); in tcp_wfree()
1247 if (sk->sk_pacing_status != SK_PACING_NONE) { in tcp_update_skb_after_send()
1248 unsigned long rate = READ_ONCE(sk->sk_pacing_rate); in tcp_update_skb_after_send()
1251 * Note that tp->data_segs_out overflows after 2^32 packets, in tcp_update_skb_after_send()
1254 if (rate != ~0UL && rate && tp->data_segs_out >= 10) { in tcp_update_skb_after_send()
1255 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); in tcp_update_skb_after_send()
1256 u64 credit = tp->tcp_wstamp_ns - prior_wstamp; in tcp_update_skb_after_send()
1259 len_ns -= min_t(u64, len_ns / 2, credit); in tcp_update_skb_after_send()
1260 tp->tcp_wstamp_ns += len_ns; in tcp_update_skb_after_send()
1263 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_update_skb_after_send()
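/* Editor's sketch of the pacing arithmetic above, with made-up numbers:
 * a 1500-byte skb at 1.5 MB/s earns 1 ms on the pacing clock, and up to
 * half of that can be forgiven as burst credit accumulated while the
 * flow was idle. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t rate = 1500000;	/* pacing rate, bytes/sec */
	uint64_t len = 1500;		/* skb->len */
	uint64_t credit = 2000000;	/* ns the clock lags behind */
	uint64_t len_ns = len * NSEC_PER_SEC / rate;	/* 1,000,000 ns */
	uint64_t take = len_ns / 2 < credit ? len_ns / 2 : credit;

	len_ns -= take;			/* allow a mild catch-up burst */
	printf("advance pacing clock by %llu ns\n",
	       (unsigned long long)len_ns);
	return 0;
}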
1298 prior_wstamp = tp->tcp_wstamp_ns; in __tcp_transmit_skb()
1299 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); in __tcp_transmit_skb()
1300 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); in __tcp_transmit_skb()
1312 return -ENOBUFS; in __tcp_transmit_skb()
1313 /* retransmit skbs might have a non-zero value in skb->dev in __tcp_transmit_skb()
1314 * because skb->dev is aliased with skb->rbnode.rb_left in __tcp_transmit_skb()
1316 skb->dev = NULL; in __tcp_transmit_skb()
1324 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { in __tcp_transmit_skb()
1330 * Note that we do not force the PSH flag for non-GSO packets, in __tcp_transmit_skb()
1332 * and in this case it is better to delay the delivery of 1-MSS in __tcp_transmit_skb()
1337 tcb->tcp_flags |= TCPHDR_PSH; in __tcp_transmit_skb()
1341 /* We set skb->ooo_okay to one if this packet can select in __tcp_transmit_skb()
1345 * if XPS is enabled, or sk->sk_txhash otherwise. in __tcp_transmit_skb()
1354 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || in __tcp_transmit_skb()
1362 skb->pfmemalloc = 0; in __tcp_transmit_skb()
1368 skb->sk = sk; in __tcp_transmit_skb()
1369 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; in __tcp_transmit_skb()
1370 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1372 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); in __tcp_transmit_skb()
1375 th = (struct tcphdr *)skb->data; in __tcp_transmit_skb()
1376 th->source = inet->inet_sport; in __tcp_transmit_skb()
1377 th->dest = inet->inet_dport; in __tcp_transmit_skb()
1378 th->seq = htonl(tcb->seq); in __tcp_transmit_skb()
1379 th->ack_seq = htonl(rcv_nxt); in __tcp_transmit_skb()
1381 tcb->tcp_flags); in __tcp_transmit_skb()
1383 th->check = 0; in __tcp_transmit_skb()
1384 th->urg_ptr = 0; in __tcp_transmit_skb()
1387 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in __tcp_transmit_skb()
1388 if (before(tp->snd_up, tcb->seq + 0x10000)) { in __tcp_transmit_skb()
1389 th->urg_ptr = htons(tp->snd_up - tcb->seq); in __tcp_transmit_skb()
1390 th->urg = 1; in __tcp_transmit_skb()
1391 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in __tcp_transmit_skb()
1392 th->urg_ptr = htons(0xFFFF); in __tcp_transmit_skb()
1393 th->urg = 1; in __tcp_transmit_skb()
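/* Editor's sketch of the urgent-pointer clamping above: the 16-bit field
 * can only point within 64 KB of this segment's sequence number, so a
 * farther snd_up is clamped to 0xFFFF. Values are hypothetical. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seq = 1000;
	uint32_t snd_up = 1000 + 70000;	/* urgent point, 70 KB ahead */
	uint16_t urg_ptr;

	if (snd_up - seq < 0x10000)
		urg_ptr = (uint16_t)(snd_up - seq);
	else
		urg_ptr = 0xFFFF;	/* as far ahead as we can say */
	printf("urg_ptr = %u\n", urg_ptr);
	return 0;
}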
1397 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1398 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { in __tcp_transmit_skb()
1399 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1405 th->window = htons(min(tp->rcv_wnd, 65535U)); in __tcp_transmit_skb()
1414 tp->af_specific->calc_md5_hash(opts.hash_location, in __tcp_transmit_skb()
1424 return -ENOMEM; in __tcp_transmit_skb()
1431 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, in __tcp_transmit_skb()
1435 if (likely(tcb->tcp_flags & TCPHDR_ACK)) in __tcp_transmit_skb()
1438 if (skb->len != tcp_header_size) { in __tcp_transmit_skb()
1440 tp->data_segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1441 tp->bytes_sent += skb->len - tcp_header_size; in __tcp_transmit_skb()
1444 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1448 tp->segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1450 /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */ in __tcp_transmit_skb()
1451 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in __tcp_transmit_skb()
1452 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); in __tcp_transmit_skb()
1454 /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ in __tcp_transmit_skb()
1457 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in __tcp_transmit_skb()
1462 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, in __tcp_transmit_skb()
1464 sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1481 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1494 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1497 sk_wmem_queued_add(sk, skb->truesize); in tcp_queue_skb()
1498 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1504 if (skb->len <= mss_now) { in tcp_set_skb_tso_segs()
1506 * non-TSO case. in tcp_set_skb_tso_segs()
1509 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_set_skb_tso_segs()
1511 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); in tcp_set_skb_tso_segs()
1512 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
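/* Editor's note on the pcount math above: a TSO skb counts as
 * ceil(len / mss) packets for cwnd and in-flight accounting. */
#include <stdio.h>

int main(void)
{
	unsigned int len = 10000, mss = 1448;
	unsigned int pcount = (len + mss - 1) / mss;	/* DIV_ROUND_UP */

	printf("%u bytes at mss %u -> %u segments\n", len, mss, pcount);
	return 0;
}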
1523 tp->packets_out -= decr; in tcp_adjust_pcount()
1525 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1526 tp->sacked_out -= decr; in tcp_adjust_pcount()
1527 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1528 tp->retrans_out -= decr; in tcp_adjust_pcount()
1529 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1530 tp->lost_out -= decr; in tcp_adjust_pcount()
1534 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1536 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1537 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1538 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_adjust_pcount()
1539 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1546 return TCP_SKB_CB(skb)->txstamp_ack || in tcp_has_tx_tstamp()
1547 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); in tcp_has_tx_tstamp()
1555 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1557 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1559 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1560 shinfo2->tx_flags |= tsflags; in tcp_fragment_tstamp()
1561 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1562 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; in tcp_fragment_tstamp()
1563 TCP_SKB_CB(skb)->txstamp_ack = 0; in tcp_fragment_tstamp()
1569 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; in tcp_skb_fragment_eor()
1570 TCP_SKB_CB(skb)->eor = 0; in tcp_skb_fragment_eor()
1580 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1582 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1601 if (WARN_ON(len > skb->len)) in tcp_fragment()
1602 return -EINVAL; in tcp_fragment()
1611 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); in tcp_fragment()
1612 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1617 return -ENOMEM; in tcp_fragment()
1621 return -ENOMEM; in tcp_fragment()
1626 return -ENOMEM; /* We'll just try again later. */ in tcp_fragment()
1630 sk_wmem_queued_add(sk, buff->truesize); in tcp_fragment()
1631 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1632 nlen = skb->len - len; in tcp_fragment()
1633 buff->truesize += nlen; in tcp_fragment()
1634 skb->truesize -= nlen; in tcp_fragment()
1637 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1638 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1639 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1642 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1643 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1644 TCP_SKB_CB(buff)->tcp_flags = flags; in tcp_fragment()
1645 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1650 skb_set_delivery_time(buff, skb->tstamp, true); in tcp_fragment()
1660 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; in tcp_fragment()
1665 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1666 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1677 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); in tcp_fragment()
1694 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1695 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1699 eat -= size; in __pskb_trim_head()
1701 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
1703 skb_frag_off_add(&shinfo->frags[k], eat); in __pskb_trim_head()
1704 skb_frag_size_sub(&shinfo->frags[k], eat); in __pskb_trim_head()
1710 shinfo->nr_frags = k; in __pskb_trim_head()
1712 skb->data_len -= len; in __pskb_trim_head()
1713 skb->len = skb->data_len; in __pskb_trim_head()
1723 return -ENOMEM; in tcp_trim_head()
1727 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1729 skb->truesize -= delta_truesize; in tcp_trim_head()
1730 sk_wmem_queued_add(sk, -delta_truesize); in tcp_trim_head()
1734 /* Any change of skb->len requires recalculation of tso factor. */ in tcp_trim_head()
1749 It is MMS_S - sizeof(tcphdr) of rfc1122 in __tcp_mtu_to_mss()
1751 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1754 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1755 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1758 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1762 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); in __tcp_mtu_to_mss()
1770 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1771 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
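/* Editor's worked example of the arithmetic in __tcp_mtu_to_mss() and
 * tcp_mtu_to_mss() above, with the usual IPv4 numbers; the real code
 * also subtracts extension headers and clamps to mss_clamp. */
#include <stdio.h>

int main(void)
{
	int pmtu = 1500;
	int ip_hdr = 20, tcp_hdr = 20, ts_opt = 12;
	int mss = pmtu - ip_hdr - tcp_hdr;	/* 1460 */
	int payload = mss - ts_opt;		/* 1448 with timestamps */

	printf("mss=%d, per-segment payload=%d\n", mss, payload);
	return 0;
}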
1782 tp->tcp_header_len + in tcp_mss_to_mtu()
1783 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1784 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
1795 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; in tcp_mtup_init()
1796 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1797 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1798 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); in tcp_mtup_init()
1799 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1800 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1801 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtup_init()
1807 tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT count
1810 tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
1814 inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
1816 tp->mss_cache is the current effective sending mss, including
1819 tp->rx_opt.mss_clamp.
1824 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1825 are READ ONLY outside this function. --ANK (980731)
1833 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1834 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
1840 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
1841 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
1842 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1843 tp->mss_cache = mss_now; in tcp_sync_mss()
1861 mss_now = tp->mss_cache; in tcp_current_mss()
1865 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1871 /* The mss_cache is sized based on tp->tcp_header_len, which assumes in tcp_current_mss()
1875 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1876 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1877 mss_now -= delta; in tcp_current_mss()
1891 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1892 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1895 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1897 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1900 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1902 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_application_limited()
1907 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
1911 * is fully utilized. If cwnd-limited then remember that fact for the in tcp_cwnd_validate()
1912 * current window. If not cwnd-limited then track the maximum number of in tcp_cwnd_validate()
1913 * outstanding packets in the current window. (If cwnd-limited then we in tcp_cwnd_validate()
1914 * chose to not update tp->max_packets_out to avoid an extra else in tcp_cwnd_validate()
1917 if (!before(tp->snd_una, tp->cwnd_usage_seq) || in tcp_cwnd_validate()
1919 (!tp->is_cwnd_limited && in tcp_cwnd_validate()
1920 tp->packets_out > tp->max_packets_out)) { in tcp_cwnd_validate()
1921 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1922 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1923 tp->cwnd_usage_seq = tp->snd_nxt; in tcp_cwnd_validate()
1928 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1929 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_validate()
1932 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1933 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1935 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && in tcp_cwnd_validate()
1936 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1937 !ca_ops->cong_control) in tcp_cwnd_validate()
1947 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
1948 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
1949 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
1957 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1958 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
1962 * Note that a TSO packet might end with a sub-mss segment
1964 * if ((skb->len % mss) != 0)
1965 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1967 * skb_pcount = skb->len / mss_now
1972 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
1973 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1988 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
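/* Editor's standalone restatement of the Minshall test above: hold back
 * a new sub-mss segment only while an earlier small segment (ending at
 * snd_sml) is still unacknowledged, i.e. strictly after snd_una and not
 * after snd_nxt. The wrap-safe compares are simplified here. */
#include <stdbool.h>
#include <stdint.h>

static bool minshall_allows_send(uint32_t snd_sml, uint32_t snd_una,
				 uint32_t snd_nxt)
{
	bool small_in_flight = (int32_t)(snd_sml - snd_una) > 0 &&
			       (int32_t)(snd_sml - snd_nxt) <= 0;

	return !small_in_flight;
}

int main(void)
{
	/* a small segment ending at 1500 is still in flight: hold back */
	return minshall_allows_send(1500, 1000, 2000) ? 1 : 0;
}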
1995 * - For close peers, we would rather send bigger packets to reduce
1997 * - For long distance/rtt flows, we would like to get ACK clocking
2001 * in bigger TSO bursts. We cut the RTT-based allowance in half
2002 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
2011 bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift); in tcp_tso_autosize()
2013 r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); in tcp_tso_autosize()
2014 if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) in tcp_tso_autosize()
2015 bytes += sk->sk_gso_max_size >> r; in tcp_tso_autosize()
2017 bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); in tcp_tso_autosize()
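/* Editor's rough model of the autosizing above: target about 1 ms of
 * data per TSO burst (rate >> 10), plus a GSO-size bonus that is halved
 * for every 2^9 us of min RTT, capped at gso_max_size. All numbers are
 * illustrative. */
#include <stdio.h>

int main(void)
{
	unsigned long pacing_rate = 125000000;	/* ~1 Gbit/s in bytes/s */
	unsigned int pacing_shift = 10;		/* >> 10 ~= 1 ms worth */
	unsigned int min_rtt_us = 2048;
	unsigned int tso_rtt_log = 9;
	unsigned long gso_max_size = 65536;
	unsigned long bytes = pacing_rate >> pacing_shift;	/* ~122 KB */
	unsigned int r = min_rtt_us >> tso_rtt_log;		/* 4 */

	if (r < 8 * sizeof(gso_max_size))
		bytes += gso_max_size >> r;			/* +4 KB */
	if (bytes > gso_max_size)
		bytes = gso_max_size;
	printf("autosize target: %lu bytes\n", bytes);
	return 0;
}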
2027 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
2030 min_tso = ca_ops->min_tso_segs ? in tcp_tso_segs()
2031 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
2032 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); in tcp_tso_segs()
2035 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
2048 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
2054 needed = min(skb->len, window); in tcp_mss_split_point()
2065 return needed - partial; in tcp_mss_split_point()
2079 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in tcp_cwnd_test()
2092 return min(halfcwnd, cwnd - in_flight); in tcp_cwnd_test()
2126 /* Don't use the Nagle rule for urgent data (or for the final FIN). */ in tcp_nagle_test()
2127 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
2130 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
2141 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
2143 if (skb->len > cur_mss) in tcp_snd_wnd_test()
2144 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
2153 * know that all the data is in scatter-gather pages, and that the
2159 int nlen = skb->len - len; in tso_fragment()
2164 DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); in tso_fragment()
2168 return -ENOMEM; in tso_fragment()
2172 sk_wmem_queued_add(sk, buff->truesize); in tso_fragment()
2173 sk_mem_charge(sk, buff->truesize); in tso_fragment()
2174 buff->truesize += nlen; in tso_fragment()
2175 skb->truesize -= nlen; in tso_fragment()
2178 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
2179 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
2180 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
2183 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
2184 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
2185 TCP_SKB_CB(buff)->tcp_flags = flags; in tso_fragment()
2220 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
2225 * Note that tp->tcp_wstamp_ns can be in the future if we have in tcp_tso_should_defer()
2228 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; in tcp_tso_should_defer()
2237 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
2240 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
2244 /* If a full-sized TSO skb can be sent, do it. */ in tcp_tso_should_defer()
2245 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
2249 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
2252 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
2254 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); in tcp_tso_should_defer()
2268 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
2276 delta = tp->tcp_clock_cache - head->tstamp; in tcp_tso_should_defer()
2278 if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) in tcp_tso_should_defer()
2283 * 1) We are cwnd-limited in tcp_tso_should_defer()
2284 * 2) We are rwnd-limited in tcp_tso_should_defer()
2288 if (cong_win <= skb->len) { in tcp_tso_should_defer()
2293 if (send_win <= skb->len) { in tcp_tso_should_defer()
2300 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || in tcp_tso_should_defer()
2301 TCP_SKB_CB(skb)->eor) in tcp_tso_should_defer()
2318 interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); in tcp_mtu_check_reprobe()
2319 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
2324 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
2325 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2327 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
2328 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2331 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_check_reprobe()
2341 if (len <= skb->len) in tcp_can_coalesce_send_queue_head()
2344 if (unlikely(TCP_SKB_CB(skb)->eor) || in tcp_can_coalesce_send_queue_head()
2349 len -= skb->len; in tcp_can_coalesce_send_queue_head()
2358 skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; in tcp_clone_payload()
2362 if (!sk_wmem_schedule(sk, to->truesize + probe_size)) in tcp_clone_payload()
2363 return -ENOMEM; in tcp_clone_payload()
2365 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_clone_payload()
2366 const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; in tcp_clone_payload()
2369 return -EINVAL; in tcp_clone_payload()
2371 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { in tcp_clone_payload()
2375 probe_size - len); in tcp_clone_payload()
2385 return -E2BIG; in tcp_clone_payload()
2398 skb_shinfo(to)->nr_frags = nr_frags; in tcp_clone_payload()
2399 to->truesize += probe_size; in tcp_clone_payload()
2400 to->len += probe_size; in tcp_clone_payload()
2401 to->data_len += probe_size; in tcp_clone_payload()
2413 * -1 otherwise
2432 if (likely(!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
2433 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
2434 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2436 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) in tcp_mtu_probe()
2437 return -1; in tcp_mtu_probe()
2440 * and current mss_clamp. if (search_high - search_low) in tcp_mtu_probe()
2444 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2445 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
2446 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
2447 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
2452 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2453 interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { in tcp_mtu_probe()
2458 return -1; in tcp_mtu_probe()
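/* Editor's toy version of the probing strategy above: try the midpoint
 * of [search_low, search_high] and converge like a binary search,
 * raising the floor on success and lowering the ceiling when a probe is
 * lost. Threshold and bounds are invented. */
#include <stdio.h>

int main(void)
{
	int search_low = 1024, search_high = 1500;
	int probe_threshold = 8;

	while (search_high - search_low > probe_threshold) {
		int probe = (search_low + search_high) / 2;
		int acked = 1;		/* pretend every probe succeeds */

		if (acked)
			search_low = probe;	/* path carries this size */
		else
			search_high = probe;	/* probe lost, back off */
	}
	printf("converged MTU estimate: %d\n", search_low);
	return 0;
}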
2462 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
2463 return -1; in tcp_mtu_probe()
2465 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
2466 return -1; in tcp_mtu_probe()
2467 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
2473 return -1; in tcp_mtu_probe()
2479 return -1; in tcp_mtu_probe()
2484 return -1; in tcp_mtu_probe()
2490 return -1; in tcp_mtu_probe()
2492 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2493 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2499 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
2500 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
2501 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; in tcp_mtu_probe()
2508 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
2510 if (skb->len <= copy) { in tcp_mtu_probe()
2513 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_mtu_probe()
2517 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; in tcp_mtu_probe()
2522 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
2526 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2534 tcp_init_tso_segs(nskb, nskb->len); in tcp_mtu_probe()
2537 * be resegmented into mss-sized pieces by tcp_write_xmit(). in tcp_mtu_probe()
2542 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); in tcp_mtu_probe()
2545 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
2546 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2547 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
2552 return -1; in tcp_mtu_probe()
2562 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) in tcp_pacing_check()
2565 if (!hrtimer_is_queued(&tp->pacing_timer)) { in tcp_pacing_check()
2566 hrtimer_start(&tp->pacing_timer, in tcp_pacing_check()
2567 ns_to_ktime(tp->tcp_wstamp_ns), in tcp_pacing_check()
2576 const struct rb_node *node = sk->tcp_rtx_queue.rb_node; in tcp_rtx_queue_empty_or_single_skb()
2583 return !node->rb_left && !node->rb_right; in tcp_rtx_queue_empty_or_single_skb()
2590 * - better RTT estimation and ACK scheduling
2591 * - faster recovery
2592 * - high rates
2603 2 * skb->truesize, in tcp_small_queue_check()
2604 READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift)); in tcp_small_queue_check()
2605 if (sk->sk_pacing_status == SK_PACING_NONE) in tcp_small_queue_check()
2607 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); in tcp_small_queue_check()
2611 tcp_sk(sk)->tcp_tx_delay) { in tcp_small_queue_check()
2612 u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) * in tcp_small_queue_check()
2613 tcp_sk(sk)->tcp_tx_delay; in tcp_small_queue_check()
2616 * approximate our needs assuming an ~100% skb->truesize overhead. in tcp_small_queue_check()
2620 extra_bytes >>= (20 - 1); in tcp_small_queue_check()
2623 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2632 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2638 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
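/* Editor's sketch of the TSQ limit above: cap per-flow data sitting in
 * qdisc/NIC queues to roughly 1 ms at the pacing rate, but never less
 * than two packets so slow flows still pipeline. */
#include <stdio.h>

int main(void)
{
	unsigned long pacing_rate = 12500000;	/* 100 Mbit/s, bytes/s */
	unsigned int pacing_shift = 10;
	unsigned int truesize = 2304;		/* one skb, example */
	unsigned long limit = pacing_rate >> pacing_shift;	/* ~12 KB */

	if (limit < 2UL * truesize)
		limit = 2UL * truesize;
	printf("TSQ limit: %lu bytes\n", limit);
	return 0;
}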
2647 enum tcp_chrono old = tp->chrono_type; in tcp_chrono_set()
2650 tp->chrono_stat[old - 1] += now - tp->chrono_start; in tcp_chrono_set()
2651 tp->chrono_start = now; in tcp_chrono_set()
2652 tp->chrono_type = new; in tcp_chrono_set()
2664 if (type > tp->chrono_type) in tcp_chrono_start()
2682 else if (type == tp->chrono_type) in tcp_chrono_stop()
2691 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2728 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2730 tp->tcp_wstamp_ns = tp->tcp_clock_cache; in tcp_write_xmit()
2731 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); in tcp_write_xmit()
2732 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_write_xmit()
2777 if (skb->len > limit && in tcp_write_xmit()
2786 * We do not want to send a pure-ack packet and have in tcp_write_xmit()
2789 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) in tcp_write_xmit()
2819 tp->prr_out += sent_pkts; in tcp_write_xmit()
2826 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
2839 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_schedule_loss_probe()
2842 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); in tcp_schedule_loss_probe()
2847 !tp->packets_out || !tcp_is_sack(tp) || in tcp_schedule_loss_probe()
2848 (icsk->icsk_ca_state != TCP_CA_Open && in tcp_schedule_loss_probe()
2849 icsk->icsk_ca_state != TCP_CA_CWR)) in tcp_schedule_loss_probe()
2856 if (tp->srtt_us) { in tcp_schedule_loss_probe()
2857 timeout_us = tp->srtt_us >> 2; in tcp_schedule_loss_probe()
2858 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
2869 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
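/* Editor's approximation of the probe timeout rule above: PTO is about
 * 2*SRTT, padded when a single packet is in flight so a delayed ACK
 * cannot trigger a spurious probe. srtt_us stores 8*SRTT as in the
 * kernel; the 200 ms pad is an assumption. */
#include <stdio.h>

int main(void)
{
	unsigned int srtt_us = 8 * 50000;	/* SRTT = 50 ms, scaled by 8 */
	unsigned int packets_out = 1;
	unsigned int delack_pad_us = 200000;
	unsigned int timeout_us = srtt_us >> 2;	/* 2 * SRTT = 100 ms */

	if (packets_out == 1)
		timeout_us += delack_pad_us;
	printf("TLP fires after ~%u us\n", timeout_us);
	return 0;
}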
2886 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in skb_still_in_host_queue()
2908 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2911 tp->tlp_retrans = 0; in tcp_send_loss_probe()
2914 pcount = tp->packets_out; in tcp_send_loss_probe()
2916 if (tp->packets_out > pcount) in tcp_send_loss_probe()
2920 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
2922 WARN_ONCE(tp->packets_out, in tcp_send_loss_probe()
2924 tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss); in tcp_send_loss_probe()
2925 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2936 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2938 (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2950 tp->tlp_retrans = 1; in tcp_send_loss_probe()
2954 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
2958 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2974 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2989 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2991 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
3003 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
3009 * since header prediction assumes th->window stays fixed.
3011 * Strictly speaking, keeping th->window fixed violates the receiver
3055 * fluctuations. --SAW 1998/11/1 in __tcp_select_window()
3057 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
3065 full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
3074 * a non-zero scaling factor in effect. in __tcp_select_window()
3076 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) in __tcp_select_window()
3082 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3090 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3093 * of the maximum allowed, try to move to zero-window, else in __tcp_select_window()
3103 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
3104 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3109 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
3116 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3118 window = tp->rcv_wnd; in __tcp_select_window()
3127 if (window <= free_space - mss || window > free_space) in __tcp_select_window()
3138 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3141 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3148 free_space < (1 << tp->rx_opt.rcv_wscale)) in __tcp_select_window()
3152 if (free_space > tp->rcv_ssthresh) { in __tcp_select_window()
3153 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3158 * the memory-based limit, and rcv_ssthresh is not a hard limit in __tcp_select_window()
3161 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3175 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_skb_collapse_tstamp()
3176 shinfo->tskey = next_shinfo->tskey; in tcp_skb_collapse_tstamp()
3177 TCP_SKB_CB(skb)->txstamp_ack |= in tcp_skb_collapse_tstamp()
3178 TCP_SKB_CB(next_skb)->txstamp_ack; in tcp_skb_collapse_tstamp()
3189 next_skb_size = next_skb->len; in tcp_collapse_retrans()
3199 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
3202 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
3207 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
3208 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; in tcp_collapse_retrans()
3212 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
3213 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
3231 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
3247 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) in tcp_retrans_try_collapse()
3249 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
3259 space -= skb->len; in tcp_retrans_try_collapse()
3269 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
3278 * state updates are done by the caller. Returns non-zero if an
3290 if (icsk->icsk_mtup.probe_size) in __tcp_retransmit_skb()
3291 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
3294 return -EBUSY; in __tcp_retransmit_skb()
3297 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
3298 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in __tcp_retransmit_skb()
3299 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; in __tcp_retransmit_skb()
3300 TCP_SKB_CB(skb)->seq++; in __tcp_retransmit_skb()
3303 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { in __tcp_retransmit_skb()
3305 return -EINVAL; in __tcp_retransmit_skb()
3307 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
3308 return -ENOMEM; in __tcp_retransmit_skb()
3311 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
3312 return -EHOSTUNREACH; /* Routing failure or similar. */ in __tcp_retransmit_skb()
3315 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in __tcp_retransmit_skb()
3323 if (TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
3324 return -EAGAIN; in __tcp_retransmit_skb()
3334 if (skb->len > len) { in __tcp_retransmit_skb()
3337 return -ENOMEM; /* We'll try again later. */ in __tcp_retransmit_skb()
3340 return -ENOMEM; in __tcp_retransmit_skb()
3344 diff -= tcp_skb_pcount(skb); in __tcp_retransmit_skb()
3348 if (skb->len < avail_wnd) in __tcp_retransmit_skb()
3353 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) in __tcp_retransmit_skb()
3359 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
3361 tp->total_retrans += segs; in __tcp_retransmit_skb()
3362 tp->bytes_retrans += skb->len; in __tcp_retransmit_skb()
3364 /* make sure skb->data is aligned on arches that require it in __tcp_retransmit_skb()
3365 * and check if ack-trimming & collapsing extended the headroom in __tcp_retransmit_skb()
3368 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
3375 nskb->dev = NULL; in __tcp_retransmit_skb()
3378 err = -ENOBUFS; in __tcp_retransmit_skb()
3383 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
3393 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
3397 TCP_SKB_CB(skb)->seq, segs, err); in __tcp_retransmit_skb()
3401 } else if (err != -EBUSY) { in __tcp_retransmit_skb()
3414 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
3418 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
3419 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3423 if (!tp->retrans_stamp) in tcp_retransmit_skb()
3424 tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb); in tcp_retransmit_skb()
3426 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
3427 tp->undo_retrans = 0; in tcp_retransmit_skb()
3428 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3446 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
3450 skb = tp->retransmit_skb_hint ?: rtx_head; in tcp_xmit_retransmit_queue()
3461 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
3463 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); in tcp_xmit_retransmit_queue()
3466 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
3472 if (tp->retrans_out >= tp->lost_out) { in tcp_xmit_retransmit_queue()
3480 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3498 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
3501 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) in tcp_xmit_retransmit_queue()
3507 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
3522 delta = size - sk->sk_forward_alloc; in sk_forced_mem_schedule()
3529 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in sk_forced_mem_schedule()
3530 mem_cgroup_charge_skmem(sk->sk_memcg, amt, in sk_forced_mem_schedule()
3549 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3552 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; in tcp_send_fin()
3553 TCP_SKB_CB(tskb)->end_seq++; in tcp_send_fin()
3554 tp->write_seq++; in tcp_send_fin()
3558 * We need to set tp->snd_nxt to the value it would have in tcp_send_fin()
3560 * does not change tp->snd_nxt. in tcp_send_fin()
3562 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); in tcp_send_fin()
3566 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); in tcp_send_fin()
3570 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); in tcp_send_fin()
3572 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3574 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
3584 * by RFC 2525, section 2.17. -DaveM
3614 /* Send a crossed SYN-ACK during socket establishment.
3625 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
3627 return -EFAULT; in tcp_send_synack()
3629 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
3637 return -ENOMEM; in tcp_send_synack()
3638 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); in tcp_send_synack()
3642 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3643 sk_wmem_queued_add(sk, nskb->truesize); in tcp_send_synack()
3644 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3648 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
3655 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3700 * sk->sk_wmem_alloc in an atomic, we can promote to rw. in tcp_make_synack()
3712 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) in tcp_make_synack()
3719 if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ in tcp_make_synack()
3720 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); in tcp_make_synack()
3729 u8 keyid = tcp_rsk(req)->ao_keyid; in tcp_make_synack()
3731 ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req), in tcp_make_synack()
3732 keyid, -1); in tcp_make_synack()
3733 /* If there is no matching key - avoid sending anything, in tcp_make_synack()
3735 * for another peer-matching key, but the peer has requested in tcp_make_synack()
3741 … net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n", in tcp_make_synack()
3750 key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk, in tcp_make_synack()
3756 skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4); in tcp_make_synack()
3758 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; in tcp_make_synack()
3766 th = (struct tcphdr *)skb->data; in tcp_make_synack()
3768 th->syn = 1; in tcp_make_synack()
3769 th->ack = 1; in tcp_make_synack()
3771 th->source = htons(ireq->ir_num); in tcp_make_synack()
3772 th->dest = ireq->ir_rmt_port; in tcp_make_synack()
3773 skb->mark = ireq->ir_mark; in tcp_make_synack()
3774 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_make_synack()
3775 th->seq = htonl(tcp_rsk(req)->snt_isn); in tcp_make_synack()
3777 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); in tcp_make_synack()
3780 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); in tcp_make_synack()
3782 th->doff = (tcp_header_size >> 2); in tcp_make_synack()
3785 /* Okay, we have all we need - do the md5 hash if needed */ in tcp_make_synack()
3788 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, in tcp_make_synack()
3793 tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location, in tcp_make_synack()
3795 opts.hash_location - (u8 *)th, 0); in tcp_make_synack()
3823 if (likely(ca && bpf_try_module_get(ca, ca->owner))) { in tcp_ca_dst_init()
3824 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
3825 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
3826 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
3842 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_connect_init()
3843 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) in tcp_connect_init()
3844 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_connect_init()
3849 if (tp->rx_opt.user_mss) in tcp_connect_init()
3850 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3851 tp->max_window = 0; in tcp_connect_init()
3857 if (!tp->window_clamp) in tcp_connect_init()
3858 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); in tcp_connect_init()
3859 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_connect_init()
3864 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3865 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3866 tp->window_clamp = tcp_full_space(sk); in tcp_connect_init()
3873 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3874 &tp->rcv_wnd, in tcp_connect_init()
3875 &tp->window_clamp, in tcp_connect_init()
3876 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), in tcp_connect_init()
3880 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3881 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3883 WRITE_ONCE(sk->sk_err, 0); in tcp_connect_init()
3885 tp->snd_wnd = 0; in tcp_connect_init()
3888 tp->snd_una = tp->write_seq; in tcp_connect_init()
3889 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3890 tp->snd_up = tp->write_seq; in tcp_connect_init()
3891 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect_init()
3893 if (likely(!tp->repair)) in tcp_connect_init()
3894 tp->rcv_nxt = 0; in tcp_connect_init()
3896 tp->rcv_tstamp = tcp_jiffies32; in tcp_connect_init()
3897 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3898 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_connect_init()
3900 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
3901 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3910 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
3912 sk_wmem_queued_add(sk, skb->truesize); in tcp_connect_queue_skb()
3913 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3914 WRITE_ONCE(tp->write_seq, tcb->end_seq); in tcp_connect_queue_skb()
3915 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3919 * queue a data-only packet after the regular SYN, such that regular SYNs
3920 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3929 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3934 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3935 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3938 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and in tcp_send_syn_data()
3939 * user-MSS. Reserve maximum option space for middleboxes that add private TCP options. in tcp_send_syn_data()
3942 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); in tcp_send_syn_data()
3944 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_send_syn_data()
3946 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - in tcp_send_syn_data()
3949 space = min_t(size_t, space, fo->size); in tcp_send_syn_data()
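/* A minimal userspace sketch (not kernel code) of the SYN-data budget
 * computed above, assuming MAX_TCP_OPTION_SPACE is 40 bytes. The full
 * option space is reserved even if this SYN uses less of it, since
 * middleboxes may insert options along the path.
 */
#include <stdio.h>

#define MAX_OPT_SPACE	40	/* assumed MAX_TCP_OPTION_SPACE */

static size_t syn_data_space(size_t mss_from_pmtu, size_t fo_size)
{
	size_t space = mss_from_pmtu - MAX_OPT_SPACE;

	return space < fo_size ? space : fo_size;	/* min_t(size_t, ...) */
}

int main(void)
{
	/* Ethernet PMTU 1500 -> MSS 1460 -> at most 1420 bytes of SYN data. */
	printf("%zu\n", syn_data_space(1460, 4096));
	/* A small Fast Open payload is the binding constraint instead. */
	printf("%zu\n", syn_data_space(1460, 100));
	return 0;
}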
3953 pfrag, sk->sk_allocation)) in tcp_send_syn_data()
3955 syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); in tcp_send_syn_data()
3958 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); in tcp_send_syn_data()
3960 space = min_t(size_t, space, pfrag->size - pfrag->offset); in tcp_send_syn_data()
3964 space = copy_page_from_iter(pfrag->page, pfrag->offset, in tcp_send_syn_data()
3965 space, &fo->data->msg_iter); in tcp_send_syn_data()
3971 skb_fill_page_desc(syn_data, 0, pfrag->page, in tcp_send_syn_data()
3972 pfrag->offset, space); in tcp_send_syn_data()
3973 page_ref_inc(pfrag->page); in tcp_send_syn_data()
3974 pfrag->offset += space; in tcp_send_syn_data()
3976 skb_zcopy_set(syn_data, fo->uarg, NULL); in tcp_send_syn_data()
3979 if (space == fo->size) in tcp_send_syn_data()
3980 fo->data = NULL; in tcp_send_syn_data()
3981 fo->copied = space; in tcp_send_syn_data()
3984 if (syn_data->len) in tcp_send_syn_data()
3987 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3989 skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true); in tcp_send_syn_data()
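/* Note: what was just transmitted is a clone carrying SYN plus data. The
 * retained syn_data skb is stripped of the SYN below by advancing its
 * sequence number one past the SYN and retagging it ACK|PSH, so any later
 * retransmission of it goes out as an ordinary data segment.
 */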
3996 TCP_SKB_CB(syn_data)->seq++; in tcp_send_syn_data()
3997 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; in tcp_send_syn_data()
3999 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
4000 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
4006 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
4007 tp->packets_out -= tcp_skb_pcount(syn_data); in tcp_send_syn_data()
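/* Note: this is the failure path. The data never left the host, so the
 * data-only skb is moved back onto the write queue (and packets_out is
 * corrected) to be sent as a normal segment once the handshake completes.
 */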
4011 if (fo->cookie.len > 0) in tcp_send_syn_data()
4012 fo->cookie.len = 0; in tcp_send_syn_data()
4013 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
4015 tp->syn_fastopen = 0; in tcp_send_syn_data()
4017 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ in tcp_send_syn_data()
4032 * Return error if the peer has both an MD5 and a TCP-AO key in tcp_connect()
4035 if (unlikely(rcu_dereference_protected(tp->md5sig_info, in tcp_connect()
4037 bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); in tcp_connect()
4038 bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); in tcp_connect()
4041 ao_info = rcu_dereference_check(tp->ao_info, in tcp_connect()
4048 needs_ao |= ao_info->ao_required; in tcp_connect()
4049 WARN_ON_ONCE(ao_info->ao_required && needs_md5); in tcp_connect()
4052 return -EKEYREJECTED; in tcp_connect()
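/* Note: a connection cannot be signed with both TCP-MD5 (RFC 2385) and
 * TCP-AO (RFC 5925) at once, so when the peer matches keys of both kinds
 * the connect attempt is refused outright rather than preferring one.
 */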
4054 /* If we have a matching md5 key and no matching tcp-ao key in tcp_connect()
4061 kfree(rcu_replace_pointer(tp->md5sig_info, NULL, in tcp_connect()
4067 if (unlikely(rcu_dereference_protected(tp->ao_info, in tcp_connect()
4072 if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) in tcp_connect()
4073 return -EKEYREJECTED; in tcp_connect()
4077 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
4078 return -EHOSTUNREACH; /* Routing failure or similar. */ in tcp_connect()
4082 if (unlikely(tp->repair)) { in tcp_connect()
4087 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); in tcp_connect()
4089 return -ENOBUFS; in tcp_connect()
4091 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
4093 tp->retrans_stamp = tcp_time_stamp_ts(tp); in tcp_connect()
4096 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
4099 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
4100 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
4101 if (err == -ECONNREFUSED) in tcp_connect()
4104 /* We change tp->snd_nxt after the tcp_transmit_skb() call in order to make this packet get counted in tcpOutSegs. in tcp_connect()
4107 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect()
4108 tp->pushed_seq = tp->write_seq; in tcp_connect()
4111 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); in tcp_connect()
4112 tp->pushed_seq = TCP_SKB_CB(buff)->seq; in tcp_connect()
4118 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
4126 u32 delack_max = inet_csk(sk)->icsk_delack_max; in tcp_delack_max()
4130 u32 delack_from_rto_min = max_t(int, 1, rto_min - 1); in tcp_delack_max()
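/* Note: when the route pins RTAX_RTO_MIN, the delayed-ACK ceiling is
 * pulled down with it. max_t(int, 1, rto_min - 1) keeps the value nonzero
 * while ensuring the delayed ACK fires strictly before a retransmit timer
 * armed with the same rto_min would expire.
 */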
4144 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
4152 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
4158 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements directly. in tcp_send_delayed_ack()
4161 if (tp->srtt_us) { in tcp_send_delayed_ack()
4162 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
4178 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
4180 if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { in tcp_send_delayed_ack()
4185 if (!time_before(timeout, icsk->icsk_ack.timeout)) in tcp_send_delayed_ack()
4186 timeout = icsk->icsk_ack.timeout; in tcp_send_delayed_ack()
4188 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; in tcp_send_delayed_ack()
4189 icsk->icsk_ack.timeout = timeout; in tcp_send_delayed_ack()
4190 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
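/* A minimal userspace sketch (not kernel code) of the RTT clamp applied to
 * the delayed-ACK timeout above, in plain microseconds rather than
 * jiffies. It assumes srtt_us stores the smoothed RTT left-shifted by 3
 * (hence the >> 3) and uses 40 ms as a stand-in for TCP_DELACK_MIN.
 */
#include <stdio.h>

#define DELACK_MIN_US	40000U	/* assumed stand-in for TCP_DELACK_MIN */

static unsigned int clamp_delack(unsigned int ato_us, unsigned int max_ato_us,
				 unsigned int srtt_us)	/* smoothed RTT << 3 */
{
	if (srtt_us) {
		unsigned int rtt_us = srtt_us >> 3;

		if (rtt_us < DELACK_MIN_US)
			rtt_us = DELACK_MIN_US;
		if (rtt_us < max_ato_us)
			max_ato_us = rtt_us;	/* don't delay ACKs much past one RTT */
	}
	return ato_us < max_ato_us ? ato_us : max_ato_us;
}

int main(void)
{
	/* A 100 ms smoothed RTT (stored as 800000) caps a 200 ms ato at 100 ms. */
	printf("%u\n", clamp_delack(200000, 500000, 100000 << 3));
	/* With no RTT sample yet, only the generic ceiling applies. */
	printf("%u\n", clamp_delack(200000, 500000, 0));
	return 0;
}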
4199 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
4212 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; in __tcp_send_ack()
4214 icsk->icsk_ack.retry++; in __tcp_send_ack()
4216 icsk->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
4238 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); in tcp_send_ack()
4244 * Question: what should we send while in urgent mode?
4248 * Current solution: to send TWO zero-length segments in urgent mode:
4249 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
4250 * out-of-date with SND.UNA-1 to probe window.
4252 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) in tcp_xmit_probe_skb() argument
4261 return -1; in tcp_xmit_probe_skb()
4269 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
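/* Note: !urgent evaluates to 0 or 1, so the probe carries SND.UNA when in
 * urgent mode (re-delivering the urgent pointer) and SND.UNA-1 otherwise,
 * a sequence number the peer has already ACKed, which reliably provokes an
 * immediate ACK announcing the current window.
 */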
4277 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
4278 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
4290 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
4291 return -1; in tcp_write_wakeup()
4294 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
4297 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
4299 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
4300 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
4306 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
4307 skb->len > mss) { in tcp_write_wakeup()
4309 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4312 return -1; in tcp_write_wakeup()
4316 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4322 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
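/* Note: the urgent pointer is a 16-bit forward offset from the segment's
 * sequence number, so urgent data is only representable while SND.UP lies
 * in (SND.UNA, SND.UNA + 0xFFFF]. Inside that range the urgent-mode probe
 * is sent; otherwise the plain window probe is used.
 */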
4341 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
4343 icsk->icsk_probes_out = 0; in tcp_send_probe0()
4344 icsk->icsk_backoff = 0; in tcp_send_probe0()
4345 icsk->icsk_probes_tstamp = 0; in tcp_send_probe0()
4349 icsk->icsk_probes_out++; in tcp_send_probe0()
4351 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) in tcp_send_probe0()
4352 icsk->icsk_backoff++; in tcp_send_probe0()
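/* Note: unlike retransmission backoff, zero-window probing never gives up
 * on its own, since the peer's ACKs of each probe prove it is alive. The
 * backoff counter therefore simply stops growing at tcp_retries2, capping
 * the exponential probe interval.
 */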
4367 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; in tcp_rtx_synack()
4372 if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) in tcp_rtx_synack()
4373 WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); in tcp_rtx_synack()
4374 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, in tcp_rtx_synack()
4384 tcp_sk_rw(sk)->total_retrans++; in tcp_rtx_synack()