Lines matching refs:sk. Each match below is a line of include/net/tcp.h that references an identifier named sk, keyed by its line number in that header; function bodies appear only through their matching lines, so inline helpers show up as fragments.
69 void tcp_time_wait(struct sock *sk, int state, int timeo);
300 static inline bool tcp_under_memory_pressure(const struct sock *sk) in tcp_under_memory_pressure() argument
302 if (mem_cgroup_sk_enabled(sk) && in tcp_under_memory_pressure()
303 mem_cgroup_sk_under_memory_pressure(sk)) in tcp_under_memory_pressure()
325 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb) in tcp_wmem_free_skb() argument
327 sk_wmem_queued_add(sk, -skb->truesize); in tcp_wmem_free_skb()
329 sk_mem_uncharge(sk, skb->truesize); in tcp_wmem_free_skb()
331 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb))); in tcp_wmem_free_skb()
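Only the accounting lines of tcp_wmem_free_skb() match above; the branch choosing between the two sk_mem_uncharge() calls is elided because it never mentions sk. A sketch of the full helper, with the elided lines filled in from context (the zero-copy test is our reading of the surrounding code, not part of the match output):

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);	/* shrink the queued-bytes count */
	if (!skb_zcopy_pure(skb))		/* regular skb: whole truesize was charged */
		sk_mem_uncharge(sk, skb->truesize);
	else					/* pure zerocopy: only the skb head was charged */
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}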
335 void sk_forced_mem_schedule(struct sock *sk, int size);
337 bool tcp_check_oom(const struct sock *sk, int shift);
351 void tcp_shutdown(struct sock *sk, int how);
356 void tcp_remove_empty_skb(struct sock *sk);
357 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
358 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
359 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
362 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
363 int tcp_wmem_schedule(struct sock *sk, int copy);
364 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
366 void tcp_release_cb(struct sock *sk);
368 void tcp_write_timer_handler(struct sock *sk);
369 void tcp_delack_timer_handler(struct sock *sk);
370 int tcp_ioctl(struct sock *sk, int cmd, int *karg);
371 enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
372 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
373 void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
374 void tcp_rcv_space_adjust(struct sock *sk);
375 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
376 void tcp_twsk_destructor(struct sock *sk);
378 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
381 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
384 static inline void tcp_dec_quickack_mode(struct sock *sk) in tcp_dec_quickack_mode() argument
386 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_dec_quickack_mode()
390 const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0; in tcp_dec_quickack_mode()
456 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
461 void tcp_enter_loss(struct sock *sk);
462 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
464 void tcp_update_metrics(struct sock *sk);
465 void tcp_init_metrics(struct sock *sk);
468 void __tcp_close(struct sock *sk, long timeout);
469 void tcp_close(struct sock *sk, long timeout);
470 void tcp_init_sock(struct sock *sk);
471 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
474 int do_tcp_getsockopt(struct sock *sk, int level,
476 int tcp_getsockopt(struct sock *sk, int level, int optname,
479 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
481 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
483 void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
484 void tcp_set_keepalive(struct sock *sk, int val);
486 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
488 int tcp_set_rcvlowat(struct sock *sk, int val);
489 int tcp_set_window_clamp(struct sock *sk, int val);
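tcp_set_keepalive(), tcp_set_rcvlowat() and tcp_set_window_clamp() are the kernel ends of plain socket options, so they are the easiest entries in this list to exercise. A user-space sketch (the helper name is ours; error handling elided):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void tune_tcp_socket(int fd)
{
	int on = 1, idle = 60, lowat = 4096, clamp = 65535;

	/* SO_KEEPALIVE reaches tcp_set_keepalive(); TCP_KEEPIDLE re-arms
	 * the timer via tcp_reset_keepalive_timer() */
	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));

	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));		/* tcp_set_rcvlowat() */
	setsockopt(fd, IPPROTO_TCP, TCP_WINDOW_CLAMP, &clamp, sizeof(clamp));	/* tcp_set_window_clamp() */
}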
492 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
494 void tcp_data_ready(struct sock *sk);
506 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
508 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
513 struct sock *sk, struct tcphdr *th);
518 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
519 void tcp_v4_mtu_reduced(struct sock *sk);
520 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
521 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
522 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
523 struct sock *tcp_create_openreq_child(const struct sock *sk,
526 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
527 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
532 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
533 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
534 int tcp_connect(struct sock *sk);
540 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
545 int tcp_disconnect(struct sock *sk, int flags);
547 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
548 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
549 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
552 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
556 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
558 struct sock *sk, struct sk_buff *skb,
595 static inline void tcp_synq_overflow(const struct sock *sk) in tcp_synq_overflow() argument
600 if (sk->sk_reuseport) { in tcp_synq_overflow()
603 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_overflow()
613 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_overflow()
615 WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now); in tcp_synq_overflow()
619 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) in tcp_synq_no_recent_overflow() argument
624 if (sk->sk_reuseport) { in tcp_synq_no_recent_overflow()
627 reuse = rcu_dereference(sk->sk_reuseport_cb); in tcp_synq_no_recent_overflow()
636 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp); in tcp_synq_no_recent_overflow()
682 return skb->sk; in cookie_bpf_ok()
685 struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
692 static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk, in cookie_bpf_check() argument
701 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
709 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
711 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
713 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
714 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
715 void tcp_retransmit_timer(struct sock *sk);
718 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
724 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
730 void tcp_send_fin(struct sock *sk);
731 void tcp_send_active_reset(struct sock *sk, gfp_t priority,
735 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
736 void tcp_send_ack(struct sock *sk);
737 void tcp_send_delayed_ack(struct sock *sk);
738 void tcp_send_loss_probe(struct sock *sk);
739 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
744 void tcp_rearm_rto(struct sock *sk);
745 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
746 void tcp_done_with_error(struct sock *sk, int err);
747 void tcp_reset(struct sock *sk, struct sk_buff *skb);
748 void tcp_fin(struct sock *sk);
749 void tcp_check_space(struct sock *sk);
750 void tcp_sack_compress_send_ack(struct sock *sk);
758 static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb) in tcp_add_receive_queue() argument
762 __skb_queue_tail(&sk->sk_receive_queue, skb); in tcp_add_receive_queue()
767 static inline void tcp_clear_xmit_timers(struct sock *sk) in tcp_clear_xmit_timers() argument
769 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1) in tcp_clear_xmit_timers()
770 __sock_put(sk); in tcp_clear_xmit_timers()
772 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1) in tcp_clear_xmit_timers()
773 __sock_put(sk); in tcp_clear_xmit_timers()
775 inet_csk_clear_xmit_timers(sk); in tcp_clear_xmit_timers()
778 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
779 unsigned int tcp_current_mss(struct sock *sk);
780 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
809 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
811 int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
814 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
815 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
816 void tcp_read_done(struct sock *sk, size_t len);
818 void tcp_initialize_rcv_mss(struct sock *sk);
820 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
821 int tcp_mss_to_mtu(struct sock *sk, int mss);
822 void tcp_mtup_init(struct sock *sk);
824 static inline unsigned int tcp_rto_max(const struct sock *sk) in tcp_rto_max() argument
826 return READ_ONCE(inet_csk(sk)->icsk_rto_max); in tcp_rto_max()
829 static inline void tcp_bound_rto(struct sock *sk) in tcp_bound_rto() argument
831 inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk)); in tcp_bound_rto()
839 u32 tcp_delack_max(const struct sock *sk);
842 static inline u32 tcp_rto_min(const struct sock *sk) in tcp_rto_min() argument
844 const struct dst_entry *dst = __sk_dst_get(sk); in tcp_rto_min()
845 u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min); in tcp_rto_min()
852 static inline u32 tcp_rto_min_us(const struct sock *sk) in tcp_rto_min_us() argument
854 return jiffies_to_usecs(tcp_rto_min(sk)); in tcp_rto_min_us()
885 u32 __tcp_select_window(struct sock *sk);
887 void tcp_send_window_probe(struct sock *sk);
1101 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
1234 u32 (*ssthresh)(struct sock *sk);
1237 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1240 void (*set_state)(struct sock *sk, u8 new_state);
1243 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1246 void (*in_ack_event)(struct sock *sk, u32 flags);
1249 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1252 u32 (*min_tso_segs)(struct sock *sk);
1257 void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
1261 u32 (*undo_cwnd)(struct sock *sk);
1263 u32 (*sndbuf_expand)(struct sock *sk);
1267 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1277 void (*init)(struct sock *sk);
1279 void (*release)(struct sock *sk);
1288 void tcp_assign_congestion_control(struct sock *sk);
1289 void tcp_init_congestion_control(struct sock *sk);
1290 void tcp_cleanup_congestion_control(struct sock *sk);
1296 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1301 u32 tcp_reno_ssthresh(struct sock *sk);
1302 u32 tcp_reno_undo_cwnd(struct sock *sk);
1303 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
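Lines 1234-1279 are the complete congestion-control interface; tcp_register_congestion_control() rejects an ops table unless ssthresh, undo_cwnd and one of cong_avoid/cong_control are set. A minimal, hypothetical module ("example_cc" is an invented name) that reuses the exported Reno helpers declared just above:

#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops example_cc __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* halve the window on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* classic AIMD growth */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* roll back a spurious reduction */
	.name		= "example_cc",
	.owner		= THIS_MODULE,
};

static int __init example_cc_init(void)
{
	return tcp_register_congestion_control(&example_cc);
}

static void __exit example_cc_exit(void)
{
	tcp_unregister_congestion_control(&example_cc);
}

module_init(example_cc_init);
module_exit(example_cc_exit);
MODULE_LICENSE("GPL");

Once registered, the name is selectable per socket with setsockopt(TCP_CONGESTION, "example_cc"), the path into tcp_set_congestion_control() at line 1296.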
1318 static inline bool tcp_ca_needs_ecn(const struct sock *sk) in tcp_ca_needs_ecn() argument
1320 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_needs_ecn()
1325 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) in tcp_ca_event() argument
1327 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ca_event()
1330 icsk->icsk_ca_ops->cwnd_event(sk, event); in tcp_ca_event()
1334 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1337 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1338 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1340 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1342 void tcp_rate_check_app_limited(struct sock *sk);
1413 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) in tcp_in_cwnd_reduction() argument
1416 (1 << inet_csk(sk)->icsk_ca_state); in tcp_in_cwnd_reduction()
1423 static inline __u32 tcp_current_ssthresh(const struct sock *sk) in tcp_current_ssthresh() argument
1425 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_ssthresh()
1427 if (tcp_in_cwnd_reduction(sk)) in tcp_current_ssthresh()
1438 void tcp_enter_cwr(struct sock *sk);
1468 static inline bool tcp_is_cwnd_limited(const struct sock *sk) in tcp_is_cwnd_limited() argument
1470 const struct tcp_sock *tp = tcp_sk(sk); in tcp_is_cwnd_limited()
1488 static inline bool tcp_needs_internal_pacing(const struct sock *sk) in tcp_needs_internal_pacing() argument
1490 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; in tcp_needs_internal_pacing()
1496 static inline unsigned long tcp_pacing_delay(const struct sock *sk) in tcp_pacing_delay() argument
1498 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache; in tcp_pacing_delay()
1503 static inline void tcp_reset_xmit_timer(struct sock *sk, in tcp_reset_xmit_timer() argument
1509 when += tcp_pacing_delay(sk); in tcp_reset_xmit_timer()
1510 inet_csk_reset_xmit_timer(sk, what, when, in tcp_reset_xmit_timer()
1511 tcp_rto_max(sk)); in tcp_reset_xmit_timer()
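tcp_needs_internal_pacing() is true only while pacing has been requested but no fq qdisc has taken ownership (SK_PACING_NEEDED as opposed to SK_PACING_FQ); tcp_pacing_delay() then turns the precomputed transmit stamp into extra slack for tcp_reset_xmit_timer(). Pacing can be requested from user space; a sketch, assuming recent kernels where SO_MAX_PACING_RATE flips sk_pacing_status from SK_PACING_NONE to SK_PACING_NEEDED:

#include <sys/socket.h>

static void cap_pacing_rate(int fd)
{
	unsigned int rate = 10 * 1000 * 1000;	/* bytes per second, ~10 MB/s */

	setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate));
}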
1520 static inline unsigned long tcp_probe0_base(const struct sock *sk) in tcp_probe0_base() argument
1522 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); in tcp_probe0_base()
1526 static inline unsigned long tcp_probe0_when(const struct sock *sk, in tcp_probe0_when() argument
1530 inet_csk(sk)->icsk_backoff); in tcp_probe0_when()
1531 u64 when = (u64)tcp_probe0_base(sk) << backoff; in tcp_probe0_when()
1536 static inline void tcp_check_probe_timer(struct sock *sk) in tcp_check_probe_timer() argument
1538 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) in tcp_check_probe_timer()
1539 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, in tcp_check_probe_timer()
1540 tcp_probe0_base(sk), true); in tcp_check_probe_timer()
1568 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1572 int tcp_filter(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason);
1573 void tcp_set_state(struct sock *sk, int state);
1574 void tcp_done(struct sock *sk);
1575 int tcp_abort(struct sock *sk, int err);
1583 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1585 static inline void tcp_slow_start_after_idle_check(struct sock *sk) in tcp_slow_start_after_idle_check() argument
1587 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_slow_start_after_idle_check()
1588 struct tcp_sock *tp = tcp_sk(sk); in tcp_slow_start_after_idle_check()
1591 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || in tcp_slow_start_after_idle_check()
1595 if (delta > inet_csk(sk)->icsk_rto) in tcp_slow_start_after_idle_check()
1596 tcp_cwnd_restart(sk, delta); in tcp_slow_start_after_idle_check()
1600 void tcp_select_initial_window(const struct sock *sk, int __space,
1612 static inline int tcp_win_from_space(const struct sock *sk, int space) in tcp_win_from_space() argument
1614 return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space); in tcp_win_from_space()
1626 static inline int tcp_space_from_win(const struct sock *sk, int win) in tcp_space_from_win() argument
1628 return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); in tcp_space_from_win()
1636 static inline void tcp_scaling_ratio_init(struct sock *sk) in tcp_scaling_ratio_init() argument
1638 tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; in tcp_scaling_ratio_init()
1642 static inline int tcp_space(const struct sock *sk) in tcp_space() argument
1644 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - in tcp_space()
1645 READ_ONCE(sk->sk_backlog.len) - in tcp_space()
1646 atomic_read(&sk->sk_rmem_alloc)); in tcp_space()
1649 static inline int tcp_full_space(const struct sock *sk) in tcp_full_space() argument
1651 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); in tcp_full_space()
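tcp_win_from_space() discounts kernel overhead (struct sk_buff, slack in truesize) when turning receive-buffer bytes into an advertisable window; scaling_ratio is kept in 1/256 units, so the conversion is essentially space * scaling_ratio >> 8. Illustrative arithmetic with assumed values, not kernel defaults:

/* assume payload is ~50% of truesize, i.e. scaling_ratio = 128 */
s64 space = 256 * 1024;			/* sk_rcvbuf minus memory in use */
int win = (space * 128) >> 8;		/* = 131072, a ~128 KiB window */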
1654 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh) in __tcp_adjust_rcv_ssthresh() argument
1656 int unused_mem = sk_unused_reserved_mem(sk); in __tcp_adjust_rcv_ssthresh()
1657 struct tcp_sock *tp = tcp_sk(sk); in __tcp_adjust_rcv_ssthresh()
1662 tcp_win_from_space(sk, unused_mem)); in __tcp_adjust_rcv_ssthresh()
1665 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk) in tcp_adjust_rcv_ssthresh() argument
1667 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss); in tcp_adjust_rcv_ssthresh()
1670 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1671 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1679 static inline bool tcp_rmem_pressure(const struct sock *sk) in tcp_rmem_pressure() argument
1683 if (tcp_under_memory_pressure(sk)) in tcp_rmem_pressure()
1686 rcvbuf = READ_ONCE(sk->sk_rcvbuf); in tcp_rmem_pressure()
1689 return atomic_read(&sk->sk_rmem_alloc) > threshold; in tcp_rmem_pressure()
1692 static inline bool tcp_epollin_ready(const struct sock *sk, int target) in tcp_epollin_ready() argument
1694 const struct tcp_sock *tp = tcp_sk(sk); in tcp_epollin_ready()
1700 return (avail >= target) || tcp_rmem_pressure(sk) || in tcp_epollin_ready()
1701 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss); in tcp_epollin_ready()
1708 void tcp_enter_memory_pressure(struct sock *sk);
1709 void tcp_leave_memory_pressure(struct sock *sk);
1756 static inline int tcp_fin_time(const struct sock *sk) in tcp_fin_time() argument
1758 int fin_timeout = tcp_sk(sk)->linger2 ? : in tcp_fin_time()
1759 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); in tcp_fin_time()
1760 const int rto = inet_csk(sk)->icsk_rto; in tcp_fin_time()
1833 static inline void tcp_fast_path_check(struct sock *sk) in tcp_fast_path_check() argument
1835 struct tcp_sock *tp = tcp_sk(sk); in tcp_fast_path_check()
1839 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && in tcp_fast_path_check()
1941 const struct sock *sk, const struct sk_buff *skb);
1942 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1945 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1949 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1951 void tcp_clear_md5_list(struct sock *sk);
1952 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1956 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1960 tcp_md5_do_lookup(const struct sock *sk, int l3index, in tcp_md5_do_lookup() argument
1965 return __tcp_md5_do_lookup(sk, l3index, addr, family, false); in tcp_md5_do_lookup()
1969 tcp_md5_do_lookup_any_l3index(const struct sock *sk, in tcp_md5_do_lookup_any_l3index() argument
1974 return __tcp_md5_do_lookup(sk, 0, addr, family, true); in tcp_md5_do_lookup_any_l3index()
1978 void tcp_md5_destruct_sock(struct sock *sk);
1981 tcp_md5_do_lookup(const struct sock *sk, int l3index, in tcp_md5_do_lookup() argument
1988 tcp_md5_do_lookup_any_l3index(const struct sock *sk, in tcp_md5_do_lookup_any_l3index() argument
1995 static inline void tcp_md5_destruct_sock(struct sock *sk) in tcp_md5_destruct_sock() argument
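The md5 block implements RFC 2385 TCP-MD5 signatures; tcp_md5_do_add() and tcp_md5_do_del() are driven by the TCP_MD5SIG/TCP_MD5SIG_EXT socket options. A user-space sketch keying one IPv4 peer (helper name ours; error handling elided):

#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void add_md5_peer(int fd, const struct sockaddr_in *peer)
{
	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };

	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	memcpy(md5.tcpm_key, "secret", 6);
	/* lands in tcp_md5_do_add() via the af_specific ->md5_parse() hook */
	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}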
2009 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2011 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2023 void tcp_fastopen_destroy_cipher(struct sock *sk);
2025 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
2029 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
2030 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
2035 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
2037 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
2050 void tcp_fastopen_active_disable(struct sock *sk);
2051 bool tcp_fastopen_active_should_disable(struct sock *sk);
2052 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
2053 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
2057 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk) in tcp_fastopen_get_ctx() argument
2061 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); in tcp_fastopen_get_ctx()
2063 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx); in tcp_fastopen_get_ctx()
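The fastopen group covers both ends of TCP Fast Open (RFC 7413): cookie generation and checking on the listener (tcp_try_fastopen), cookie caching and deferred connect on the client. The user-space switches that drive it, as a sketch:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void enable_tfo(int listen_fd, int client_fd)
{
	int qlen = 16, on = 1;

	/* listener: accept data in the SYN; cookie verified in tcp_try_fastopen() */
	setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));

	/* client: connect() returns at once and the SYN is deferred to the
	 * first write, see tcp_fastopen_defer_connect() */
	setsockopt(client_fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
}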
2095 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
2096 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2115 void tcp_write_queue_purge(struct sock *sk);
2117 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) in tcp_rtx_queue_head() argument
2119 return skb_rb_first(&sk->tcp_rtx_queue); in tcp_rtx_queue_head()
2122 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) in tcp_rtx_queue_tail() argument
2124 return skb_rb_last(&sk->tcp_rtx_queue); in tcp_rtx_queue_tail()
2127 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) in tcp_write_queue_tail() argument
2129 return skb_peek_tail(&sk->sk_write_queue); in tcp_write_queue_tail()
2132 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ argument
2133 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2135 static inline struct sk_buff *tcp_send_head(const struct sock *sk) in tcp_send_head() argument
2137 return skb_peek(&sk->sk_write_queue); in tcp_send_head()
2140 static inline bool tcp_skb_is_last(const struct sock *sk, in tcp_skb_is_last() argument
2143 return skb_queue_is_last(&sk->sk_write_queue, skb); in tcp_skb_is_last()
2153 static inline bool tcp_write_queue_empty(const struct sock *sk) in tcp_write_queue_empty() argument
2155 const struct tcp_sock *tp = tcp_sk(sk); in tcp_write_queue_empty()
2160 static inline bool tcp_rtx_queue_empty(const struct sock *sk) in tcp_rtx_queue_empty() argument
2162 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); in tcp_rtx_queue_empty()
2165 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) in tcp_rtx_and_write_queues_empty() argument
2167 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); in tcp_rtx_and_write_queues_empty()
2170 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) in tcp_add_write_queue_tail() argument
2172 __skb_queue_tail(&sk->sk_write_queue, skb); in tcp_add_write_queue_tail()
2175 if (sk->sk_write_queue.next == skb) in tcp_add_write_queue_tail()
2176 tcp_chrono_start(sk, TCP_CHRONO_BUSY); in tcp_add_write_queue_tail()
2182 struct sock *sk) in tcp_insert_write_queue_before() argument
2184 __skb_queue_before(&sk->sk_write_queue, skb, new); in tcp_insert_write_queue_before()
2187 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) in tcp_unlink_write_queue() argument
2190 __skb_unlink(skb, &sk->sk_write_queue); in tcp_unlink_write_queue()
2195 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink() argument
2198 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); in tcp_rtx_queue_unlink()
2201 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) in tcp_rtx_queue_unlink_and_free() argument
2204 tcp_rtx_queue_unlink(skb, sk); in tcp_rtx_queue_unlink_and_free()
2205 tcp_wmem_free_skb(sk, skb); in tcp_rtx_queue_unlink_and_free()
2208 static inline void tcp_write_collapse_fence(struct sock *sk) in tcp_write_collapse_fence() argument
2210 struct sk_buff *skb = tcp_write_queue_tail(sk); in tcp_write_collapse_fence()
2216 static inline void tcp_push_pending_frames(struct sock *sk) in tcp_push_pending_frames() argument
2218 if (tcp_send_head(sk)) { in tcp_push_pending_frames()
2219 struct tcp_sock *tp = tcp_sk(sk); in tcp_push_pending_frames()
2221 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); in tcp_push_pending_frames()
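These helpers hide the fact that outstanding data lives in two places: skbs already sent and awaiting ACKs sit in the tcp_rtx_queue rb-tree, while unsent skbs sit in the sk_write_queue list. A debug-style sketch of walking both, assuming the caller holds the socket lock:

#include <net/tcp.h>

static void dump_tcp_queues(struct sock *sk)
{
	struct sk_buff *skb;

	skb_rbtree_walk(skb, &sk->tcp_rtx_queue)	/* sent, not yet fully acked */
		pr_info("rtx:     seq %u\n", TCP_SKB_CB(skb)->seq);

	skb_queue_walk(&sk->sk_write_queue, skb)	/* queued, not yet sent */
		pr_info("pending: seq %u\n", TCP_SKB_CB(skb)->seq);
}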
2240 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) in tcp_advance_highest_sack() argument
2242 tcp_sk(sk)->highest_sack = skb_rb_next(skb); in tcp_advance_highest_sack()
2245 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) in tcp_highest_sack() argument
2247 return tcp_sk(sk)->highest_sack; in tcp_highest_sack()
2250 static inline void tcp_highest_sack_reset(struct sock *sk) in tcp_highest_sack_reset() argument
2252 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); in tcp_highest_sack_reset()
2256 static inline void tcp_highest_sack_replace(struct sock *sk, in tcp_highest_sack_replace() argument
2260 if (old == tcp_highest_sack(sk)) in tcp_highest_sack_replace()
2261 tcp_sk(sk)->highest_sack = new; in tcp_highest_sack_replace()
2265 static inline bool inet_sk_transparent(const struct sock *sk) in inet_sk_transparent() argument
2267 switch (sk->sk_state) { in inet_sk_transparent()
2269 return inet_twsk(sk)->tw_transparent; in inet_sk_transparent()
2271 return inet_rsk(inet_reqsk(sk))->no_srccheck; in inet_sk_transparent()
2273 return inet_test_bit(TRANSPARENT, sk); in inet_sk_transparent()
2309 void tcp_v4_destroy_sock(struct sock *sk);
2339 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2346 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2349 struct sock *sk, struct sk_buff *skb);
2354 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2358 const struct sock *sk,
2360 int (*md5_parse)(struct sock *sk,
2366 int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2367 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2371 const struct sock *sk,
2374 const struct sock *sk, const struct sk_buff *skb,
2382 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2386 const struct sock *sk,
2390 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2393 int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2402 struct dst_entry *(*route_req)(const struct sock *sk,
2409 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2423 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2426 tcp_synq_overflow(sk); in cookie_init_sequence()
2427 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); in cookie_init_sequence()
2432 const struct sock *sk, struct sk_buff *skb, in cookie_init_sequence() argument
2456 static inline void tcp_get_current_key(const struct sock *sk, in tcp_get_current_key() argument
2460 const struct tcp_sock *tp = tcp_sk(sk); in tcp_get_current_key()
2468 lockdep_sock_is_held(sk)); in tcp_get_current_key()
2479 out->md5_key = tp->af_specific->md5_lookup(sk, sk); in tcp_get_current_key()
2509 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2510 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2513 extern bool tcp_rack_mark_lost(struct sock *sk);
2516 extern void tcp_rack_reo_timeout(struct sock *sk);
2517 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2536 static inline void tcp_plb_init(const struct sock *sk, in tcp_plb_init() argument
2542 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2544 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2545 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2547 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str) in tcp_warn_once() argument
2552 tcp_snd_cwnd(tcp_sk(sk)), in tcp_warn_once()
2553 tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, in tcp_warn_once()
2554 tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, in tcp_warn_once()
2555 tcp_sk(sk)->tlp_high_seq, sk->sk_state, in tcp_warn_once()
2556 inet_csk(sk)->icsk_ca_state, in tcp_warn_once()
2557 tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, in tcp_warn_once()
2558 inet_csk(sk)->icsk_pmtu_cookie); in tcp_warn_once()
2562 static inline s64 tcp_rto_delta_us(const struct sock *sk) in tcp_rto_delta_us() argument
2564 const struct sk_buff *skb = tcp_rtx_queue_head(sk); in tcp_rto_delta_us()
2565 u32 rto = inet_csk(sk)->icsk_rto; in tcp_rto_delta_us()
2570 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; in tcp_rto_delta_us()
2572 tcp_warn_once(sk, 1, "rtx queue empty: "); in tcp_rto_delta_us()
2614 static inline int tcp_inq(struct sock *sk) in tcp_inq() argument
2616 struct tcp_sock *tp = tcp_sk(sk); in tcp_inq()
2619 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in tcp_inq()
2621 } else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_inq()
2629 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_inq()
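tcp_inq() is what the SIOCINQ/FIONREAD ioctl reports: bytes already received and readable without blocking, zero while the handshake is still in SYN_SENT/SYN_RECV. User-space counterpart (helper name ours):

#include <sys/ioctl.h>
#include <linux/sockios.h>

static int tcp_bytes_readable(int fd)
{
	int avail = 0;

	ioctl(fd, SIOCINQ, &avail);	/* answered in the kernel via tcp_inq() */
	return avail;
}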
2661 static inline void tcp_listendrop(const struct sock *sk) in tcp_listendrop() argument
2663 sk_drops_inc((struct sock *)sk); in tcp_listendrop()
2664 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_listendrop()
2681 int (*init)(struct sock *sk);
2683 void (*update)(struct sock *sk, struct proto *p,
2684 void (*write_space)(struct sock *sk));
2686 void (*release)(struct sock *sk);
2688 int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
2689 size_t (*get_info_size)(const struct sock *sk, bool net_admin);
2699 int tcp_set_ulp(struct sock *sk, const char *name);
2701 void tcp_cleanup_ulp(struct sock *sk);
2702 void tcp_update_ulp(struct sock *sk, struct proto *p,
2703 void (*write_space)(struct sock *sk));
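tcp_set_ulp() is reached through the TCP_ULP socket option, which selects one of these tcp_ulp_ops tables by name; kernel TLS is the usual user. Sketch (requires the tls module):

#include <netinet/tcp.h>
#include <sys/socket.h>

static int attach_ktls(int fd)
{
	/* the setsockopt path into tcp_set_ulp() */
	return setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
}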
2714 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2715 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2724 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2726 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) in tcp_eat_skb() argument
2731 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2736 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) in tcp_bpf_clone() argument
2763 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2769 if (sk_fullsock(sk)) { in tcp_call_bpf()
2772 sock_owned_by_me(sk); in tcp_call_bpf()
2775 sock_ops.sk = sk; in tcp_call_bpf()
2788 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2792 return tcp_call_bpf(sk, op, 2, args); in tcp_call_bpf_2arg()
2795 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2800 return tcp_call_bpf(sk, op, 3, args); in tcp_call_bpf_3arg()
2804 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) in tcp_call_bpf() argument
2809 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) in tcp_call_bpf_2arg() argument
2814 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, in tcp_call_bpf_3arg() argument
2822 static inline u32 tcp_timeout_init(struct sock *sk) in tcp_timeout_init() argument
2826 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); in tcp_timeout_init()
2833 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) in tcp_rwnd_init_bpf() argument
2837 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); in tcp_rwnd_init_bpf()
2844 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) in tcp_bpf_ca_needs_ecn() argument
2846 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); in tcp_bpf_ca_needs_ecn()
2849 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt) in tcp_bpf_rtt() argument
2851 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) in tcp_bpf_rtt()
2852 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt); in tcp_bpf_rtt()
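tcp_call_bpf() runs the attached cgroup sock_ops program and, when the program returns success, reads the answer back from sock_ops.reply; a program therefore stores its result in skops->reply and returns 1 (or leaves -1 to keep kernel defaults). A hypothetical libbpf-style program feeding tcp_timeout_init() and tcp_rwnd_init_bpf(), with made-up values:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int tcp_tuner(struct bpf_sock_ops *skops)
{
	int rv = -1;			/* -1: no opinion, keep defaults */

	switch (skops->op) {
	case BPF_SOCK_OPS_TIMEOUT_INIT:
		rv = 10;		/* initial SYN-RTO, in jiffies */
		break;
	case BPF_SOCK_OPS_RWND_INIT:
		rv = 40;		/* initial rcv window, in MSS units */
		break;
	}
	skops->reply = rv;
	return 1;
}

char _license[] SEC("license") = "GPL";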
2861 void (*cad)(struct sock *sk, u32 ack_seq));
2877 static inline u64 tcp_transmit_time(const struct sock *sk) in tcp_transmit_time() argument
2880 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ? in tcp_transmit_time()
2881 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay; in tcp_transmit_time()
2911 static inline bool tcp_ao_required(struct sock *sk, const void *saddr, in tcp_ao_required() argument
2921 ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info, in tcp_ao_required()
2922 lockdep_sock_is_held(sk)); in tcp_ao_required()
2926 ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1); in tcp_ao_required()
2929 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED); in tcp_ao_required()
2938 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,