Lines matching refs: sk (net/ipv4/inet_hashtables.c)
47 static u32 sk_ehashfn(const struct sock *sk) in sk_ehashfn() argument
50 if (sk->sk_family == AF_INET6 && in sk_ehashfn()
51 !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) in sk_ehashfn()
52 return inet6_ehashfn(sock_net(sk), in sk_ehashfn()
53 &sk->sk_v6_rcv_saddr, sk->sk_num, in sk_ehashfn()
54 &sk->sk_v6_daddr, sk->sk_dport); in sk_ehashfn()
56 return inet_ehashfn(sock_net(sk), in sk_ehashfn()
57 sk->sk_rcv_saddr, sk->sk_num, in sk_ehashfn()
58 sk->sk_daddr, sk->sk_dport); in sk_ehashfn()
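sk_ehashfn() hashes the connection 4-tuple, taking the IPv6 path only when the peer address is not v4-mapped, so v4-mapped IPv6 connections land in the same buckets as plain IPv4 ones. A standalone sketch of that dispatch, assuming a toy mixer and a single seed in place of the kernel's per-netns inet_ehashfn()/inet6_ehashfn():

    /* Simplified model of the established-hash dispatch in sk_ehashfn(). */
    #include <stdint.h>
    #include <string.h>
    #include <netinet/in.h>

    static uint32_t mix32(uint32_t acc, uint32_t v)        /* placeholder mixer */
    {
            acc ^= v + 0x9e3779b9u + (acc << 6) + (acc >> 2);
            return acc;
    }

    static uint32_t ehash_v4(uint32_t seed, uint32_t saddr, uint16_t sport,
                             uint32_t daddr, uint16_t dport)
    {
            uint32_t h = mix32(seed, saddr);

            h = mix32(h, daddr);
            return mix32(h, ((uint32_t)sport << 16) | dport);
    }

    static uint32_t ehash_v6(uint32_t seed, const struct in6_addr *saddr, uint16_t sport,
                             const struct in6_addr *daddr, uint16_t dport)
    {
            uint32_t w[8], h = seed;

            memcpy(w, saddr->s6_addr, 16);
            memcpy(w + 4, daddr->s6_addr, 16);
            for (int i = 0; i < 8; i++)
                    h = mix32(h, w[i]);
            return mix32(h, ((uint32_t)sport << 16) | dport);
    }

    /* Same rule as sk_ehashfn(): only a non-v4-mapped peer takes the IPv6 path. */
    static uint32_t tuple_ehash(int family, uint32_t seed,
                                const struct in6_addr *s6, const struct in6_addr *d6,
                                uint32_t s4, uint32_t d4,
                                uint16_t sport, uint16_t dport)
    {
            if (family == AF_INET6 && !IN6_IS_ADDR_V4MAPPED(d6))
                    return ehash_v6(seed, s6, sport, d6, dport);
            return ehash_v4(seed, s4, sport, d4, dport);
    }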
61 static bool sk_is_connect_bind(const struct sock *sk) in sk_is_connect_bind() argument
63 if (sk->sk_state == TCP_TIME_WAIT) in sk_is_connect_bind()
64 return inet_twsk(sk)->tw_connect_bind; in sk_is_connect_bind()
66 return sk->sk_userlocks & SOCK_CONNECT_BIND; in sk_is_connect_bind()
127 const struct sock *sk) in inet_bind2_bucket_init() argument
134 if (sk->sk_family == AF_INET6) { in inet_bind2_bucket_init()
135 tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); in inet_bind2_bucket_init()
136 tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr; in inet_bind2_bucket_init()
139 ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr); in inet_bind2_bucket_init()
142 tb2->rcv_saddr = sk->sk_rcv_saddr; in inet_bind2_bucket_init()
155 const struct sock *sk) in inet_bind2_bucket_create() argument
160 inet_bind2_bucket_init(tb2, net, head, tb, sk); in inet_bind2_bucket_create()
168 const struct sock *sk; in inet_bind2_bucket_destroy() local
179 sk_for_each_bound(sk, &tb->owners) { in inet_bind2_bucket_destroy()
180 if (!sk_is_connect_bind(sk)) in inet_bind2_bucket_destroy()
188 const struct sock *sk) in inet_bind2_bucket_addr_match() argument
191 if (sk->sk_family == AF_INET6) in inet_bind2_bucket_addr_match()
192 return ipv6_addr_equal(&tb2->v6_rcv_saddr, &sk->sk_v6_rcv_saddr); in inet_bind2_bucket_addr_match()
197 return tb2->rcv_saddr == sk->sk_rcv_saddr; in inet_bind2_bucket_addr_match()
200 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, in inet_bind_hash() argument
203 inet_sk(sk)->inet_num = port; in inet_bind_hash()
204 inet_csk(sk)->icsk_bind_hash = tb; in inet_bind_hash()
205 inet_csk(sk)->icsk_bind2_hash = tb2; in inet_bind_hash()
206 sk_add_bind_node(sk, &tb2->owners); in inet_bind_hash()
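inet_bind_hash() wires a socket into both bind tables at once: icsk_bind_hash points at the per-port bucket and icsk_bind2_hash at the per-(port, address) bucket, and the socket is pushed onto that second bucket's owner list. A rough model of the layout, with hypothetical struct and field names standing in for the kernel's:

    #include <stdint.h>

    struct bound_sock;

    struct bind_bucket {                    /* ~ inet_bind_bucket: per-port state */
            uint16_t port;
            int      fastreuse;
    };

    struct bind2_bucket {                   /* ~ inet_bind2_bucket: per (port, addr) */
            uint16_t           port;
            uint32_t           rcv_saddr;
            struct bound_sock *owners;      /* sockets bound to this (port, addr) */
    };

    struct bound_sock {
            uint16_t             local_port; /* ~ inet_num */
            struct bind_bucket  *bind_hash;  /* ~ icsk_bind_hash */
            struct bind2_bucket *bind2_hash; /* ~ icsk_bind2_hash */
            struct bound_sock   *bind_node;  /* ~ the sk_add_bind_node() linkage */
    };

    /* ~ inet_bind_hash(): record the port and link the socket into both levels. */
    static void bind_hash(struct bound_sock *sk, struct bind_bucket *tb,
                          struct bind2_bucket *tb2, uint16_t port)
    {
            sk->local_port = port;
            sk->bind_hash  = tb;
            sk->bind2_hash = tb2;
            sk->bind_node  = tb2->owners;   /* push onto tb2's owner list */
            tb2->owners    = sk;
    }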
212 static void __inet_put_port(struct sock *sk) in __inet_put_port() argument
214 struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk); in __inet_put_port()
216 struct net *net = sock_net(sk); in __inet_put_port()
220 bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size); in __inet_put_port()
222 head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num); in __inet_put_port()
225 tb = inet_csk(sk)->icsk_bind_hash; in __inet_put_port()
226 inet_csk(sk)->icsk_bind_hash = NULL; in __inet_put_port()
227 inet_sk(sk)->inet_num = 0; in __inet_put_port()
228 sk->sk_userlocks &= ~SOCK_CONNECT_BIND; in __inet_put_port()
231 if (inet_csk(sk)->icsk_bind2_hash) { in __inet_put_port()
232 struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash; in __inet_put_port()
234 __sk_del_bind_node(sk); in __inet_put_port()
235 inet_csk(sk)->icsk_bind2_hash = NULL; in __inet_put_port()
244 void inet_put_port(struct sock *sk) in inet_put_port() argument
247 __inet_put_port(sk); in inet_put_port()
252 int __inet_inherit_port(const struct sock *sk, struct sock *child) in __inet_inherit_port() argument
254 struct inet_hashinfo *table = tcp_get_hashinfo(sk); in __inet_inherit_port()
258 struct net *net = sock_net(sk); in __inet_inherit_port()
270 tb = inet_csk(sk)->icsk_bind_hash; in __inet_inherit_port()
271 tb2 = inet_csk(sk)->icsk_bind2_hash; in __inet_inherit_port()
278 l3mdev = inet_sk_bound_l3mdev(sk); in __inet_inherit_port()
303 l3mdev = inet_sk_bound_l3mdev(sk); in __inet_inherit_port()
332 inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk) in inet_lhash2_bucket_sk() argument
337 if (sk->sk_family == AF_INET6) in inet_lhash2_bucket_sk()
338 hash = ipv6_portaddr_hash(sock_net(sk), in inet_lhash2_bucket_sk()
339 &sk->sk_v6_rcv_saddr, in inet_lhash2_bucket_sk()
340 inet_sk(sk)->inet_num); in inet_lhash2_bucket_sk()
343 hash = ipv4_portaddr_hash(sock_net(sk), in inet_lhash2_bucket_sk()
344 inet_sk(sk)->inet_rcv_saddr, in inet_lhash2_bucket_sk()
345 inet_sk(sk)->inet_num); in inet_lhash2_bucket_sk()
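inet_lhash2_bucket_sk() picks the listener's lhash2 bucket from (netns, local address, local port) via ipv4_portaddr_hash() or ipv6_portaddr_hash(). A toy (address, port) bucket index with a placeholder mixer and a fixed power-of-two table, assuming nothing about the kernel's actual hash:

    #include <stdint.h>

    #define LHASH2_SLOTS 256u   /* assume a power-of-two table size */

    static unsigned int portaddr_bucket(uint32_t net_seed, uint32_t rcv_saddr,
                                        uint16_t local_port)
    {
            uint32_t h = net_seed ^ rcv_saddr;

            h ^= (uint32_t)local_port * 2654435761u;
            h ^= h >> 16;
            return h & (LHASH2_SLOTS - 1);  /* mask works because size is 2^n */
    }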
349 static inline int compute_score(struct sock *sk, const struct net *net, in compute_score() argument
355 if (net_eq(sock_net(sk), net) && sk->sk_num == hnum && in compute_score()
356 !ipv6_only_sock(sk)) { in compute_score()
357 if (sk->sk_rcv_saddr != daddr) in compute_score()
360 if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) in compute_score()
362 score = sk->sk_bound_dev_if ? 2 : 1; in compute_score()
364 if (sk->sk_family == PF_INET) in compute_score()
366 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) in compute_score()
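compute_score() first applies hard filters (netns, port, not v6-only, exact local address, bound device), then ranks survivors: device-bound listeners score 2 versus 1, with extra points for AF_INET and for a matching sk_incoming_cpu. Wildcard listeners are handled by a separate zero-address bucket lookup in the caller, which is why the address check here is exact. A restatement of those rules over an invented listener struct:

    #include <stdbool.h>

    struct listener {                      /* hypothetical stand-in for the socket */
            unsigned int   netns_id;
            unsigned short num;            /* bound local port */
            unsigned int   rcv_saddr;      /* bound local address */
            int            bound_dev_if;   /* 0 = not bound to a device */
            bool           ipv6_only;
            bool           is_af_inet;
            int            incoming_cpu;
    };

    static int score_listener(const struct listener *l, unsigned int netns_id,
                              unsigned short hnum, unsigned int daddr,
                              int dif, int this_cpu)
    {
            int score;

            /* Hard filters: wrong netns, port, v6-only, address or device
             * means "not a candidate at all". */
            if (l->netns_id != netns_id || l->num != hnum || l->ipv6_only)
                    return -1;
            if (l->rcv_saddr != daddr)
                    return -1;
            if (l->bound_dev_if && l->bound_dev_if != dif)
                    return -1;

            score = l->bound_dev_if ? 2 : 1;   /* device-bound listeners rank higher */
            if (l->is_af_inet)
                    score++;                   /* plain AF_INET beats v4-mapped IPv6 */
            if (l->incoming_cpu == this_cpu)
                    score++;                   /* prefer the CPU-affine socket */
            return score;
    }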
387 struct sock *inet_lookup_reuseport(const struct net *net, struct sock *sk, in inet_lookup_reuseport() argument
396 if (sk->sk_reuseport) { in inet_lookup_reuseport()
399 reuse_sk = reuseport_select_sock(sk, phash, skb, doff); in inet_lookup_reuseport()
420 struct sock *sk, *result = NULL; in inet_lhash2_lookup() local
424 sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) { in inet_lhash2_lookup()
425 score = compute_score(sk, net, hnum, daddr, dif, sdif); in inet_lhash2_lookup()
427 result = inet_lookup_reuseport(net, sk, skb, doff, in inet_lhash2_lookup()
432 result = sk; in inet_lhash2_lookup()
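inet_lhash2_lookup() walks one nulls-list bucket, keeps the best compute_score() result, and short-circuits through inet_lookup_reuseport() so a SO_REUSEPORT group can pick its own member from a flow hash. A reduced sketch of that "scan, keep best, let the group override" loop; the list type and the selection helper are simplified stand-ins:

    #include <stddef.h>

    struct candidate {
            struct candidate *next;
            int               score;               /* as computed by the scoring pass */
            int               in_reuseport_group;  /* nonzero for SO_REUSEPORT members */
    };

    /* Stand-in for reuseport_select_sock(): the kernel hashes the flow to pick
     * one group member; the sketch just returns the member it was handed. */
    static struct candidate *reuseport_pick(struct candidate *member,
                                            unsigned int flow_hash)
    {
            (void)flow_hash;
            return member;
    }

    static struct candidate *best_listener(struct candidate *bucket,
                                           unsigned int flow_hash)
    {
            struct candidate *c, *result = NULL;
            int hiscore = 0;

            for (c = bucket; c; c = c->next) {
                    if (c->score <= hiscore)
                            continue;
                    if (c->in_reuseport_group) {
                            struct candidate *r = reuseport_pick(c, flow_hash);

                            if (r)
                                    return r;       /* the group makes the final pick */
                    }
                    result = c;
                    hiscore = c->score;
            }
            return result;
    }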
447 struct sock *sk, *reuse_sk; in inet_lookup_run_sk_lookup() local
451 daddr, hnum, dif, &sk); in inet_lookup_run_sk_lookup()
452 if (no_reuseport || IS_ERR_OR_NULL(sk)) in inet_lookup_run_sk_lookup()
453 return sk; in inet_lookup_run_sk_lookup()
455 reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, in inet_lookup_run_sk_lookup()
458 sk = reuse_sk; in inet_lookup_run_sk_lookup()
459 return sk; in inet_lookup_run_sk_lookup()
507 void sock_gen_put(struct sock *sk) in sock_gen_put() argument
509 if (!refcount_dec_and_test(&sk->sk_refcnt)) in sock_gen_put()
512 if (sk->sk_state == TCP_TIME_WAIT) in sock_gen_put()
513 inet_twsk_free(inet_twsk(sk)); in sock_gen_put()
514 else if (sk->sk_state == TCP_NEW_SYN_RECV) in sock_gen_put()
515 reqsk_free(inet_reqsk(sk)); in sock_gen_put()
517 sk_free(sk); in sock_gen_put()
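sock_gen_put() drops one reference and, only on the final put, dispatches on socket state: TIME_WAIT entries go to inet_twsk_free(), NEW_SYN_RECV request sockets to reqsk_free(), full sockets to sk_free(). The same drop-then-dispatch pattern in plain C11 atomics; the object kinds and destructors here are invented:

    #include <stdatomic.h>
    #include <stdlib.h>

    enum obj_kind { OBJ_FULL, OBJ_TIMEWAIT, OBJ_REQUEST };

    struct obj {
            atomic_int    refcnt;
            enum obj_kind kind;
    };

    static void free_timewait(struct obj *o) { free(o); }   /* ~ inet_twsk_free() */
    static void free_request(struct obj *o)  { free(o); }   /* ~ reqsk_free() */
    static void free_full(struct obj *o)     { free(o); }   /* ~ sk_free() */

    static void obj_put(struct obj *o)
    {
            /* fetch_sub returns the old value: 1 means we dropped the last ref. */
            if (atomic_fetch_sub_explicit(&o->refcnt, 1, memory_order_acq_rel) != 1)
                    return;

            switch (o->kind) {
            case OBJ_TIMEWAIT: free_timewait(o); break;
            case OBJ_REQUEST:  free_request(o);  break;
            default:           free_full(o);     break;
            }
    }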
523 sock_gen_put(skb->sk); in sock_edemux()
538 struct sock *sk; in __inet_lookup_established() local
546 sk_nulls_for_each_rcu(sk, node, &head->chain) { in __inet_lookup_established()
547 if (sk->sk_hash != hash) in __inet_lookup_established()
549 if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) { in __inet_lookup_established()
550 if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) in __inet_lookup_established()
552 if (unlikely(!inet_match(net, sk, acookie, in __inet_lookup_established()
554 sock_gen_put(sk); in __inet_lookup_established()
568 sk = NULL; in __inet_lookup_established()
570 return sk; in __inet_lookup_established()
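__inet_lookup_established() scans the ehash nulls chain under RCU: candidates are filtered by sk_hash, matched with inet_match(), pinned with refcount_inc_not_zero(), and then re-matched because the socket can be freed and recycled between the first match and the reference bump; hitting the wrong nulls marker restarts the scan. The match/try-ref/re-validate step in isolation, with illustrative types and a CAS loop in place of refcount_inc_not_zero():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
            atomic_int         refcnt;      /* 0 means "being destroyed, keep out" */
            unsigned int       hash;
            unsigned long long key;         /* stands in for 4-tuple + netns cookie */
    };

    static bool try_get_ref(struct entry *e)        /* ~ refcount_inc_not_zero() */
    {
            int old = atomic_load(&e->refcnt);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
                            return true;
            }
            return false;
    }

    static void put_ref(struct entry *e)            /* ~ sock_gen_put() */
    {
            atomic_fetch_sub(&e->refcnt, 1);
    }

    /* Returns the entry with a reference held, or NULL if it no longer matches. */
    static struct entry *validate_candidate(struct entry *e, unsigned int hash,
                                            unsigned long long key)
    {
            if (e->hash != hash || e->key != key)
                    return NULL;
            if (!try_get_ref(e))
                    return NULL;                    /* raced with destruction */
            if (e->hash != hash || e->key != key) { /* re-check after the ref bump */
                    put_ref(e);
                    return NULL;
            }
            return e;
    }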
576 struct sock *sk, __u16 lport, in __inet_check_established() argument
582 struct inet_sock *inet = inet_sk(sk); in __inet_check_established()
585 int dif = sk->sk_bound_dev_if; in __inet_check_established()
586 struct net *net = sock_net(sk); in __inet_check_established()
618 if (tcp_twsk_unique(sk, sk2, twp)) in __inet_check_established()
630 sk->sk_hash = hash; in __inet_check_established()
631 WARN_ON(!sk_unhashed(sk)); in __inet_check_established()
632 __sk_nulls_add_node_rcu(sk, &head->chain); in __inet_check_established()
638 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in __inet_check_established()
653 static u64 inet_sk_port_offset(const struct sock *sk) in inet_sk_port_offset() argument
655 const struct inet_sock *inet = inet_sk(sk); in inet_sk_port_offset()
665 static bool inet_ehash_lookup_by_sk(struct sock *sk, in inet_ehash_lookup_by_sk() argument
668 const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num); in inet_ehash_lookup_by_sk()
669 const int sdif = sk->sk_bound_dev_if; in inet_ehash_lookup_by_sk()
670 const int dif = sk->sk_bound_dev_if; in inet_ehash_lookup_by_sk()
672 struct net *net = sock_net(sk); in inet_ehash_lookup_by_sk()
675 INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr); in inet_ehash_lookup_by_sk()
678 if (esk->sk_hash != sk->sk_hash) in inet_ehash_lookup_by_sk()
680 if (sk->sk_family == AF_INET) { in inet_ehash_lookup_by_sk()
687 else if (sk->sk_family == AF_INET6) { in inet_ehash_lookup_by_sk()
689 &sk->sk_v6_daddr, in inet_ehash_lookup_by_sk()
690 &sk->sk_v6_rcv_saddr, in inet_ehash_lookup_by_sk()
705 bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) in inet_ehash_insert() argument
707 struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk); in inet_ehash_insert()
713 WARN_ON_ONCE(!sk_unhashed(sk)); in inet_ehash_insert()
715 sk->sk_hash = sk_ehashfn(sk); in inet_ehash_insert()
716 head = inet_ehash_bucket(hashinfo, sk->sk_hash); in inet_ehash_insert()
718 lock = inet_ehash_lockp(hashinfo, sk->sk_hash); in inet_ehash_insert()
722 WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); in inet_ehash_insert()
725 *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); in inet_ehash_insert()
731 __sk_nulls_add_node_rcu(sk, list); in inet_ehash_insert()
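inet_ehash_insert() computes sk_hash, takes the per-bucket ehash lock, and either replaces the timewait socket osk (whose hash must match) or, if asked, scans for an already-inserted duplicate via inet_ehash_lookup_by_sk() and skips the insert when one exists. A locked-bucket insert with an optional duplicate scan, shaped like that path; the node type and a mutex stand in for the kernel's structures and spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node {
            struct node       *next;
            unsigned int       hash;
            unsigned long long key;         /* stands in for the full 4-tuple match */
    };

    struct ebucket {
            pthread_mutex_t lock;           /* ~ the per-bucket ehash spinlock */
            struct node    *head;
    };

    static bool bucket_insert(struct ebucket *b, struct node *n, bool *found_dup)
    {
            bool ok = true;

            pthread_mutex_lock(&b->lock);

            if (found_dup) {                        /* ~ inet_ehash_lookup_by_sk() */
                    struct node *it;

                    *found_dup = false;
                    for (it = b->head; it; it = it->next) {
                            if (it->hash == n->hash && it->key == n->key) {
                                    *found_dup = true;
                                    ok = false;     /* leave the duplicate in place */
                                    break;
                            }
                    }
            }

            if (ok) {                               /* ~ __sk_nulls_add_node_rcu() */
                    n->next = b->head;
                    b->head = n;
            }

            pthread_mutex_unlock(&b->lock);
            return ok;
    }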
738 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk) in inet_ehash_nolisten() argument
740 bool ok = inet_ehash_insert(sk, osk, found_dup_sk); in inet_ehash_nolisten()
743 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in inet_ehash_nolisten()
746 inet_sk_set_state(sk, TCP_CLOSE); in inet_ehash_nolisten()
747 sock_set_flag(sk, SOCK_DEAD); in inet_ehash_nolisten()
748 inet_csk_destroy_sock(sk); in inet_ehash_nolisten()
754 static int inet_reuseport_add_sock(struct sock *sk, in inet_reuseport_add_sock() argument
757 struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; in inet_reuseport_add_sock()
759 kuid_t uid = sk_uid(sk); in inet_reuseport_add_sock()
763 if (sk2 != sk && in inet_reuseport_add_sock()
764 sk2->sk_family == sk->sk_family && in inet_reuseport_add_sock()
765 ipv6_only_sock(sk2) == ipv6_only_sock(sk) && in inet_reuseport_add_sock()
766 sk2->sk_bound_dev_if == sk->sk_bound_dev_if && in inet_reuseport_add_sock()
769 inet_rcv_saddr_equal(sk, sk2, false)) in inet_reuseport_add_sock()
770 return reuseport_add_sock(sk, sk2, in inet_reuseport_add_sock()
771 inet_rcv_saddr_any(sk)); in inet_reuseport_add_sock()
774 return reuseport_alloc(sk, inet_rcv_saddr_any(sk)); in inet_reuseport_add_sock()
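inet_reuseport_add_sock() walks the bind bucket's owners looking for an existing listener with the same family, v6-only setting, bound device, owning uid and a matching receive address; if one is found the new socket joins its reuseport group, otherwise a fresh group is allocated. The membership test in isolation, with invented field names and a simplified address comparison in place of inet_rcv_saddr_equal():

    #include <stdbool.h>

    struct rsock {
            int          family;
            bool         ipv6_only;
            int          bound_dev_if;
            unsigned int uid;
            unsigned int rcv_saddr;
    };

    static bool can_share_reuseport_group(const struct rsock *a, const struct rsock *b)
    {
            return a->family == b->family &&
                   a->ipv6_only == b->ipv6_only &&
                   a->bound_dev_if == b->bound_dev_if &&
                   a->uid == b->uid &&               /* same owning uid required */
                   a->rcv_saddr == b->rcv_saddr;     /* simplified address match */
    }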
777 int inet_hash(struct sock *sk) in inet_hash() argument
779 struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk); in inet_hash()
783 if (sk->sk_state == TCP_CLOSE) in inet_hash()
786 if (sk->sk_state != TCP_LISTEN) { in inet_hash()
788 inet_ehash_nolisten(sk, NULL, NULL); in inet_hash()
792 WARN_ON(!sk_unhashed(sk)); in inet_hash()
793 ilb2 = inet_lhash2_bucket_sk(hashinfo, sk); in inet_hash()
796 if (sk->sk_reuseport) { in inet_hash()
797 err = inet_reuseport_add_sock(sk, ilb2); in inet_hash()
801 sock_set_flag(sk, SOCK_RCU_FREE); in inet_hash()
802 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && in inet_hash()
803 sk->sk_family == AF_INET6) in inet_hash()
804 __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head); in inet_hash()
806 __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head); in inet_hash()
807 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); in inet_hash()
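inet_hash() sends non-listening sockets straight to the established table via inet_ehash_nolisten(); listeners join their lhash2 bucket (after inet_reuseport_add_sock() when SO_REUSEPORT is set), and IPv6 reuseport listeners are appended at the tail so the other entries in the bucket stay ahead of them. A toy head-vs-tail insertion mirroring that choice; the list type is illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    struct lnode { struct lnode *next; };
    struct llist { struct lnode *head, *tail; };

    static void list_add_head(struct llist *l, struct lnode *n)
    {
            n->next = l->head;
            l->head = n;
            if (!l->tail)
                    l->tail = n;
    }

    static void list_add_tail(struct llist *l, struct lnode *n)
    {
            n->next = NULL;
            if (l->tail)
                    l->tail->next = n;
            else
                    l->head = n;
            l->tail = n;
    }

    static void hash_listener(struct llist *bucket, struct lnode *n,
                              bool is_ipv6, bool reuseport)
    {
            if (is_ipv6 && reuseport)
                    list_add_tail(bucket, n);   /* keep other entries in front */
            else
                    list_add_head(bucket, n);
    }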
815 void inet_unhash(struct sock *sk) in inet_unhash() argument
817 struct inet_hashinfo *hashinfo = tcp_get_hashinfo(sk); in inet_unhash()
819 if (sk_unhashed(sk)) in inet_unhash()
822 sock_rps_delete_flow(sk); in inet_unhash()
823 if (sk->sk_state == TCP_LISTEN) { in inet_unhash()
826 ilb2 = inet_lhash2_bucket_sk(hashinfo, sk); in inet_unhash()
831 if (rcu_access_pointer(sk->sk_reuseport_cb)) in inet_unhash()
832 reuseport_stop_listen_sock(sk); in inet_unhash()
834 __sk_nulls_del_node_init_rcu(sk); in inet_unhash()
835 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in inet_unhash()
838 spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); in inet_unhash()
841 __sk_nulls_del_node_init_rcu(sk); in inet_unhash()
842 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); in inet_unhash()
850 int l3mdev, const struct sock *sk) in inet_bind2_bucket_match() argument
856 return inet_bind2_bucket_addr_match(tb, sk); in inet_bind2_bucket_match()
860 unsigned short port, int l3mdev, const struct sock *sk) in inet_bind2_bucket_match_addr_any() argument
873 if (sk->sk_family == AF_INET6 && in inet_bind2_bucket_match_addr_any()
874 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) in inet_bind2_bucket_match_addr_any()
883 unsigned short port, int l3mdev, const struct sock *sk) in inet_bind2_bucket_find() argument
888 if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk)) in inet_bind2_bucket_find()
895 inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port) in inet_bhash2_addr_any_hashbucket() argument
897 struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk); in inet_bhash2_addr_any_hashbucket()
901 if (sk->sk_family == AF_INET6) in inet_bhash2_addr_any_hashbucket()
910 static void inet_update_saddr(struct sock *sk, void *saddr, int family) in inet_update_saddr() argument
913 inet_sk(sk)->inet_saddr = *(__be32 *)saddr; in inet_update_saddr()
914 sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr); in inet_update_saddr()
918 sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr; in inet_update_saddr()
923 static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset) in __inet_bhash2_update_saddr() argument
925 struct inet_hashinfo *hinfo = tcp_get_hashinfo(sk); in __inet_bhash2_update_saddr()
928 int l3mdev = inet_sk_bound_l3mdev(sk); in __inet_bhash2_update_saddr()
929 int port = inet_sk(sk)->inet_num; in __inet_bhash2_update_saddr()
930 struct net *net = sock_net(sk); in __inet_bhash2_update_saddr()
933 if (!inet_csk(sk)->icsk_bind2_hash) { in __inet_bhash2_update_saddr()
936 inet_reset_saddr(sk); in __inet_bhash2_update_saddr()
938 inet_update_saddr(sk, saddr, family); in __inet_bhash2_update_saddr()
955 inet_put_port(sk); in __inet_bhash2_update_saddr()
956 inet_reset_saddr(sk); in __inet_bhash2_update_saddr()
964 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); in __inet_bhash2_update_saddr()
972 __sk_del_bind_node(sk); in __inet_bhash2_update_saddr()
973 inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash); in __inet_bhash2_update_saddr()
977 inet_reset_saddr(sk); in __inet_bhash2_update_saddr()
979 inet_update_saddr(sk, saddr, family); in __inet_bhash2_update_saddr()
981 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); in __inet_bhash2_update_saddr()
984 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); in __inet_bhash2_update_saddr()
987 inet_bind2_bucket_init(tb2, net, head2, inet_csk(sk)->icsk_bind_hash, sk); in __inet_bhash2_update_saddr()
988 if (sk_is_connect_bind(sk)) { in __inet_bhash2_update_saddr()
993 inet_csk(sk)->icsk_bind2_hash = tb2; in __inet_bhash2_update_saddr()
994 sk_add_bind_node(sk, &tb2->owners); in __inet_bhash2_update_saddr()
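__inet_bhash2_update_saddr() rekeys the socket's bhash2 membership when its source address changes (or is reset): it unlinks the socket from the old (port, address) bucket, destroys that bucket if the socket was its last owner, updates the address, then finds or creates the bucket for the new pair and relinks. A condensed unlink/rekey/relink sketch over a toy open-chained table; the table, hashing and names are invented:

    #include <stdint.h>

    #define NBUCKETS 64u

    struct item {
            struct item *next;
            uint32_t     saddr;
            uint16_t     port;
    };

    static struct item *table[NBUCKETS];

    static unsigned int slot(uint32_t saddr, uint16_t port)
    {
            return (saddr ^ ((uint32_t)port * 2654435761u)) & (NBUCKETS - 1);
    }

    static void unlink_item(struct item *it)
    {
            struct item **pp = &table[slot(it->saddr, it->port)];

            while (*pp && *pp != it)
                    pp = &(*pp)->next;
            if (*pp)
                    *pp = it->next;
    }

    static void rehash_saddr(struct item *it, uint32_t new_saddr)
    {
            unlink_item(it);                /* leave the old (port, addr) bucket */
            it->saddr = new_saddr;          /* ~ inet_update_saddr() */
            it->next = table[slot(it->saddr, it->port)];
            table[slot(it->saddr, it->port)] = it;  /* join the new bucket */
    }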
1005 int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family) in inet_bhash2_update_saddr() argument
1007 return __inet_bhash2_update_saddr(sk, saddr, family, false); in inet_bhash2_update_saddr()
1011 void inet_bhash2_reset_saddr(struct sock *sk) in inet_bhash2_reset_saddr() argument
1013 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in inet_bhash2_reset_saddr()
1014 __inet_bhash2_update_saddr(sk, NULL, 0, true); in inet_bhash2_reset_saddr()
1032 struct sock *sk, u64 port_offset, in __inet_hash_connect() argument
1041 int port = inet_sk(sk)->inet_num; in __inet_hash_connect()
1042 struct net *net = sock_net(sk); in __inet_hash_connect()
1054 ret = check_established(death_row, sk, port, NULL, false, in __inet_hash_connect()
1060 l3mdev = inet_sk_bound_l3mdev(sk); in __inet_hash_connect()
1062 local_ports = inet_sk_get_local_port_range(sk, &low, &high); in __inet_hash_connect()
1099 if (!check_established(death_row, sk, port, &tw, true, in __inet_hash_connect()
1118 if (!check_established(death_row, sk, in __inet_hash_connect()
1153 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); in __inet_hash_connect()
1156 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); in __inet_hash_connect()
1159 head2, tb, sk); in __inet_hash_connect()
1175 inet_bind_hash(sk, tb, tb2, port); in __inet_hash_connect()
1176 sk->sk_userlocks |= SOCK_CONNECT_BIND; in __inet_hash_connect()
1178 if (sk_unhashed(sk)) { in __inet_hash_connect()
1179 inet_sk(sk)->inet_sport = htons(port); in __inet_hash_connect()
1180 inet_ehash_nolisten(sk, (struct sock *)tw, NULL); in __inet_hash_connect()
1194 if (sk_hashed(sk)) { in __inet_hash_connect()
1195 spinlock_t *lock = inet_ehash_lockp(hinfo, sk->sk_hash); in __inet_hash_connect()
1197 sock_prot_inuse_add(net, sk->sk_prot, -1); in __inet_hash_connect()
1200 __sk_nulls_del_node_init_rcu(sk); in __inet_hash_connect()
1203 sk->sk_hash = 0; in __inet_hash_connect()
1204 inet_sk(sk)->inet_sport = 0; in __inet_hash_connect()
1205 inet_sk(sk)->inet_num = 0; in __inet_hash_connect()
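__inet_hash_connect() is the ephemeral-port search for connect(): when the socket already has a port it runs check_established() once; otherwise it walks the local port range starting at an offset derived from the destination, skips reserved ports, tends to try even offsets first (leaving odd ports mostly to bind()), and asks check_established() for each candidate whether the 4-tuple is unique, possibly reclaiming a TIME_WAIT entry. On success it links the bind buckets, sets SOCK_CONNECT_BIND and hashes the socket; the tail of the function unwinds all of that on error. A heavily reduced sketch of just the offset-driven walk, with check_established() abstracted into a caller-supplied predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* tuple_is_unique stands in for check_established(): it should report whether
     * (local addr, candidate port, remote addr, remote port) is still unused. */
    static int pick_ephemeral_port(uint16_t low, uint16_t high, uint32_t port_offset,
                                   bool (*tuple_is_unique)(uint16_t port))
    {
            uint32_t remaining = (uint32_t)high - low + 1;
            uint32_t i;

            for (i = 0; i < remaining; i++) {
                    uint16_t port = low + (port_offset + i) % remaining;

                    if (tuple_is_unique(port))
                            return port;    /* caller then binds, hashes, connects */
            }
            return -1;                      /* range exhausted: ~ -EADDRNOTAVAIL */
    }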
1228 struct sock *sk) in inet_hash_connect() argument
1230 const struct inet_sock *inet = inet_sk(sk); in inet_hash_connect()
1231 const struct net *net = sock_net(sk); in inet_hash_connect()
1235 if (!inet_sk(sk)->inet_num) in inet_hash_connect()
1236 port_offset = inet_sk_port_offset(sk); in inet_hash_connect()
1241 return __inet_hash_connect(death_row, sk, port_offset, hash_port0, in inet_hash_connect()