1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * PF_INET protocol family socket handler.
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Florian La Roche, <flla@stud.uni-sb.de>
12 * Alan Cox, <A.Cox@swansea.ac.uk>
13 *
14 * Changes (see also sock.c)
15 *
16 * piggy,
17 * Karl Knutson : Socket protocol table
18 * A.N.Kuznetsov : Socket death error in accept().
19 * John Richardson : Fix non blocking error in connect()
20 * so sockets that fail to connect
21 * don't return -EINPROGRESS.
22 * Alan Cox : Asynchronous I/O support
23 * Alan Cox : Keep correct socket pointer on sock
24 * structures
25 * when accept() ed
26 * Alan Cox : Semantics of SO_LINGER aren't state
27 * moved to close when you look carefully.
28 * With this fixed and the accept bug fixed
29 * some RPC stuff seems happier.
30 * Niibe Yutaka : 4.4BSD style write async I/O
31 * Alan Cox,
32 * Tony Gale : Fixed reuse semantics.
33 * Alan Cox : bind() shouldn't abort existing but dead
34 * sockets. Stops FTP netin:.. I hope.
35 * Alan Cox : bind() works correctly for RAW sockets.
36 * Note that FreeBSD at least was broken
37 * in this respect so be careful with
38 * compatibility tests...
39 * Alan Cox : routing cache support
40 * Alan Cox : memzero the socket structure for
41 * compactness.
42 * Matt Day : nonblock connect error handler
43 * Alan Cox : Allow large numbers of pending sockets
44 * (eg for big web sites), but only if
45 * specifically application requested.
46 * Alan Cox : New buffering throughout IP. Used
47 * dumbly.
48 * Alan Cox : New buffering now used smartly.
49 * Alan Cox : BSD rather than common sense
50 * interpretation of listen.
51 * Germano Caronni : Assorted small races.
52 * Alan Cox : sendmsg/recvmsg basic support.
53 * Alan Cox : Only sendmsg/recvmsg now supported.
54 * Alan Cox : Locked down bind (see security list).
55 * Alan Cox : Loosened bind a little.
56 * Mike McLagan : ADD/DEL DLCI Ioctls
57 * Willy Konynenberg : Transparent proxying support.
58 * David S. Miller : New socket lookup architecture.
59 * Some other random speedups.
60 * Cyrus Durgin : Cleaned up file for kmod hacks.
61 * Andi Kleen : Fix inet_stream_connect TCP race.
62 */
63
64 #define pr_fmt(fmt) "IPv4: " fmt
65
66 #include <linux/err.h>
67 #include <linux/errno.h>
68 #include <linux/types.h>
69 #include <linux/socket.h>
70 #include <linux/in.h>
71 #include <linux/kernel.h>
72 #include <linux/kmod.h>
73 #include <linux/sched.h>
74 #include <linux/timer.h>
75 #include <linux/string.h>
76 #include <linux/sockios.h>
77 #include <linux/net.h>
78 #include <linux/capability.h>
79 #include <linux/fcntl.h>
80 #include <linux/mm.h>
81 #include <linux/interrupt.h>
82 #include <linux/stat.h>
83 #include <linux/init.h>
84 #include <linux/poll.h>
85 #include <linux/netfilter_ipv4.h>
86 #include <linux/random.h>
87 #include <linux/slab.h>
88
89 #include <linux/uaccess.h>
90
91 #include <linux/inet.h>
92 #include <linux/igmp.h>
93 #include <linux/inetdevice.h>
94 #include <linux/netdevice.h>
95 #include <net/checksum.h>
96 #include <net/ip.h>
97 #include <net/protocol.h>
98 #include <net/arp.h>
99 #include <net/route.h>
100 #include <net/ip_fib.h>
101 #include <net/inet_connection_sock.h>
102 #include <net/gro.h>
103 #include <net/gso.h>
104 #include <net/tcp.h>
105 #include <net/psp.h>
106 #include <net/udp.h>
107 #include <net/ping.h>
108 #include <linux/skbuff.h>
109 #include <net/sock.h>
110 #include <net/raw.h>
111 #include <net/icmp.h>
112 #include <net/inet_common.h>
113 #include <net/ip_tunnels.h>
114 #include <net/xfrm.h>
115 #include <net/net_namespace.h>
116 #include <net/secure_seq.h>
117 #ifdef CONFIG_IP_MROUTE
118 #include <linux/mroute.h>
119 #endif
120 #include <net/l3mdev.h>
121 #include <net/compat.h>
122 #include <net/rps.h>
123
124 #include <trace/events/sock.h>
125
126 /* Keep the definition of IPv6 disable here for now, to avoid annoying linker
127 * issues in case IPv6=m
128 */
129 int disable_ipv6_mod;
130 EXPORT_SYMBOL(disable_ipv6_mod);
131
132 /* The inetsw table contains everything that inet_create needs to
133 * build a new socket.
134 */
135 static struct list_head inetsw[SOCK_MAX];
136 static DEFINE_SPINLOCK(inetsw_lock);
137
138 /* New destruction routine */
139
140 void inet_sock_destruct(struct sock *sk)
141 {
142 struct inet_sock *inet = inet_sk(sk);
143
144 __skb_queue_purge(&sk->sk_receive_queue);
145 __skb_queue_purge(&sk->sk_error_queue);
146
147 sk_mem_reclaim_final(sk);
148
149 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
150 pr_err("Attempt to release TCP socket in state %d %p\n",
151 sk->sk_state, sk);
152 return;
153 }
154 if (!sock_flag(sk, SOCK_DEAD)) {
155 pr_err("Attempt to release alive inet socket %p\n", sk);
156 return;
157 }
158
159 WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
160 WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
161 WARN_ON_ONCE(sk->sk_wmem_queued);
162 WARN_ON_ONCE(sk->sk_forward_alloc);
163
164 kfree(rcu_dereference_protected(inet->inet_opt, 1));
165 dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
166 dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
167 psp_sk_assoc_free(sk);
168 }
169 EXPORT_SYMBOL(inet_sock_destruct);
170
171 /*
172 * The routines beyond this point handle the behaviour of an AF_INET
173 * socket object. Mostly it punts to the subprotocols of IP to do
174 * the work.
175 */
176
177 /*
178 * Automatically bind an unbound socket.
179 */
180
181 static int inet_autobind(struct sock *sk)
182 {
183 struct inet_sock *inet;
184 /* We may need to bind the socket. */
185 lock_sock(sk);
186 inet = inet_sk(sk);
187 if (!inet->inet_num) {
188 if (sk->sk_prot->get_port(sk, 0)) {
189 release_sock(sk);
190 return -EAGAIN;
191 }
192 inet->inet_sport = htons(inet->inet_num);
193 }
194 release_sock(sk);
195 return 0;
196 }
197
198 int __inet_listen_sk(struct sock *sk, int backlog)
199 {
200 unsigned char old_state = sk->sk_state;
201 int err, tcp_fastopen;
202
203 if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
204 return -EINVAL;
205
206 WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
207 /* Really, if the socket is already in listen state
208 * we can only allow the backlog to be adjusted.
209 */
210 if (old_state != TCP_LISTEN) {
211 /* Enable TFO w/o requiring TCP_FASTOPEN socket option.
212 * Note that only TCP sockets (SOCK_STREAM) will reach here.
213 * Also the fastopen backlog may already have been set via the option
214 * because the socket was previously in TCP_LISTEN state but
215 * was shutdown() rather than close().
216 */
217 tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
218 if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
219 (tcp_fastopen & TFO_SERVER_ENABLE) &&
220 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
221 fastopen_queue_tune(sk, backlog);
222 tcp_fastopen_init_key_once(sock_net(sk));
223 }
224
225 err = inet_csk_listen_start(sk);
226 if (err)
227 return err;
228
229 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
230 }
231 return 0;
232 }
233
234 /*
235 * Move a socket into listening state.
236 */
237 int inet_listen(struct socket *sock, int backlog)
238 {
239 struct sock *sk = sock->sk;
240 int err = -EINVAL;
241
242 lock_sock(sk);
243
244 if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
245 goto out;
246
247 err = __inet_listen_sk(sk, backlog);
248
249 out:
250 release_sock(sk);
251 return err;
252 }
253 EXPORT_SYMBOL(inet_listen);
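
/* A minimal userspace sketch (illustration only; error handling is
 * elided and the port number is arbitrary) of the path that ends in
 * the inet_listen() call above:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in a = { .sin_family = AF_INET,
 *				 .sin_port = htons(8080),
 *				 .sin_addr.s_addr = htonl(INADDR_ANY) };
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	listen(fd, 128);	the backlog lands in sk_max_ack_backlog
 */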
254
255 /*
256 * Create an inet socket.
257 */
258
259 static int inet_create(struct net *net, struct socket *sock, int protocol,
260 int kern)
261 {
262 struct sock *sk;
263 struct inet_protosw *answer;
264 struct inet_sock *inet;
265 struct proto *answer_prot;
266 unsigned char answer_flags;
267 int try_loading_module = 0;
268 int err;
269
270 if (protocol < 0 || protocol >= IPPROTO_MAX)
271 return -EINVAL;
272
273 sock->state = SS_UNCONNECTED;
274
275 /* Look for the requested type/protocol pair. */
276 lookup_protocol:
277 err = -ESOCKTNOSUPPORT;
278 rcu_read_lock();
279 list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
280
281 err = 0;
282 /* Check the non-wild match. */
283 if (protocol == answer->protocol) {
284 if (protocol != IPPROTO_IP)
285 break;
286 } else {
287 /* Check for the two wild cases. */
288 if (IPPROTO_IP == protocol) {
289 protocol = answer->protocol;
290 break;
291 }
292 if (IPPROTO_IP == answer->protocol)
293 break;
294 }
295 err = -EPROTONOSUPPORT;
296 }
297
298 if (unlikely(err)) {
299 if (try_loading_module < 2) {
300 rcu_read_unlock();
301 /*
302 * Be more specific, e.g. net-pf-2-proto-132-type-1
303 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
304 */
305 if (++try_loading_module == 1)
306 request_module("net-pf-%d-proto-%d-type-%d",
307 PF_INET, protocol, sock->type);
308 /*
309 * Fall back to generic, e.g. net-pf-2-proto-132
310 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
311 */
312 else
313 request_module("net-pf-%d-proto-%d",
314 PF_INET, protocol);
315 goto lookup_protocol;
316 } else
317 goto out_rcu_unlock;
318 }
319
320 err = -EPERM;
321 if (sock->type == SOCK_RAW && !kern &&
322 !ns_capable(net->user_ns, CAP_NET_RAW))
323 goto out_rcu_unlock;
324
325 sock->ops = answer->ops;
326 answer_prot = answer->prot;
327 answer_flags = answer->flags;
328 rcu_read_unlock();
329
330 WARN_ON(!answer_prot->slab);
331
332 err = -ENOMEM;
333 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
334 if (!sk)
335 goto out;
336
337 err = 0;
338 if (INET_PROTOSW_REUSE & answer_flags)
339 sk->sk_reuse = SK_CAN_REUSE;
340
341 if (INET_PROTOSW_ICSK & answer_flags)
342 inet_init_csk_locks(sk);
343
344 inet = inet_sk(sk);
345 inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
346
347 inet_clear_bit(NODEFRAG, sk);
348
349 if (SOCK_RAW == sock->type) {
350 inet->inet_num = protocol;
351 if (IPPROTO_RAW == protocol)
352 inet_set_bit(HDRINCL, sk);
353 }
354
355 if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
356 inet->pmtudisc = IP_PMTUDISC_DONT;
357 else
358 inet->pmtudisc = IP_PMTUDISC_WANT;
359
360 atomic_set(&inet->inet_id, 0);
361
362 sock_init_data(sock, sk);
363
364 sk->sk_destruct = inet_sock_destruct;
365 sk->sk_protocol = protocol;
366 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
367 sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash);
368
369 inet->uc_ttl = -1;
370 inet_set_bit(MC_LOOP, sk);
371 inet->mc_ttl = 1;
372 inet_set_bit(MC_ALL, sk);
373 inet->mc_index = 0;
374 inet->mc_list = NULL;
375 inet->rcv_tos = 0;
376
377 if (inet->inet_num) {
378 /* It is assumed that any protocol which allows
379 * the user to assign a number at socket
380 * creation time automatically shares
381 * that number space.
382 */
383 inet->inet_sport = htons(inet->inet_num);
384 /* Add to protocol hash chains. */
385 err = sk->sk_prot->hash(sk);
386 if (err)
387 goto out_sk_release;
388 }
389
390 if (sk->sk_prot->init) {
391 err = sk->sk_prot->init(sk);
392 if (err)
393 goto out_sk_release;
394 }
395
396 if (!kern) {
397 err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
398 if (err)
399 goto out_sk_release;
400 }
401 out:
402 return err;
403 out_rcu_unlock:
404 rcu_read_unlock();
405 goto out;
406 out_sk_release:
407 sk_common_release(sk);
408 sock->sk = NULL;
409 goto out;
410 }
411
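/* Sketch of the wild-card matching implemented above (illustration
 * only): protocol 0 (IPPROTO_IP) from userspace resolves to the first
 * entry registered for the socket type, while a concrete protocol with
 * no exact entry can still match a wild-card entry.
 *
 *	socket(AF_INET, SOCK_STREAM, 0);	resolves to IPPROTO_TCP
 *	socket(AF_INET, SOCK_DGRAM, 0);		resolves to IPPROTO_UDP
 *	socket(AF_INET, SOCK_RAW, IPPROTO_GRE);	matches the SOCK_RAW
 *						wild-card entry (raw_prot)
 */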
412
413 /*
414 * The peer socket should always be NULL (or else). When we call this
415 * function we are destroying the object and from then on nobody
416 * should refer to it.
417 */
418 int inet_release(struct socket *sock)
419 {
420 struct sock *sk = sock->sk;
421
422 if (sk) {
423 long timeout;
424
425 if (!sk->sk_kern_sock)
426 BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
427
428 /* Applications forget to leave groups before exiting */
429 ip_mc_drop_socket(sk);
430
431 /* If linger is set, we don't return until the close
432 * is complete. Otherwise we return immediately. The
433 * actual closing is done the same either way.
434 *
435 * If the close is due to the process exiting, we never
436 * linger..
437 */
438 timeout = 0;
439 if (sock_flag(sk, SOCK_LINGER) &&
440 !(current->flags & PF_EXITING))
441 timeout = sk->sk_lingertime;
442 sk->sk_prot->close(sk, timeout);
443 sock->sk = NULL;
444 }
445 return 0;
446 }
447 EXPORT_SYMBOL(inet_release);
448
449 int inet_bind_sk(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len)
450 {
451 u32 flags = BIND_WITH_LOCK;
452 int err;
453
454 /* If the socket has its own bind function then use it. (RAW) */
455 if (sk->sk_prot->bind) {
456 return sk->sk_prot->bind(sk, uaddr, addr_len);
457 }
458 if (addr_len < sizeof(struct sockaddr_in))
459 return -EINVAL;
460
461 /* BPF prog is run before any checks are done so that if the prog
462 * changes context in a wrong way it will be caught.
463 */
464 err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
465 CGROUP_INET4_BIND, &flags);
466 if (err)
467 return err;
468
469 return __inet_bind(sk, uaddr, addr_len, flags);
470 }
471
472 int inet_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
473 {
474 return inet_bind_sk(sock->sk, uaddr, addr_len);
475 }
476 EXPORT_SYMBOL(inet_bind);
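
/* Hedged userspace sketch of the BIND_ADDRESS_NO_PORT path tested in
 * __inet_bind() below: with the IP_BIND_ADDRESS_NO_PORT socket option
 * set and sin_port == 0, only the source address is pinned here and the
 * port search is deferred to connect() time. The address used is from
 * the documentation range.
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
 *	struct sockaddr_in a = { .sin_family = AF_INET,
 *				 .sin_addr.s_addr = inet_addr("192.0.2.1") };
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	sin_port left at 0
 */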
477
478 int __inet_bind(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len,
479 u32 flags)
480 {
481 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
482 struct inet_sock *inet = inet_sk(sk);
483 struct net *net = sock_net(sk);
484 unsigned short snum;
485 int chk_addr_ret;
486 u32 tb_id = RT_TABLE_LOCAL;
487 int err;
488
489 if (addr->sin_family != AF_INET) {
490 /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
491 * only if s_addr is INADDR_ANY.
492 */
493 err = -EAFNOSUPPORT;
494 if (addr->sin_family != AF_UNSPEC ||
495 addr->sin_addr.s_addr != htonl(INADDR_ANY))
496 goto out;
497 }
498
499 tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
500 chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
501
502 /* Not specified by any standard per se; however, it breaks too
503 * many applications when removed. It is unfortunate since
504 * allowing applications to make a non-local bind solves
505 * several problems with systems using dynamic addressing.
506 * (ie. your servers still start up even if your ISDN link
507 * is temporarily down)
508 */
509 err = -EADDRNOTAVAIL;
510 if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
511 chk_addr_ret))
512 goto out;
513
514 snum = ntohs(addr->sin_port);
515 err = -EACCES;
516 if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
517 snum && inet_port_requires_bind_service(net, snum) &&
518 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
519 goto out;
520
521 /* We keep a pair of addresses. rcv_saddr is the one
522 * used by hash lookups, and saddr is used for transmit.
523 *
524 * In the BSD API these are the same except where it
525 * would be illegal to use them (multicast/broadcast) in
526 * which case the sending device address is used.
527 */
528 if (flags & BIND_WITH_LOCK)
529 lock_sock(sk);
530
531 /* Check these errors (active socket, double bind). */
532 err = -EINVAL;
533 if (sk->sk_state != TCP_CLOSE || inet->inet_num)
534 goto out_release_sock;
535
536 inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
537 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
538 inet->inet_saddr = 0; /* Use device */
539
540 /* Make sure we are allowed to bind here. */
541 if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
542 (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
543 err = sk->sk_prot->get_port(sk, snum);
544 if (err) {
545 inet->inet_saddr = inet->inet_rcv_saddr = 0;
546 goto out_release_sock;
547 }
548 if (!(flags & BIND_FROM_BPF)) {
549 err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
550 if (err) {
551 inet->inet_saddr = inet->inet_rcv_saddr = 0;
552 if (sk->sk_prot->put_port)
553 sk->sk_prot->put_port(sk);
554 goto out_release_sock;
555 }
556 }
557 }
558
559 if (inet->inet_rcv_saddr)
560 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
561 if (snum)
562 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
563 inet->inet_sport = htons(inet->inet_num);
564 inet->inet_daddr = 0;
565 inet->inet_dport = 0;
566 sk_dst_reset(sk);
567 err = 0;
568 out_release_sock:
569 if (flags & BIND_WITH_LOCK)
570 release_sock(sk);
571 out:
572 return err;
573 }
574
575 int inet_dgram_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
576 int addr_len, int flags)
577 {
578 struct sock *sk = sock->sk;
579 const struct proto *prot;
580 int err;
581
582 if (addr_len < sizeof(uaddr->sa_family))
583 return -EINVAL;
584
585 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
586 prot = READ_ONCE(sk->sk_prot);
587
588 if (uaddr->sa_family == AF_UNSPEC)
589 return prot->disconnect(sk, flags);
590
591 if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
592 err = prot->pre_connect(sk, uaddr, addr_len);
593 if (err)
594 return err;
595 }
596
597 if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
598 return -EAGAIN;
599 return prot->connect(sk, uaddr, addr_len);
600 }
601 EXPORT_SYMBOL(inet_dgram_connect);
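
/* Usage sketch for the AF_UNSPEC branch above: connecting a datagram
 * socket to an AF_UNSPEC address dissolves the association via
 * prot->disconnect() instead of setting a new peer.
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(udp_fd, &sa, sizeof(sa));	back to unconnected state
 */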
602
603 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
604 {
605 DEFINE_WAIT_FUNC(wait, woken_wake_function);
606
607 add_wait_queue(sk_sleep(sk), &wait);
608 sk->sk_write_pending += writebias;
609
610 /* Basic assumption: if someone sets sk->sk_err, he _must_
611 * change state of the socket from TCP_SYN_*.
612 * Connect() does not allow error notifications to be retrieved
613 * without closing the socket.
614 */
615 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
616 release_sock(sk);
617 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
618 lock_sock(sk);
619 if (signal_pending(current) || !timeo)
620 break;
621 }
622 remove_wait_queue(sk_sleep(sk), &wait);
623 sk->sk_write_pending -= writebias;
624 return timeo;
625 }
626
627 /*
628 * Connect to a remote host. There is regrettably still a little
629 * TCP 'magic' in here.
630 */
631 int __inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
632 int addr_len, int flags, int is_sendmsg)
633 {
634 struct sock *sk = sock->sk;
635 int err;
636 long timeo;
637
638 /*
639 * uaddr can be NULL and addr_len can be 0 if:
640 * sk is a TCP fastopen active socket and
641 * TCP_FASTOPEN_CONNECT sockopt is set and
642 * we already have a valid cookie for this socket.
643 * In this case, user can call write() after connect().
644 * write() will invoke tcp_sendmsg_fastopen() which calls
645 * __inet_stream_connect().
646 */
647 if (uaddr) {
648 if (addr_len < sizeof(uaddr->sa_family))
649 return -EINVAL;
650
651 if (uaddr->sa_family == AF_UNSPEC) {
652 sk->sk_disconnects++;
653 err = sk->sk_prot->disconnect(sk, flags);
654 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
655 goto out;
656 }
657 }
658
659 switch (sock->state) {
660 default:
661 err = -EINVAL;
662 goto out;
663 case SS_CONNECTED:
664 err = -EISCONN;
665 goto out;
666 case SS_CONNECTING:
667 if (inet_test_bit(DEFER_CONNECT, sk))
668 err = is_sendmsg ? -EINPROGRESS : -EISCONN;
669 else
670 err = -EALREADY;
671 /* Fall out of switch with err, set for this state */
672 break;
673 case SS_UNCONNECTED:
674 err = -EISCONN;
675 if (sk->sk_state != TCP_CLOSE)
676 goto out;
677
678 if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
679 err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
680 if (err)
681 goto out;
682 }
683
684 err = sk->sk_prot->connect(sk, uaddr, addr_len);
685 if (err < 0)
686 goto out;
687
688 sock->state = SS_CONNECTING;
689
690 if (!err && inet_test_bit(DEFER_CONNECT, sk))
691 goto out;
692
693 /* Just entered SS_CONNECTING state; the only
694 * difference is that return value in non-blocking
695 * case is EINPROGRESS, rather than EALREADY.
696 */
697 err = -EINPROGRESS;
698 break;
699 }
700
701 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
702
703 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
704 int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
705 tcp_sk(sk)->fastopen_req &&
706 tcp_sk(sk)->fastopen_req->data ? 1 : 0;
707 int dis = sk->sk_disconnects;
708
709 /* Error code is set above */
710 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
711 goto out;
712
713 err = sock_intr_errno(timeo);
714 if (signal_pending(current))
715 goto out;
716
717 if (dis != sk->sk_disconnects) {
718 err = -EPIPE;
719 goto out;
720 }
721 }
722
723 /* Connection was closed by RST, timeout, ICMP error
724 * or another process disconnected us.
725 */
726 if (sk->sk_state == TCP_CLOSE)
727 goto sock_error;
728
729 /* sk->sk_err may be non-zero now, if RECVERR was ordered by the user
730 * and an error was received after the socket entered the established state.
731 * Hence, it is handled normally after connect() returns successfully.
732 */
733
734 sock->state = SS_CONNECTED;
735 err = 0;
736 out:
737 return err;
738
739 sock_error:
740 err = sock_error(sk) ? : -ECONNABORTED;
741 sock->state = SS_UNCONNECTED;
742 sk->sk_disconnects++;
743 if (sk->sk_prot->disconnect(sk, flags))
744 sock->state = SS_DISCONNECTING;
745 goto out;
746 }
747 EXPORT_SYMBOL(__inet_stream_connect);
748
749 int inet_stream_connect(struct socket *sock, struct sockaddr_unsized *uaddr,
750 int addr_len, int flags)
751 {
752 int err;
753
754 lock_sock(sock->sk);
755 err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
756 release_sock(sock->sk);
757 return err;
758 }
759 EXPORT_SYMBOL(inet_stream_connect);
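
/* Hedged sketch of the non-blocking client side served by the code
 * above: the first connect() fails with EINPROGRESS, completion is
 * awaited with poll(), and the outcome is read back via SO_ERROR.
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));	-1, EINPROGRESS
 *
 *	struct pollfd p = { .fd = fd, .events = POLLOUT };
 *	poll(&p, 1, -1);
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);	0 == connected
 */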
760
761 void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
762 {
763 if (mem_cgroup_sockets_enabled) {
764 mem_cgroup_sk_alloc(newsk);
765 __sk_charge(newsk, GFP_KERNEL);
766 }
767
768 sock_rps_record_flow(newsk);
769 WARN_ON(!((1 << newsk->sk_state) &
770 (TCPF_ESTABLISHED | TCPF_SYN_RECV |
771 TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
772 TCPF_CLOSING | TCPF_CLOSE_WAIT |
773 TCPF_CLOSE)));
774
775 if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
776 set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
777 sock_graft(newsk, newsock);
778
779 newsock->state = SS_CONNECTED;
780 }
781 EXPORT_SYMBOL_GPL(__inet_accept);
782
783 /*
784 * Accept a pending connection. The TCP layer now gives BSD semantics.
785 */
786
787 int inet_accept(struct socket *sock, struct socket *newsock,
788 struct proto_accept_arg *arg)
789 {
790 struct sock *sk1 = sock->sk, *sk2;
791
792 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
793 arg->err = -EINVAL;
794 sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
795 if (!sk2)
796 return arg->err;
797
798 lock_sock(sk2);
799 __inet_accept(sock, newsock, sk2);
800 release_sock(sk2);
801 return 0;
802 }
803 EXPORT_SYMBOL(inet_accept);
804
805 /*
806 * This does both peername and sockname.
807 */
808 int inet_getname(struct socket *sock, struct sockaddr *uaddr,
809 int peer)
810 {
811 struct sock *sk = sock->sk;
812 struct inet_sock *inet = inet_sk(sk);
813 DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
814 int sin_addr_len = sizeof(*sin);
815
816 sin->sin_family = AF_INET;
817 lock_sock(sk);
818 if (peer) {
819 if (!inet->inet_dport ||
820 (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
821 peer == 1)) {
822 release_sock(sk);
823 return -ENOTCONN;
824 }
825 sin->sin_port = inet->inet_dport;
826 sin->sin_addr.s_addr = inet->inet_daddr;
827 BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
828 CGROUP_INET4_GETPEERNAME);
829 } else {
830 __be32 addr = inet->inet_rcv_saddr;
831 if (!addr)
832 addr = inet->inet_saddr;
833 sin->sin_port = inet->inet_sport;
834 sin->sin_addr.s_addr = addr;
835 BPF_CGROUP_RUN_SA_PROG(sk, sin, &sin_addr_len,
836 CGROUP_INET4_GETSOCKNAME);
837 }
838 release_sock(sk);
839 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
840 return sin_addr_len;
841 }
842 EXPORT_SYMBOL(inet_getname);
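
/* Usage sketch (illustration only): after connect() or an autobind,
 * getsockname() reports the locally chosen half of the 4-tuple that
 * the code above copies out of the inet_sock.
 *
 *	struct sockaddr_in local;
 *	socklen_t len = sizeof(local);
 *
 *	getsockname(fd, (struct sockaddr *)&local, &len);
 *	printf("%s:%u\n", inet_ntoa(local.sin_addr), ntohs(local.sin_port));
 */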
843
844 int inet_send_prepare(struct sock *sk)
845 {
846 sock_rps_record_flow(sk);
847
848 /* We may need to bind the socket. */
849 if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
850 inet_autobind(sk))
851 return -EAGAIN;
852
853 return 0;
854 }
855 EXPORT_SYMBOL_GPL(inet_send_prepare);
856
857 int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
858 {
859 struct sock *sk = sock->sk;
860 const struct proto *prot;
861
862 if (unlikely(inet_send_prepare(sk)))
863 return -EAGAIN;
864
865 prot = READ_ONCE(sk->sk_prot);
866 return INDIRECT_CALL_2(prot->sendmsg, tcp_sendmsg, udp_sendmsg,
867 sk, msg, size);
868 }
869 EXPORT_SYMBOL(inet_sendmsg);
870
871 void inet_splice_eof(struct socket *sock)
872 {
873 const struct proto *prot;
874 struct sock *sk = sock->sk;
875
876 if (unlikely(inet_send_prepare(sk)))
877 return;
878
879 /* IPV6_ADDRFORM can change sk->sk_prot under us. */
880 prot = READ_ONCE(sk->sk_prot);
881 if (prot->splice_eof)
882 prot->splice_eof(sock);
883 }
884 EXPORT_SYMBOL_GPL(inet_splice_eof);
885
886 int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
887 int flags)
888 {
889 struct sock *sk = sock->sk;
890 const struct proto *prot;
891
892 if (likely(!(flags & MSG_ERRQUEUE)))
893 sock_rps_record_flow(sk);
894
895 prot = READ_ONCE(sk->sk_prot);
896 return INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udp_recvmsg,
897 sk, msg, size, flags);
898 }
899 EXPORT_SYMBOL(inet_recvmsg);
900
901 int inet_shutdown(struct socket *sock, int how)
902 {
903 struct sock *sk = sock->sk;
904 int err = 0;
905
906 /* This should really check to make sure
907 * the socket is a TCP socket. (WHY AC...)
908 */
909 how++; /* maps SHUT_RD (0) -> RCV_SHUTDOWN (1), SHUT_WR (1) ->
910 SEND_SHUTDOWN (2) and SHUT_RDWR (2) -> 3, so that bit 1
911 covers receives and bit 2 covers sends. */
912 if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */
913 return -EINVAL;
914
915 lock_sock(sk);
916 if (sock->state == SS_CONNECTING) {
917 if ((1 << sk->sk_state) &
918 (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
919 sock->state = SS_DISCONNECTING;
920 else
921 sock->state = SS_CONNECTED;
922 }
923
924 switch (sk->sk_state) {
925 case TCP_CLOSE:
926 err = -ENOTCONN;
927 /* Hack to wake up other listeners, who can poll for
928 EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
929 fallthrough;
930 default:
931 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
932 if (sk->sk_prot->shutdown)
933 sk->sk_prot->shutdown(sk, how);
934 break;
935
936 /* Remaining two branches are temporary solution for missing
937 * close() in multithreaded environment. It is _not_ a good idea,
938 * but we have no choice until close() is repaired at VFS level.
939 */
940 case TCP_LISTEN:
941 if (!(how & RCV_SHUTDOWN))
942 break;
943 fallthrough;
944 case TCP_SYN_SENT:
945 err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
946 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
947 break;
948 }
949
950 /* Wake up anyone sleeping in poll. */
951 sk->sk_state_change(sk);
952 release_sock(sk);
953 return err;
954 }
955 EXPORT_SYMBOL(inet_shutdown);
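
/* Sketch of the half-close that SHUT_WR enables (userspace, error
 * checks elided; consume() is a placeholder): a FIN is sent, but the
 * receive side stays open until the peer closes.
 *
 *	shutdown(fd, SHUT_WR);			we stop sending
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		peer data still arrives
 */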
956
957 /*
958 * ioctl() calls you can issue on an INET socket. Most of these are
959 * device configuration and stuff and very rarely used. Some ioctls
960 * pass on to the socket itself.
961 *
962 * NOTE: I like the idea of a module for the config stuff. ie ifconfig
963 * loads the devconfigure module, which does its configuring and unloads it.
964 * There's a good 20K of config code hanging around the kernel.
965 */
966
967 int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
968 {
969 struct sock *sk = sock->sk;
970 int err = 0;
971 struct net *net = sock_net(sk);
972 void __user *p = (void __user *)arg;
973 struct ifreq ifr;
974 struct rtentry rt;
975
976 switch (cmd) {
977 case SIOCADDRT:
978 case SIOCDELRT:
979 if (copy_from_user(&rt, p, sizeof(struct rtentry)))
980 return -EFAULT;
981 err = ip_rt_ioctl(net, cmd, &rt);
982 break;
983 case SIOCRTMSG:
984 err = -EINVAL;
985 break;
986 case SIOCDARP:
987 case SIOCGARP:
988 case SIOCSARP:
989 err = arp_ioctl(net, cmd, (void __user *)arg);
990 break;
991 case SIOCGIFADDR:
992 case SIOCGIFBRDADDR:
993 case SIOCGIFNETMASK:
994 case SIOCGIFDSTADDR:
995 case SIOCGIFPFLAGS:
996 if (get_user_ifreq(&ifr, NULL, p))
997 return -EFAULT;
998 err = devinet_ioctl(net, cmd, &ifr);
999 if (!err && put_user_ifreq(&ifr, p))
1000 err = -EFAULT;
1001 break;
1002
1003 case SIOCSIFADDR:
1004 case SIOCSIFBRDADDR:
1005 case SIOCSIFNETMASK:
1006 case SIOCSIFDSTADDR:
1007 case SIOCSIFPFLAGS:
1008 case SIOCSIFFLAGS:
1009 if (get_user_ifreq(&ifr, NULL, p))
1010 return -EFAULT;
1011 err = devinet_ioctl(net, cmd, &ifr);
1012 break;
1013 default:
1014 if (sk->sk_prot->ioctl)
1015 err = sk_ioctl(sk, cmd, (void __user *)arg);
1016 else
1017 err = -ENOIOCTLCMD;
1018 break;
1019 }
1020 return err;
1021 }
1022 EXPORT_SYMBOL(inet_ioctl);
1023
1024 #ifdef CONFIG_COMPAT
1025 static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
1026 struct compat_rtentry __user *ur)
1027 {
1028 compat_uptr_t rtdev;
1029 struct rtentry rt;
1030
1031 if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
1032 3 * sizeof(struct sockaddr)) ||
1033 get_user(rt.rt_flags, &ur->rt_flags) ||
1034 get_user(rt.rt_metric, &ur->rt_metric) ||
1035 get_user(rt.rt_mtu, &ur->rt_mtu) ||
1036 get_user(rt.rt_window, &ur->rt_window) ||
1037 get_user(rt.rt_irtt, &ur->rt_irtt) ||
1038 get_user(rtdev, &ur->rt_dev))
1039 return -EFAULT;
1040
1041 rt.rt_dev = compat_ptr(rtdev);
1042 return ip_rt_ioctl(sock_net(sk), cmd, &rt);
1043 }
1044
1045 static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1046 {
1047 void __user *argp = compat_ptr(arg);
1048 struct sock *sk = sock->sk;
1049
1050 switch (cmd) {
1051 case SIOCADDRT:
1052 case SIOCDELRT:
1053 return inet_compat_routing_ioctl(sk, cmd, argp);
1054 default:
1055 if (!sk->sk_prot->compat_ioctl)
1056 return -ENOIOCTLCMD;
1057 return sk->sk_prot->compat_ioctl(sk, cmd, arg);
1058 }
1059 }
1060 #endif /* CONFIG_COMPAT */
1061
1062 const struct proto_ops inet_stream_ops = {
1063 .family = PF_INET,
1064 .owner = THIS_MODULE,
1065 .release = inet_release,
1066 .bind = inet_bind,
1067 .connect = inet_stream_connect,
1068 .socketpair = sock_no_socketpair,
1069 .accept = inet_accept,
1070 .getname = inet_getname,
1071 .poll = tcp_poll,
1072 .ioctl = inet_ioctl,
1073 .gettstamp = sock_gettstamp,
1074 .listen = inet_listen,
1075 .shutdown = inet_shutdown,
1076 .setsockopt = sock_common_setsockopt,
1077 .getsockopt = sock_common_getsockopt,
1078 .sendmsg = inet_sendmsg,
1079 .recvmsg = inet_recvmsg,
1080 #ifdef CONFIG_MMU
1081 .mmap = tcp_mmap,
1082 #endif
1083 .splice_eof = inet_splice_eof,
1084 .splice_read = tcp_splice_read,
1085 .set_peek_off = sk_set_peek_off,
1086 .read_sock = tcp_read_sock,
1087 .read_skb = tcp_read_skb,
1088 .sendmsg_locked = tcp_sendmsg_locked,
1089 .peek_len = tcp_peek_len,
1090 #ifdef CONFIG_COMPAT
1091 .compat_ioctl = inet_compat_ioctl,
1092 #endif
1093 .set_rcvlowat = tcp_set_rcvlowat,
1094 .set_rcvbuf = tcp_set_rcvbuf,
1095 };
1096 EXPORT_SYMBOL(inet_stream_ops);
1097
1098 const struct proto_ops inet_dgram_ops = {
1099 .family = PF_INET,
1100 .owner = THIS_MODULE,
1101 .release = inet_release,
1102 .bind = inet_bind,
1103 .connect = inet_dgram_connect,
1104 .socketpair = sock_no_socketpair,
1105 .accept = sock_no_accept,
1106 .getname = inet_getname,
1107 .poll = udp_poll,
1108 .ioctl = inet_ioctl,
1109 .gettstamp = sock_gettstamp,
1110 .listen = sock_no_listen,
1111 .shutdown = inet_shutdown,
1112 .setsockopt = sock_common_setsockopt,
1113 .getsockopt = sock_common_getsockopt,
1114 .sendmsg = inet_sendmsg,
1115 .read_skb = udp_read_skb,
1116 .recvmsg = inet_recvmsg,
1117 .mmap = sock_no_mmap,
1118 .splice_eof = inet_splice_eof,
1119 .set_peek_off = udp_set_peek_off,
1120 #ifdef CONFIG_COMPAT
1121 .compat_ioctl = inet_compat_ioctl,
1122 #endif
1123 };
1124 EXPORT_SYMBOL(inet_dgram_ops);
1125
1126 /*
1127 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
1128 * udp_poll
1129 */
1130 static const struct proto_ops inet_sockraw_ops = {
1131 .family = PF_INET,
1132 .owner = THIS_MODULE,
1133 .release = inet_release,
1134 .bind = inet_bind,
1135 .connect = inet_dgram_connect,
1136 .socketpair = sock_no_socketpair,
1137 .accept = sock_no_accept,
1138 .getname = inet_getname,
1139 .poll = datagram_poll,
1140 .ioctl = inet_ioctl,
1141 .gettstamp = sock_gettstamp,
1142 .listen = sock_no_listen,
1143 .shutdown = inet_shutdown,
1144 .setsockopt = sock_common_setsockopt,
1145 .getsockopt = sock_common_getsockopt,
1146 .sendmsg = inet_sendmsg,
1147 .recvmsg = inet_recvmsg,
1148 .mmap = sock_no_mmap,
1149 .splice_eof = inet_splice_eof,
1150 #ifdef CONFIG_COMPAT
1151 .compat_ioctl = inet_compat_ioctl,
1152 #endif
1153 };
1154
1155 static const struct net_proto_family inet_family_ops = {
1156 .family = PF_INET,
1157 .create = inet_create,
1158 .owner = THIS_MODULE,
1159 };
1160
1161 /* Upon startup we insert all the elements in inetsw_array[] into
1162 * the linked list inetsw.
1163 */
1164 static struct inet_protosw inetsw_array[] =
1165 {
1166 {
1167 .type = SOCK_STREAM,
1168 .protocol = IPPROTO_TCP,
1169 .prot = &tcp_prot,
1170 .ops = &inet_stream_ops,
1171 .flags = INET_PROTOSW_PERMANENT |
1172 INET_PROTOSW_ICSK,
1173 },
1174
1175 {
1176 .type = SOCK_DGRAM,
1177 .protocol = IPPROTO_UDP,
1178 .prot = &udp_prot,
1179 .ops = &inet_dgram_ops,
1180 .flags = INET_PROTOSW_PERMANENT,
1181 },
1182
1183 {
1184 .type = SOCK_DGRAM,
1185 .protocol = IPPROTO_ICMP,
1186 .prot = &ping_prot,
1187 .ops = &inet_sockraw_ops,
1188 .flags = INET_PROTOSW_REUSE,
1189 },
1190
1191 {
1192 .type = SOCK_RAW,
1193 .protocol = IPPROTO_IP, /* wild card */
1194 .prot = &raw_prot,
1195 .ops = &inet_sockraw_ops,
1196 .flags = INET_PROTOSW_REUSE,
1197 }
1198 };
1199
1200 #define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
1201
1202 void inet_register_protosw(struct inet_protosw *p)
1203 {
1204 struct list_head *lh;
1205 struct inet_protosw *answer;
1206 int protocol = p->protocol;
1207 struct list_head *last_perm;
1208
1209 spin_lock_bh(&inetsw_lock);
1210
1211 if (p->type >= SOCK_MAX)
1212 goto out_illegal;
1213
1214 /* If we are trying to override a permanent protocol, bail. */
1215 last_perm = &inetsw[p->type];
1216 list_for_each(lh, &inetsw[p->type]) {
1217 answer = list_entry(lh, struct inet_protosw, list);
1218 /* Check only the non-wild match. */
1219 if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
1220 break;
1221 if (protocol == answer->protocol)
1222 goto out_permanent;
1223 last_perm = lh;
1224 }
1225
1226 /* Add the new entry after the last permanent entry if any, so that
1227 * the new entry does not override a permanent entry when matched with
1228 * a wild-card protocol. But it is allowed to override any existing
1229 * non-permanent entry. This means that when we remove this entry, the
1230 * system automatically returns to the old behavior.
1231 */
1232 list_add_rcu(&p->list, last_perm);
1233 out:
1234 spin_unlock_bh(&inetsw_lock);
1235
1236 return;
1237
1238 out_permanent:
1239 pr_err("Attempt to override permanent protocol %d\n", protocol);
1240 goto out;
1241
1242 out_illegal:
1243 pr_err("Ignoring attempt to register invalid socket type %d\n",
1244 p->type);
1245 goto out;
1246 }
1247 EXPORT_SYMBOL(inet_register_protosw);
1248
1249 void inet_unregister_protosw(struct inet_protosw *p)
1250 {
1251 if (INET_PROTOSW_PERMANENT & p->flags) {
1252 pr_err("Attempt to unregister permanent protocol %d\n",
1253 p->protocol);
1254 } else {
1255 spin_lock_bh(&inetsw_lock);
1256 list_del_rcu(&p->list);
1257 spin_unlock_bh(&inetsw_lock);
1258
1259 synchronize_net();
1260 }
1261 }
1262 EXPORT_SYMBOL(inet_unregister_protosw);
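
/* A hedged sketch of how a modular transport would use the pair of
 * calls above (the names and protocol number are hypothetical; compare
 * the SCTP module for a real user):
 *
 *	static struct inet_protosw my_protosw = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_MYPROTO,
 *		.prot     = &my_proto,
 *		.ops      = &inet_stream_ops,
 *	};
 *
 *	inet_register_protosw(&my_protosw);	in module init
 *	inet_unregister_protosw(&my_protosw);	in module exit
 */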
1263
1264 static int inet_sk_reselect_saddr(struct sock *sk)
1265 {
1266 struct inet_sock *inet = inet_sk(sk);
1267 __be32 old_saddr = inet->inet_saddr;
1268 __be32 daddr = inet->inet_daddr;
1269 struct flowi4 *fl4;
1270 struct rtable *rt;
1271 __be32 new_saddr;
1272 struct ip_options_rcu *inet_opt;
1273 int err;
1274
1275 inet_opt = rcu_dereference_protected(inet->inet_opt,
1276 lockdep_sock_is_held(sk));
1277 if (inet_opt && inet_opt->opt.srr)
1278 daddr = inet_opt->opt.faddr;
1279
1280 /* Query new route. */
1281 fl4 = &inet->cork.fl.u.ip4;
1282 rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
1283 sk->sk_protocol, inet->inet_sport,
1284 inet->inet_dport, sk);
1285 if (IS_ERR(rt))
1286 return PTR_ERR(rt);
1287
1288 new_saddr = fl4->saddr;
1289
1290 if (new_saddr == old_saddr) {
1291 sk_setup_caps(sk, &rt->dst);
1292 return 0;
1293 }
1294
1295 err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
1296 if (err) {
1297 ip_rt_put(rt);
1298 return err;
1299 }
1300
1301 sk_setup_caps(sk, &rt->dst);
1302
1303 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
1304 pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1305 __func__, &old_saddr, &new_saddr);
1306 }
1307
1308 /*
1309 * XXX The only one ugly spot where we need to
1310 * XXX really change the sockets identity after
1311 * XXX it has entered the hashes. -DaveM
1312 *
1313 * Besides that, it does not check for connection
1314 * uniqueness. Wait for troubles.
1315 */
1316 return __sk_prot_rehash(sk);
1317 }
1318
1319 int inet_sk_rebuild_header(struct sock *sk)
1320 {
1321 struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
1322 struct inet_sock *inet = inet_sk(sk);
1323 struct flowi4 *fl4;
1324 int err;
1325
1326 /* Route is OK, nothing to do. */
1327 if (rt)
1328 return 0;
1329
1330 /* Reroute. */
1331 fl4 = &inet->cork.fl.u.ip4;
1332 inet_sk_init_flowi4(inet, fl4);
1333 rt = ip_route_output_flow(sock_net(sk), fl4, sk);
1334 if (!IS_ERR(rt)) {
1335 err = 0;
1336 sk_setup_caps(sk, &rt->dst);
1337 } else {
1338 err = PTR_ERR(rt);
1339
1340 /* Routing failed... */
1341 sk->sk_route_caps = 0;
1342
1343 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
1344 sk->sk_state != TCP_SYN_SENT ||
1345 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1346 (err = inet_sk_reselect_saddr(sk)) != 0)
1347 WRITE_ONCE(sk->sk_err_soft, -err);
1348 }
1349
1350 return err;
1351 }
1352 EXPORT_SYMBOL(inet_sk_rebuild_header);
1353
1354 void inet_sk_set_state(struct sock *sk, int state)
1355 {
1356 trace_inet_sock_set_state(sk, sk->sk_state, state);
1357 sk->sk_state = state;
1358 }
1359 EXPORT_SYMBOL(inet_sk_set_state);
1360
1361 void inet_sk_state_store(struct sock *sk, int newstate)
1362 {
1363 trace_inet_sock_set_state(sk, sk->sk_state, newstate);
1364 smp_store_release(&sk->sk_state, newstate);
1365 }
1366
1367 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1368 netdev_features_t features)
1369 {
1370 bool udpfrag = false, fixedid = false, gso_partial, encap;
1371 struct sk_buff *segs = ERR_PTR(-EINVAL);
1372 const struct net_offload *ops;
1373 unsigned int offset = 0;
1374 struct iphdr *iph;
1375 int proto, tot_len;
1376 int nhoff;
1377 int ihl;
1378 int id;
1379
1380 skb_reset_network_header(skb);
1381 nhoff = skb_network_header(skb) - skb_mac_header(skb);
1382 if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1383 goto out;
1384
1385 iph = ip_hdr(skb);
1386 ihl = iph->ihl * 4;
1387 if (ihl < sizeof(*iph))
1388 goto out;
1389
1390 id = ntohs(iph->id);
1391 proto = iph->protocol;
1392
1393 /* Warning: after this point, iph might be no longer valid */
1394 if (unlikely(!pskb_may_pull(skb, ihl)))
1395 goto out;
1396 __skb_pull(skb, ihl);
1397
1398 encap = SKB_GSO_CB(skb)->encap_level > 0;
1399 if (encap)
1400 features &= skb->dev->hw_enc_features;
1401 SKB_GSO_CB(skb)->encap_level += ihl;
1402
1403 skb_reset_transport_header(skb);
1404
1405 segs = ERR_PTR(-EPROTONOSUPPORT);
1406
1407 fixedid = !!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCP_FIXEDID << encap));
1408
1409 if (!skb->encapsulation || encap)
1410 udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
1411
1412 ops = rcu_dereference(inet_offloads[proto]);
1413 if (likely(ops && ops->callbacks.gso_segment)) {
1414 segs = ops->callbacks.gso_segment(skb, features);
1415 if (!segs)
1416 skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
1417 }
1418
1419 if (IS_ERR_OR_NULL(segs))
1420 goto out;
1421
1422 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
1423
1424 skb = segs;
1425 do {
1426 iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
1427 if (udpfrag) {
1428 iph->frag_off = htons(offset >> 3);
1429 if (skb->next)
1430 iph->frag_off |= htons(IP_MF);
1431 offset += skb->len - nhoff - ihl;
1432 tot_len = skb->len - nhoff;
1433 } else if (skb_is_gso(skb)) {
1434 if (!fixedid) {
1435 iph->id = htons(id);
1436 id += skb_shinfo(skb)->gso_segs;
1437 }
1438
1439 if (gso_partial)
1440 tot_len = skb_shinfo(skb)->gso_size +
1441 SKB_GSO_CB(skb)->data_offset +
1442 skb->head - (unsigned char *)iph;
1443 else
1444 tot_len = skb->len - nhoff;
1445 } else {
1446 if (!fixedid)
1447 iph->id = htons(id++);
1448 tot_len = skb->len - nhoff;
1449 }
1450 iph->tot_len = htons(tot_len);
1451 ip_send_check(iph);
1452 if (encap)
1453 skb_reset_inner_headers(skb);
1454 skb->network_header = (u8 *)iph - skb->head;
1455 skb_reset_mac_len(skb);
1456 } while ((skb = skb->next));
1457
1458 out:
1459 return segs;
1460 }
1461
1462 static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
1463 netdev_features_t features)
1464 {
1465 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
1466 return ERR_PTR(-EINVAL);
1467
1468 return inet_gso_segment(skb, features);
1469 }
1470
1471 struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
1472 {
1473 const struct net_offload *ops;
1474 struct sk_buff *pp = NULL;
1475 const struct iphdr *iph;
1476 struct sk_buff *p;
1477 unsigned int hlen;
1478 unsigned int off;
1479 int flush = 1;
1480 int proto;
1481
1482 off = skb_gro_offset(skb);
1483 hlen = off + sizeof(*iph);
1484 iph = skb_gro_header(skb, hlen, off);
1485 if (unlikely(!iph))
1486 goto out;
1487
1488 proto = iph->protocol;
1489
1490 ops = rcu_dereference(inet_offloads[proto]);
1491 if (!ops || !ops->callbacks.gro_receive)
1492 goto out;
1493
1494 if (*(u8 *)iph != 0x45)
1495 goto out;
1496
1497 if (ip_is_fragment(iph))
1498 goto out;
1499
1500 if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1501 goto out;
1502
1503 NAPI_GRO_CB(skb)->proto = proto;
1504 flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF));
1505
1506 list_for_each_entry(p, head, list) {
1507 struct iphdr *iph2;
1508
1509 if (!NAPI_GRO_CB(p)->same_flow)
1510 continue;
1511
1512 iph2 = (struct iphdr *)(p->data + off);
1513 /* The above works because, with the exception of the top
1514 * (inner most) layer, we only aggregate pkts with the same
1515 * hdr length so all the hdrs we'll need to verify will start
1516 * at the same offset.
1517 */
1518 if ((iph->protocol ^ iph2->protocol) |
1519 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1520 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1521 NAPI_GRO_CB(p)->same_flow = 0;
1522 continue;
1523 }
1524 }
1525
1526 NAPI_GRO_CB(skb)->flush |= flush;
1527 NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;
1528
1529 /* Note : No need to call skb_gro_postpull_rcsum() here,
1530 * as we already checked that the checksum over the ipv4 header was 0
1531 */
1532 skb_gro_pull(skb, sizeof(*iph));
1533 skb_set_transport_header(skb, skb_gro_offset(skb));
1534
1535 pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
1536 ops->callbacks.gro_receive, head, skb);
1537
1538 out:
1539 skb_gro_flush_final(skb, pp, flush);
1540
1541 return pp;
1542 }
1543
1544 static struct sk_buff *ipip_gro_receive(struct list_head *head,
1545 struct sk_buff *skb)
1546 {
1547 if (NAPI_GRO_CB(skb)->encap_mark) {
1548 NAPI_GRO_CB(skb)->flush = 1;
1549 return NULL;
1550 }
1551
1552 NAPI_GRO_CB(skb)->encap_mark = 1;
1553
1554 return inet_gro_receive(head, skb);
1555 }
1556
1557 #define SECONDS_PER_DAY 86400
1558
1559 /* inet_current_timestamp - Return IP network timestamp
1560 *
1561 * Return milliseconds since midnight in network byte order.
1562 */
1563 __be32 inet_current_timestamp(void)
1564 {
1565 u32 secs;
1566 u32 msecs;
1567 struct timespec64 ts;
1568
1569 ktime_get_real_ts64(&ts);
1570
1571 /* Get secs since midnight. */
1572 (void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
1573 /* Convert to msecs. */
1574 msecs = secs * MSEC_PER_SEC;
1575 /* Convert nsec to msec. */
1576 msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
1577
1578 /* Convert to network byte order. */
1579 return htonl(msecs);
1580 }
1581 EXPORT_SYMBOL(inet_current_timestamp);
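
/* Worked example: at 01:02:03.004 UTC the seconds since midnight are
 * 3723, so the function returns htonl(3723 * 1000 + 4) ==
 * htonl(3723004), the millisecond format used by the IP timestamp
 * option and ICMP timestamp messages (RFC 791/792).
 */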
1582
1583 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len)
1584 {
1585 unsigned int family = READ_ONCE(sk->sk_family);
1586
1587 if (family == AF_INET)
1588 return ip_recv_error(sk, msg, len);
1589 #if IS_ENABLED(CONFIG_IPV6)
1590 if (family == AF_INET6)
1591 return pingv6_ops.ipv6_recv_error(sk, msg, len);
1592 #endif
1593 return -EINVAL;
1594 }
1595 EXPORT_SYMBOL(inet_recv_error);
1596
1597 int inet_gro_complete(struct sk_buff *skb, int nhoff)
1598 {
1599 struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1600 const struct net_offload *ops;
1601 __be16 totlen = iph->tot_len;
1602 int proto = iph->protocol;
1603 int err = -ENOSYS;
1604
1605 if (skb->encapsulation) {
1606 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1607 skb_set_inner_network_header(skb, nhoff);
1608 }
1609
1610 iph_set_totlen(iph, skb->len - nhoff);
1611 csum_replace2(&iph->check, totlen, iph->tot_len);
1612
1613 ops = rcu_dereference(inet_offloads[proto]);
1614 if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1615 goto out;
1616
1617 /* Only need to add sizeof(*iph) to get to the next hdr below
1618 * because any hdr with options will have been flushed in
1619 * inet_gro_receive().
1620 */
1621 err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
1622 tcp4_gro_complete, udp4_gro_complete,
1623 skb, nhoff + sizeof(*iph));
1624
1625 out:
1626 return err;
1627 }
1628
1629 static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
1630 {
1631 skb->encapsulation = 1;
1632 skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
1633 return inet_gro_complete(skb, nhoff);
1634 }
1635
1636 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1637 unsigned short type, unsigned char protocol,
1638 struct net *net)
1639 {
1640 struct socket *sock;
1641 int rc = sock_create_kern(net, family, type, protocol, &sock);
1642
1643 if (rc == 0) {
1644 *sk = sock->sk;
1645 (*sk)->sk_allocation = GFP_ATOMIC;
1646 (*sk)->sk_use_task_frag = false;
1647 /*
1648 * Unhash it so that IP input processing does not even see it,
1649 * we do not wish this socket to see incoming packets.
1650 */
1651 (*sk)->sk_prot->unhash(*sk);
1652 }
1653 return rc;
1654 }
1655 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1656
1657 unsigned long snmp_fold_field(void __percpu *mib, int offt)
1658 {
1659 unsigned long res = 0;
1660 int i;
1661
1662 for_each_possible_cpu(i)
1663 res += snmp_get_cpu_field(mib, i, offt);
1664 return res;
1665 }
1666 EXPORT_SYMBOL_GPL(snmp_fold_field);
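
/* Typical use, as in the /proc/net/snmp code: fold one MIB counter
 * across all possible CPUs, e.g.
 *
 *	val = snmp_fold_field(net->mib.tcp_statistics,
 *			      TCP_MIB_ACTIVEOPENS);
 */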
1667
1668 #if BITS_PER_LONG==32
1669
1670 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1671 size_t syncp_offset)
1672 {
1673 void *bhptr;
1674 struct u64_stats_sync *syncp;
1675 u64 v;
1676 unsigned int start;
1677
1678 bhptr = per_cpu_ptr(mib, cpu);
1679 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1680 do {
1681 start = u64_stats_fetch_begin(syncp);
1682 v = *(((u64 *)bhptr) + offt);
1683 } while (u64_stats_fetch_retry(syncp, start));
1684
1685 return v;
1686 }
1687 EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1688
1689 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1690 {
1691 u64 res = 0;
1692 int cpu;
1693
1694 for_each_possible_cpu(cpu) {
1695 res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1696 }
1697 return res;
1698 }
1699 EXPORT_SYMBOL_GPL(snmp_fold_field64);
1700 #endif
1701
1702 #ifdef CONFIG_IP_MULTICAST
1703 static const struct net_protocol igmp_protocol = {
1704 .handler = igmp_rcv,
1705 };
1706 #endif
1707
1708 static const struct net_protocol icmp_protocol = {
1709 .handler = icmp_rcv,
1710 .err_handler = icmp_err,
1711 .no_policy = 1,
1712 };
1713
1714 static __net_init int ipv4_mib_init_net(struct net *net)
1715 {
1716 int i;
1717
1718 net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1719 if (!net->mib.tcp_statistics)
1720 goto err_tcp_mib;
1721 net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1722 if (!net->mib.ip_statistics)
1723 goto err_ip_mib;
1724
1725 for_each_possible_cpu(i) {
1726 struct ipstats_mib *af_inet_stats;
1727 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1728 u64_stats_init(&af_inet_stats->syncp);
1729 }
1730
1731 net->mib.net_statistics = alloc_percpu(struct linux_mib);
1732 if (!net->mib.net_statistics)
1733 goto err_net_mib;
1734 net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1735 if (!net->mib.udp_statistics)
1736 goto err_udp_mib;
1737 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1738 if (!net->mib.icmp_statistics)
1739 goto err_icmp_mib;
1740 net->mib.icmpmsg_statistics = kzalloc_obj(struct icmpmsg_mib);
1741 if (!net->mib.icmpmsg_statistics)
1742 goto err_icmpmsg_mib;
1743
1744 tcp_mib_init(net);
1745 return 0;
1746
1747 err_icmpmsg_mib:
1748 free_percpu(net->mib.icmp_statistics);
1749 err_icmp_mib:
1750 free_percpu(net->mib.udp_statistics);
1751 err_udp_mib:
1752 free_percpu(net->mib.net_statistics);
1753 err_net_mib:
1754 free_percpu(net->mib.ip_statistics);
1755 err_ip_mib:
1756 free_percpu(net->mib.tcp_statistics);
1757 err_tcp_mib:
1758 return -ENOMEM;
1759 }
1760
1761 static __net_exit void ipv4_mib_exit_net(struct net *net)
1762 {
1763 kfree(net->mib.icmpmsg_statistics);
1764 free_percpu(net->mib.icmp_statistics);
1765 free_percpu(net->mib.udp_statistics);
1766 free_percpu(net->mib.net_statistics);
1767 free_percpu(net->mib.ip_statistics);
1768 free_percpu(net->mib.tcp_statistics);
1769 #ifdef CONFIG_MPTCP
1770 /* allocated on demand, see mptcp_init_sock() */
1771 free_percpu(net->mib.mptcp_statistics);
1772 #endif
1773 }
1774
1775 static __net_initdata struct pernet_operations ipv4_mib_ops = {
1776 .init = ipv4_mib_init_net,
1777 .exit = ipv4_mib_exit_net,
1778 };
1779
1780 static int __init init_ipv4_mibs(void)
1781 {
1782 return register_pernet_subsys(&ipv4_mib_ops);
1783 }
1784
1785 static __net_init int inet_init_net(struct net *net)
1786 {
1787 /*
1788 * Set defaults for local port range
1789 */
1790 net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;
1791
1792 seqlock_init(&net->ipv4.ping_group_range.lock);
1793 /*
1794 * Sane defaults - nobody may create ping sockets.
1795 * Boot scripts should set this to distro-specific group.
1796 */
1797 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1798 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1799
1800 /* Default values for sysctl-controlled parameters.
1801 * We set them here, in case sysctl is not compiled.
1802 */
1803 net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1804 net->ipv4.sysctl_ip_fwd_update_priority = 1;
1805 net->ipv4.sysctl_ip_dynaddr = 0;
1806 net->ipv4.sysctl_ip_early_demux = 1;
1807 net->ipv4.sysctl_udp_early_demux = 1;
1808 net->ipv4.sysctl_tcp_early_demux = 1;
1809 net->ipv4.sysctl_nexthop_compat_mode = 1;
1810 #ifdef CONFIG_SYSCTL
1811 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1812 #endif
1813
1814 /* Some igmp sysctl, whose values are always used */
1815 net->ipv4.sysctl_igmp_max_memberships = 20;
1816 net->ipv4.sysctl_igmp_max_msf = 10;
1817 /* IGMP reports for link-local multicast groups are enabled by default */
1818 net->ipv4.sysctl_igmp_llm_reports = 1;
1819 net->ipv4.sysctl_igmp_qrv = 2;
1820
1821 net->ipv4.sysctl_fib_notify_on_flag_change = 0;
1822
1823 return 0;
1824 }
1825
1826 static __net_initdata struct pernet_operations af_inet_ops = {
1827 .init = inet_init_net,
1828 };
1829
1830 static int __init init_inet_pernet_ops(void)
1831 {
1832 return register_pernet_subsys(&af_inet_ops);
1833 }
1834
1835 static int ipv4_proc_init(void);
1836
1837 /*
1838 * IP protocol layer initialiser
1839 */
1840
1841
1842 static const struct net_offload ipip_offload = {
1843 .callbacks = {
1844 .gso_segment = ipip_gso_segment,
1845 .gro_receive = ipip_gro_receive,
1846 .gro_complete = ipip_gro_complete,
1847 },
1848 };
1849
1850 static int __init ipip_offload_init(void)
1851 {
1852 return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
1853 }
1854
1855 static int __init ipv4_offload_init(void)
1856 {
1857 /*
1858 * Add offloads
1859 */
1860 if (udpv4_offload_init() < 0)
1861 pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1862 if (tcpv4_offload_init() < 0)
1863 pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
1864 if (ipip_offload_init() < 0)
1865 pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
1866
1867 net_hotdata.ip_packet_offload = (struct packet_offload) {
1868 .type = cpu_to_be16(ETH_P_IP),
1869 .callbacks = {
1870 .gso_segment = inet_gso_segment,
1871 .gro_receive = inet_gro_receive,
1872 .gro_complete = inet_gro_complete,
1873 },
1874 };
1875 dev_add_offload(&net_hotdata.ip_packet_offload);
1876 return 0;
1877 }
1878
1879 fs_initcall(ipv4_offload_init);
1880
1881 static struct packet_type ip_packet_type __read_mostly = {
1882 .type = cpu_to_be16(ETH_P_IP),
1883 .func = ip_rcv,
1884 .list_func = ip_list_rcv,
1885 };
1886
1887 static int __init inet_init(void)
1888 {
1889 struct inet_protosw *q;
1890 struct list_head *r;
1891 int rc;
1892
1893 sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
1894
1895 raw_hashinfo_init(&raw_v4_hashinfo);
1896
1897 rc = proto_register(&tcp_prot, 1);
1898 if (rc)
1899 goto out;
1900
1901 rc = proto_register(&udp_prot, 1);
1902 if (rc)
1903 goto out_unregister_tcp_proto;
1904
1905 rc = proto_register(&raw_prot, 1);
1906 if (rc)
1907 goto out_unregister_udp_proto;
1908
1909 rc = proto_register(&ping_prot, 1);
1910 if (rc)
1911 goto out_unregister_raw_proto;
1912
1913 /*
1914 * Tell SOCKET that we are alive...
1915 */
1916
1917 (void)sock_register(&inet_family_ops);
1918
1919 #ifdef CONFIG_SYSCTL
1920 ip_static_sysctl_init();
1921 #endif
1922
1923 /*
1924 * Add all the base protocols.
1925 */
1926
1927 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1928 pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1929
1930 net_hotdata.udp_protocol = (struct net_protocol) {
1931 .handler = udp_rcv,
1932 .err_handler = udp_err,
1933 .no_policy = 1,
1934 };
1935 if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
1936 pr_crit("%s: Cannot add UDP protocol\n", __func__);
1937
1938 net_hotdata.tcp_protocol = (struct net_protocol) {
1939 .handler = tcp_v4_rcv,
1940 .err_handler = tcp_v4_err,
1941 .no_policy = 1,
1942 .icmp_strict_tag_validation = 1,
1943 };
1944 if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
1945 pr_crit("%s: Cannot add TCP protocol\n", __func__);
1946 #ifdef CONFIG_IP_MULTICAST
1947 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1948 pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1949 #endif
1950
1951 /* Register the socket-side information for inet_create. */
1952 for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
1953 INIT_LIST_HEAD(r);
1954
1955 for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
1956 inet_register_protosw(q);
1957
1958 /*
1959 * Set the ARP module up
1960 */
1961
1962 arp_init();
1963
1964 /*
1965 * Set the IP module up
1966 */
1967
1968 ip_init();
1969
1970 /* Initialise per-cpu ipv4 mibs */
1971 if (init_ipv4_mibs())
1972 panic("%s: Cannot init ipv4 mibs\n", __func__);
1973
1974 /* Setup TCP slab cache for open requests. */
1975 tcp_init();
1976
1977 /* Setup UDP memory threshold */
1978 udp_init();
1979
1980 raw_init();
1981
1982 ping_init();
1983
1984 /*
1985 * Set the ICMP layer up
1986 */
1987
1988 if (icmp_init() < 0)
1989 panic("Failed to create the ICMP control socket.\n");
1990
1991 /*
1992 * Initialise the multicast router
1993 */
1994 #if defined(CONFIG_IP_MROUTE)
1995 if (ip_mr_init())
1996 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1997 #endif
1998
1999 if (init_inet_pernet_ops())
2000 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
2001
2002 ipv4_proc_init();
2003
2004 ipfrag_init();
2005
2006 dev_add_pack(&ip_packet_type);
2007
2008 ip_tunnel_core_init();
2009
2010 rc = 0;
2011 out:
2012 return rc;
2013 out_unregister_raw_proto:
2014 proto_unregister(&raw_prot);
2015 out_unregister_udp_proto:
2016 proto_unregister(&udp_prot);
2017 out_unregister_tcp_proto:
2018 proto_unregister(&tcp_prot);
2019 goto out;
2020 }
2021
2022 fs_initcall(inet_init);
2023
2024 /* ------------------------------------------------------------------------ */
2025
2026 #ifdef CONFIG_PROC_FS
2027 static int __init ipv4_proc_init(void)
2028 {
2029 int rc = 0;
2030
2031 if (raw_proc_init())
2032 goto out_raw;
2033 if (tcp4_proc_init())
2034 goto out_tcp;
2035 if (udp4_proc_init())
2036 goto out_udp;
2037 if (ping_proc_init())
2038 goto out_ping;
2039 if (ip_misc_proc_init())
2040 goto out_misc;
2041 out:
2042 return rc;
2043 out_misc:
2044 ping_proc_exit();
2045 out_ping:
2046 udp4_proc_exit();
2047 out_udp:
2048 tcp4_proc_exit();
2049 out_tcp:
2050 raw_proc_exit();
2051 out_raw:
2052 rc = -ENOMEM;
2053 goto out;
2054 }
2055
2056 #else /* CONFIG_PROC_FS */
2057 static int __init ipv4_proc_init(void)
2058 {
2059 return 0;
2060 }
2061 #endif /* CONFIG_PROC_FS */
2062