/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PF_INET protocol family socket handler.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *		Karl Knutson	:	Socket protocol table
 *		A.N.Kuznetsov	:	Socket death error in accept().
 *		John Richardson :	Fix non blocking error in connect()
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
 *		Alan Cox	:	Keep correct socket pointer on sock
 *					structures when accept()ed
 *		Alan Cox	:	Semantics of SO_LINGER aren't state
 *					moved to close when you look carefully.
 *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Tony Gale	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
 *		Alan Cox	:	bind() works correctly for RAW sockets.
 *					Note that FreeBSD at least was broken
 *					in this respect so be careful with
 *					compatibility tests...
 *		Alan Cox	:	routing cache support
 *		Alan Cox	:	memzero the socket structure for
 *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(e.g. for big web sites), but only if
 *					specifically requested by the application.
 *		Alan Cox	:	New buffering throughout IP. Used
 *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
 *		Alan Cox	:	BSD rather than common sense
 *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.
 *		Alan Cox	:	Locked down bind (see security list).
 *		Alan Cox	:	Loosened bind a little.
 *		Mike McLagan	:	ADD/DEL DLCI Ioctls
 *		Willy Konynenberg :	Transparent proxying support.
 *		David S. Miller	:	New socket lookup architecture.
 *					Some other random speedups.
 *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
 *		Andi Kleen	:	Fix inet_stream_connect TCP race.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
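
/* inetsw is indexed by socket type (SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, ...).
 * Each entry is a list of inet_protosw handlers for that type, kept with
 * permanent (built-in) protocols ahead of any loadable ones, so a wild-card
 * protocol lookup in inet_create() cannot be hijacked by a later module.
 */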
/* New destruction routine */

void inet_sock_destruct(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
		pr_err("Attempt to release TCP socket in state %d %p\n",
		       sk->sk_state, sk);
		return;
	}
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive inet socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);

	kfree(rcu_dereference_protected(inet->inet_opt, 1));
	dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
	dst_release(sk->sk_rx_dst);
	sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
/*
 *	The routines beyond this point handle the behaviour of an AF_INET
 *	socket object. Mostly it punts to the subprotocols of IP to do
 *	the work.
 */

/*
 *	Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
	struct inet_sock *inet;

	/* We may need to bind the socket. */
	lock_sock(sk);
	inet = inet_sk(sk);
	if (!inet->inet_num) {
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}
	release_sock(sk);
	return 0;
}
/*
 *	Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err, tcp_fastopen;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != TCP_LISTEN) {
		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
		 * Also the fastopen backlog may already have been set via the
		 * option because the socket was in TCP_LISTEN state previously
		 * but was shutdown() rather than close().
		 */
		tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
			fastopen_queue_tune(sk, backlog);
			tcp_fastopen_init_key_once(sock_net(sk));
		}

		err = inet_csk_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_listen);
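
/* The shortcut above means server-side TCP Fast Open can be turned on
 * system-wide without every application calling setsockopt(TCP_FASTOPEN).
 * Illustrative sketch, assuming the usual flag values
 * (TFO_SERVER_ENABLE = 0x2, TFO_SERVER_WO_SOCKOPT1 = 0x400):
 *
 *	sysctl -w net.ipv4.tcp_fastopen=0x402
 *
 * After that, any plain listen() on a TCP socket gets a fastopen queue
 * sized from its backlog via fastopen_queue_tune() above.
 */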
242 * Create an inet socket.
245 static int inet_create(struct net *net, struct socket *sock, int protocol,
249 struct inet_protosw *answer;
250 struct inet_sock *inet;
251 struct proto *answer_prot;
252 unsigned char answer_flags;
253 int try_loading_module = 0;
256 if (protocol < 0 || protocol >= IPPROTO_MAX)
259 sock->state = SS_UNCONNECTED;
261 /* Look for the requested type/protocol pair. */
263 err = -ESOCKTNOSUPPORT;
265 list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {
268 /* Check the non-wild match. */
269 if (protocol == answer->protocol) {
270 if (protocol != IPPROTO_IP)
273 /* Check for the two wild cases. */
274 if (IPPROTO_IP == protocol) {
275 protocol = answer->protocol;
278 if (IPPROTO_IP == answer->protocol)
281 err = -EPROTONOSUPPORT;
285 if (try_loading_module < 2) {
288 * Be more specific, e.g. net-pf-2-proto-132-type-1
289 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
291 if (++try_loading_module == 1)
292 request_module("net-pf-%d-proto-%d-type-%d",
293 PF_INET, protocol, sock->type);
295 * Fall back to generic, e.g. net-pf-2-proto-132
296 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
299 request_module("net-pf-%d-proto-%d",
301 goto lookup_protocol;
307 if (sock->type == SOCK_RAW && !kern &&
308 !ns_capable(net->user_ns, CAP_NET_RAW))
311 sock->ops = answer->ops;
312 answer_prot = answer->prot;
313 answer_flags = answer->flags;
316 WARN_ON(!answer_prot->slab);
319 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
324 if (INET_PROTOSW_REUSE & answer_flags)
325 sk->sk_reuse = SK_CAN_REUSE;
328 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
332 if (SOCK_RAW == sock->type) {
333 inet->inet_num = protocol;
334 if (IPPROTO_RAW == protocol)
338 if (net->ipv4.sysctl_ip_no_pmtu_disc)
339 inet->pmtudisc = IP_PMTUDISC_DONT;
341 inet->pmtudisc = IP_PMTUDISC_WANT;
345 sock_init_data(sock, sk);
347 sk->sk_destruct = inet_sock_destruct;
348 sk->sk_protocol = protocol;
349 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
356 inet->mc_list = NULL;
359 sk_refcnt_debug_inc(sk);
361 if (inet->inet_num) {
362 /* It assumes that any protocol which allows
363 * the user to assign a number at socket
364 * creation time automatically
367 inet->inet_sport = htons(inet->inet_num);
368 /* Add to protocol hash chains. */
369 err = sk->sk_prot->hash(sk);
371 sk_common_release(sk);
376 if (sk->sk_prot->init) {
377 err = sk->sk_prot->init(sk);
379 sk_common_release(sk);
385 err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
387 sk_common_release(sk);
/*
 *	The peer socket should always be NULL (or else). When we call this
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */
int inet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		long timeout;

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);

		/* If linger is set, we don't return until the close
		 * is complete.  Otherwise we return immediately. The
		 * actually closing is done the same either way.
		 *
		 * If the close is due to the process exiting, we never
		 * linger..
		 */
		timeout = 0;
		if (sock_flag(sk, SOCK_LINGER) &&
		    !(current->flags & PF_EXITING))
			timeout = sk->sk_lingertime;
		sock->sk = NULL;
		sk->sk_prot->close(sk, timeout);
	}
	return 0;
}
EXPORT_SYMBOL(inet_release);
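
/* Illustrative user-space view of the linger handling above: a process that
 * does
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *
 * may block in close() for up to 5 seconds (sk->sk_lingertime) while the
 * protocol finishes closing, unless the process is already exiting.
 */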
432 int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
434 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
435 struct sock *sk = sock->sk;
436 struct inet_sock *inet = inet_sk(sk);
437 struct net *net = sock_net(sk);
440 u32 tb_id = RT_TABLE_LOCAL;
443 /* If the socket has its own bind function then use it. (RAW) */
444 if (sk->sk_prot->bind) {
445 err = sk->sk_prot->bind(sk, uaddr, addr_len);
449 if (addr_len < sizeof(struct sockaddr_in))
452 if (addr->sin_family != AF_INET) {
453 /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
454 * only if s_addr is INADDR_ANY.
457 if (addr->sin_family != AF_UNSPEC ||
458 addr->sin_addr.s_addr != htonl(INADDR_ANY))
462 tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
463 chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
465 /* Not specified by any standard per-se, however it breaks too
466 * many applications when removed. It is unfortunate since
467 * allowing applications to make a non-local bind solves
468 * several problems with systems using dynamic addressing.
469 * (ie. your servers still start up even if your ISDN link
470 * is temporarily down)
472 err = -EADDRNOTAVAIL;
473 if (!net->ipv4.sysctl_ip_nonlocal_bind &&
474 !(inet->freebind || inet->transparent) &&
475 addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
476 chk_addr_ret != RTN_LOCAL &&
477 chk_addr_ret != RTN_MULTICAST &&
478 chk_addr_ret != RTN_BROADCAST)
481 snum = ntohs(addr->sin_port);
483 if (snum && snum < inet_prot_sock(net) &&
484 !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
487 /* We keep a pair of addresses. rcv_saddr is the one
488 * used by hash lookups, and saddr is used for transmit.
490 * In the BSD API these are the same except where it
491 * would be illegal to use them (multicast/broadcast) in
492 * which case the sending device address is used.
496 /* Check these errors (active socket, double bind). */
498 if (sk->sk_state != TCP_CLOSE || inet->inet_num)
499 goto out_release_sock;
501 inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
502 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
503 inet->inet_saddr = 0; /* Use device */
505 /* Make sure we are allowed to bind here. */
506 if ((snum || !inet->bind_address_no_port) &&
507 sk->sk_prot->get_port(sk, snum)) {
508 inet->inet_saddr = inet->inet_rcv_saddr = 0;
510 goto out_release_sock;
513 if (inet->inet_rcv_saddr)
514 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
516 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
517 inet->inet_sport = htons(inet->inet_num);
518 inet->inet_daddr = 0;
519 inet->inet_dport = 0;
527 EXPORT_SYMBOL(inet_bind);
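
/* The non-local bind escape hatches mentioned in inet_bind() above, seen
 * from the application side (illustrative):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_FREEBIND, &one, sizeof(one));
 *
 * or system-wide with "sysctl -w net.ipv4.ip_nonlocal_bind=1"; either lets
 * bind() accept an address that is not (yet) configured locally, e.g. so a
 * server can start while its link is still down.
 */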
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;

	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;
	if (uaddr->sa_family == AF_UNSPEC)
		return sk->sk_prot->disconnect(sk, flags);

	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
		return -EAGAIN;
	return sk->sk_prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change state of the socket from TCP_SYN_*.
	 * Connect() does not allow to get error notifications
	 * without closing the socket.
	 */
	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	return timeo;
}
570 * Connect to a remote host. There is regrettably still a little
571 * TCP 'magic' in here.
573 int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
574 int addr_len, int flags, int is_sendmsg)
576 struct sock *sk = sock->sk;
581 * uaddr can be NULL and addr_len can be 0 if:
582 * sk is a TCP fastopen active socket and
583 * TCP_FASTOPEN_CONNECT sockopt is set and
584 * we already have a valid cookie for this socket.
585 * In this case, user can call write() after connect().
586 * write() will invoke tcp_sendmsg_fastopen() which calls
587 * __inet_stream_connect().
590 if (addr_len < sizeof(uaddr->sa_family))
593 if (uaddr->sa_family == AF_UNSPEC) {
594 err = sk->sk_prot->disconnect(sk, flags);
595 sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
600 switch (sock->state) {
608 if (inet_sk(sk)->defer_connect)
609 err = is_sendmsg ? -EINPROGRESS : -EISCONN;
612 /* Fall out of switch with err, set for this state */
616 if (sk->sk_state != TCP_CLOSE)
619 err = sk->sk_prot->connect(sk, uaddr, addr_len);
623 sock->state = SS_CONNECTING;
625 if (!err && inet_sk(sk)->defer_connect)
628 /* Just entered SS_CONNECTING state; the only
629 * difference is that return value in non-blocking
630 * case is EINPROGRESS, rather than EALREADY.
636 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
638 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
639 int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
640 tcp_sk(sk)->fastopen_req &&
641 tcp_sk(sk)->fastopen_req->data ? 1 : 0;
643 /* Error code is set above */
644 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
647 err = sock_intr_errno(timeo);
648 if (signal_pending(current))
652 /* Connection was closed by RST, timeout, ICMP error
653 * or another process disconnected us.
655 if (sk->sk_state == TCP_CLOSE)
658 /* sk->sk_err may be not zero now, if RECVERR was ordered by user
659 * and error was received after socket entered established state.
660 * Hence, it is handled normally after connect() return successfully.
663 sock->state = SS_CONNECTED;
669 err = sock_error(sk) ? : -ECONNABORTED;
670 sock->state = SS_UNCONNECTED;
671 if (sk->sk_prot->disconnect(sk, flags))
672 sock->state = SS_DISCONNECTING;
675 EXPORT_SYMBOL(__inet_stream_connect);
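
/* The deferred-connect path described at the top of __inet_stream_connect(),
 * from user space (illustrative sketch):
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *	connect(fd, (struct sockaddr *)&daddr, sizeof(daddr));  returns 0
 *	write(fd, buf, len);   SYN (+ data, if a TFO cookie is cached) sent here
 *
 * which is why the function tolerates uaddr == NULL when re-entered from
 * tcp_sendmsg_fastopen().
 */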
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	int err;

	lock_sock(sock->sk);
	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
	release_sock(sock->sk);
	return err;
}
EXPORT_SYMBOL(inet_stream_connect);
/*
 *	Accept a pending connection. The TCP layer now gives BSD semantics.
 */

int inet_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	struct sock *sk1 = sock->sk;
	int err = -EINVAL;
	struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);

	if (!sk2)
		goto do_err;

	lock_sock(sk2);

	sock_rps_record_flow(sk2);
	WARN_ON(!((1 << sk2->sk_state) &
		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
		  TCPF_CLOSE_WAIT | TCPF_CLOSE)));

	sock_graft(sk2, newsock);

	newsock->state = SS_CONNECTED;
	err = 0;
	release_sock(sk2);
do_err:
	return err;
}
EXPORT_SYMBOL(inet_accept);
/*
 *	This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int *uaddr_len, int peer)
{
	struct sock *sk		= sock->sk;
	struct inet_sock *inet	= inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);

	sin->sin_family = AF_INET;
	if (peer) {
		if (!inet->inet_dport ||
		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		     peer == 1))
			return -ENOTCONN;
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
	} else {
		__be32 addr = inet->inet_rcv_saddr;
		if (!addr)
			addr = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
	}
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	*uaddr_len = sizeof(*sin);
	return 0;
}
EXPORT_SYMBOL(inet_getname);
int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	return sk->sk_prot->sendmsg(sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
		      size_t size, int flags)
{
	struct sock *sk = sock->sk;

	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	if (sk->sk_prot->sendpage)
		return sk->sk_prot->sendpage(sk, page, offset, size, flags);
	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);

int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	sock_rps_record_flow(sk);

	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(inet_recvmsg);
int inet_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	/* This should really check to make sure
	 * the socket is a TCP socket. (WHY AC...)
	 */
	how++; /* maps 0->1 has the advantage of making bit 1 rcvs and
		       1->2 bit 2 snds.
		       2->3 */
	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
		return -EINVAL;

	lock_sock(sk);
	if (sock->state == SS_CONNECTING) {
		if ((1 << sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	switch (sk->sk_state) {
	case TCP_CLOSE:
		err = -ENOTCONN;
		/* Hack to wake up other listeners, who can poll for
		   POLLHUP, even on eg. unconnected UDP sockets -- RR */
		/* fall through */
	default:
		sk->sk_shutdown |= how;
		if (sk->sk_prot->shutdown)
			sk->sk_prot->shutdown(sk, how);
		break;

	/* Remaining two branches are temporary solution for missing
	 * close() in multithreaded environment. It is _not_ a good idea,
	 * but we have no choice until close() is repaired at VFS level.
	 */
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
		break;
	}

	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_shutdown);
859 * ioctl() calls you can issue on an INET socket. Most of these are
860 * device configuration and stuff and very rarely used. Some ioctls
861 * pass on to the socket itself.
863 * NOTE: I like the idea of a module for the config stuff. ie ifconfig
864 * loads the devconfigure module does its configuring and unloads it.
865 * There's a good 20K of config code hanging around the kernel.
868 int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
870 struct sock *sk = sock->sk;
872 struct net *net = sock_net(sk);
876 err = sock_get_timestamp(sk, (struct timeval __user *)arg);
879 err = sock_get_timestampns(sk, (struct timespec __user *)arg);
884 err = ip_rt_ioctl(net, cmd, (void __user *)arg);
889 err = arp_ioctl(net, cmd, (void __user *)arg);
902 err = devinet_ioctl(net, cmd, (void __user *)arg);
905 if (sk->sk_prot->ioctl)
906 err = sk->sk_prot->ioctl(sk, cmd, arg);
913 EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = -ENOIOCTLCMD;

	if (sk->sk_prot->compat_ioctl)
		err = sk->sk_prot->compat_ioctl(sk, cmd, arg);

	return err;
}
#endif
928 const struct proto_ops inet_stream_ops = {
930 .owner = THIS_MODULE,
931 .release = inet_release,
933 .connect = inet_stream_connect,
934 .socketpair = sock_no_socketpair,
935 .accept = inet_accept,
936 .getname = inet_getname,
939 .listen = inet_listen,
940 .shutdown = inet_shutdown,
941 .setsockopt = sock_common_setsockopt,
942 .getsockopt = sock_common_getsockopt,
943 .sendmsg = inet_sendmsg,
944 .recvmsg = inet_recvmsg,
945 .mmap = sock_no_mmap,
946 .sendpage = inet_sendpage,
947 .splice_read = tcp_splice_read,
948 .read_sock = tcp_read_sock,
949 .sendmsg_locked = tcp_sendmsg_locked,
950 .sendpage_locked = tcp_sendpage_locked,
951 .peek_len = tcp_peek_len,
953 .compat_setsockopt = compat_sock_common_setsockopt,
954 .compat_getsockopt = compat_sock_common_getsockopt,
955 .compat_ioctl = inet_compat_ioctl,
958 EXPORT_SYMBOL(inet_stream_ops);
960 const struct proto_ops inet_dgram_ops = {
962 .owner = THIS_MODULE,
963 .release = inet_release,
965 .connect = inet_dgram_connect,
966 .socketpair = sock_no_socketpair,
967 .accept = sock_no_accept,
968 .getname = inet_getname,
971 .listen = sock_no_listen,
972 .shutdown = inet_shutdown,
973 .setsockopt = sock_common_setsockopt,
974 .getsockopt = sock_common_getsockopt,
975 .sendmsg = inet_sendmsg,
976 .recvmsg = inet_recvmsg,
977 .mmap = sock_no_mmap,
978 .sendpage = inet_sendpage,
979 .set_peek_off = sk_set_peek_off,
981 .compat_setsockopt = compat_sock_common_setsockopt,
982 .compat_getsockopt = compat_sock_common_getsockopt,
983 .compat_ioctl = inet_compat_ioctl,
986 EXPORT_SYMBOL(inet_dgram_ops);
989 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
992 static const struct proto_ops inet_sockraw_ops = {
994 .owner = THIS_MODULE,
995 .release = inet_release,
997 .connect = inet_dgram_connect,
998 .socketpair = sock_no_socketpair,
999 .accept = sock_no_accept,
1000 .getname = inet_getname,
1001 .poll = datagram_poll,
1002 .ioctl = inet_ioctl,
1003 .listen = sock_no_listen,
1004 .shutdown = inet_shutdown,
1005 .setsockopt = sock_common_setsockopt,
1006 .getsockopt = sock_common_getsockopt,
1007 .sendmsg = inet_sendmsg,
1008 .recvmsg = inet_recvmsg,
1009 .mmap = sock_no_mmap,
1010 .sendpage = inet_sendpage,
1011 #ifdef CONFIG_COMPAT
1012 .compat_setsockopt = compat_sock_common_setsockopt,
1013 .compat_getsockopt = compat_sock_common_getsockopt,
1014 .compat_ioctl = inet_compat_ioctl,
1018 static const struct net_proto_family inet_family_ops = {
1020 .create = inet_create,
1021 .owner = THIS_MODULE,
1024 /* Upon startup we insert all the elements in inetsw_array[] into
1025 * the linked list inetsw.
1027 static struct inet_protosw inetsw_array[] =
1030 .type = SOCK_STREAM,
1031 .protocol = IPPROTO_TCP,
1033 .ops = &inet_stream_ops,
1034 .flags = INET_PROTOSW_PERMANENT |
1040 .protocol = IPPROTO_UDP,
1042 .ops = &inet_dgram_ops,
1043 .flags = INET_PROTOSW_PERMANENT,
1048 .protocol = IPPROTO_ICMP,
1050 .ops = &inet_sockraw_ops,
1051 .flags = INET_PROTOSW_REUSE,
1056 .protocol = IPPROTO_IP, /* wild card */
1058 .ops = &inet_sockraw_ops,
1059 .flags = INET_PROTOSW_REUSE,
1063 #define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)
void inet_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	int protocol = p->protocol;
	struct list_head *last_perm;

	spin_lock_bh(&inetsw_lock);

	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	last_perm = &inetsw[p->type];
	list_for_each(lh, &inetsw[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);
		/* Check only the non-wild match. */
		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
			break;
		if (protocol == answer->protocol)
			goto out_permanent;
		last_perm = lh;
	}

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
out:
	spin_unlock_bh(&inetsw_lock);

	return;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}
EXPORT_SYMBOL(inet_register_protosw);

void inet_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw_lock);

		synchronize_net();
	}
}
EXPORT_SYMBOL(inet_unregister_protosw);
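
/* Sketch of how a loadable transport would plug into this table (SCTP is the
 * example the request_module() comment in inet_create() already uses).  The
 * names sample_prot and sample_ops are hypothetical, for illustration only:
 *
 *	static struct inet_protosw sample_protosw = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_SCTP,
 *		.prot     = &sample_prot,	(a struct proto)
 *		.ops      = &sample_ops,	(a struct proto_ops)
 *		.flags    = INET_PROTOSW_ICSK,
 *	};
 *
 *	inet_register_protosw(&sample_protosw);    on module init
 *	inet_unregister_protosw(&sample_protosw);  on module exit
 *
 * Because such an entry is not INET_PROTOSW_PERMANENT, unregistering it
 * simply falls back to whatever matched before it was added.
 */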
1127 static int inet_sk_reselect_saddr(struct sock *sk)
1129 struct inet_sock *inet = inet_sk(sk);
1130 __be32 old_saddr = inet->inet_saddr;
1131 __be32 daddr = inet->inet_daddr;
1135 struct ip_options_rcu *inet_opt;
1137 inet_opt = rcu_dereference_protected(inet->inet_opt,
1138 lockdep_sock_is_held(sk));
1139 if (inet_opt && inet_opt->opt.srr)
1140 daddr = inet_opt->opt.faddr;
1142 /* Query new route. */
1143 fl4 = &inet->cork.fl.u.ip4;
1144 rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
1145 sk->sk_bound_dev_if, sk->sk_protocol,
1146 inet->inet_sport, inet->inet_dport, sk);
1150 sk_setup_caps(sk, &rt->dst);
1152 new_saddr = fl4->saddr;
1154 if (new_saddr == old_saddr)
1157 if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) {
1158 pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
1159 __func__, &old_saddr, &new_saddr);
1162 inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;
1165 * XXX The only one ugly spot where we need to
1166 * XXX really change the sockets identity after
1167 * XXX it has entered the hashes. -DaveM
1169 * Besides that, it does not check for connection
1170 * uniqueness. Wait for troubles.
1172 return __sk_prot_rehash(sk);
1175 int inet_sk_rebuild_header(struct sock *sk)
1177 struct inet_sock *inet = inet_sk(sk);
1178 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1180 struct ip_options_rcu *inet_opt;
1184 /* Route is OK, nothing to do. */
1190 inet_opt = rcu_dereference(inet->inet_opt);
1191 daddr = inet->inet_daddr;
1192 if (inet_opt && inet_opt->opt.srr)
1193 daddr = inet_opt->opt.faddr;
1195 fl4 = &inet->cork.fl.u.ip4;
1196 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
1197 inet->inet_dport, inet->inet_sport,
1198 sk->sk_protocol, RT_CONN_FLAGS(sk),
1199 sk->sk_bound_dev_if);
1202 sk_setup_caps(sk, &rt->dst);
1206 /* Routing failed... */
1207 sk->sk_route_caps = 0;
1209 * Other protocols have to map its equivalent state to TCP_SYN_SENT.
1210 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
1212 if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr ||
1213 sk->sk_state != TCP_SYN_SENT ||
1214 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
1215 (err = inet_sk_reselect_saddr(sk)) != 0)
1216 sk->sk_err_soft = -err;
1221 EXPORT_SYMBOL(inet_sk_rebuild_header);
1223 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1224 netdev_features_t features)
1226 bool fixedid = false, gso_partial, encap;
1227 struct sk_buff *segs = ERR_PTR(-EINVAL);
1228 const struct net_offload *ops;
1235 skb_reset_network_header(skb);
1236 nhoff = skb_network_header(skb) - skb_mac_header(skb);
1237 if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
1242 if (ihl < sizeof(*iph))
1245 id = ntohs(iph->id);
1246 proto = iph->protocol;
1248 /* Warning: after this point, iph might be no longer valid */
1249 if (unlikely(!pskb_may_pull(skb, ihl)))
1251 __skb_pull(skb, ihl);
1253 encap = SKB_GSO_CB(skb)->encap_level > 0;
1255 features &= skb->dev->hw_enc_features;
1256 SKB_GSO_CB(skb)->encap_level += ihl;
1258 skb_reset_transport_header(skb);
1260 segs = ERR_PTR(-EPROTONOSUPPORT);
1262 if (!skb->encapsulation || encap) {
1263 fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
1265 /* fixed ID is invalid if DF bit is not set */
1266 if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
1270 ops = rcu_dereference(inet_offloads[proto]);
1271 if (likely(ops && ops->callbacks.gso_segment))
1272 segs = ops->callbacks.gso_segment(skb, features);
1274 if (IS_ERR_OR_NULL(segs))
1277 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
1281 iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
1282 if (skb_is_gso(skb)) {
1284 iph->id = htons(id);
1285 id += skb_shinfo(skb)->gso_segs;
1289 tot_len = skb_shinfo(skb)->gso_size +
1290 SKB_GSO_CB(skb)->data_offset +
1291 skb->head - (unsigned char *)iph;
1293 tot_len = skb->len - nhoff;
1296 iph->id = htons(id++);
1297 tot_len = skb->len - nhoff;
1299 iph->tot_len = htons(tot_len);
1302 skb_reset_inner_headers(skb);
1303 skb->network_header = (u8 *)iph - skb->head;
1304 } while ((skb = skb->next));
1309 EXPORT_SYMBOL(inet_gso_segment);
1311 struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
1313 const struct net_offload *ops;
1314 struct sk_buff **pp = NULL;
1316 const struct iphdr *iph;
1323 off = skb_gro_offset(skb);
1324 hlen = off + sizeof(*iph);
1325 iph = skb_gro_header_fast(skb, off);
1326 if (skb_gro_header_hard(skb, hlen)) {
1327 iph = skb_gro_header_slow(skb, hlen, off);
1332 proto = iph->protocol;
1335 ops = rcu_dereference(inet_offloads[proto]);
1336 if (!ops || !ops->callbacks.gro_receive)
1339 if (*(u8 *)iph != 0x45)
1342 if (ip_is_fragment(iph))
1345 if (unlikely(ip_fast_csum((u8 *)iph, 5)))
1348 id = ntohl(*(__be32 *)&iph->id);
1349 flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
1352 for (p = *head; p; p = p->next) {
1356 if (!NAPI_GRO_CB(p)->same_flow)
1359 iph2 = (struct iphdr *)(p->data + off);
1360 /* The above works because, with the exception of the top
1361 * (inner most) layer, we only aggregate pkts with the same
1362 * hdr length so all the hdrs we'll need to verify will start
1363 * at the same offset.
1365 if ((iph->protocol ^ iph2->protocol) |
1366 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1367 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1368 NAPI_GRO_CB(p)->same_flow = 0;
1372 /* All fields must match except length and checksum. */
1373 NAPI_GRO_CB(p)->flush |=
1374 (iph->ttl ^ iph2->ttl) |
1375 (iph->tos ^ iph2->tos) |
1376 ((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
1378 NAPI_GRO_CB(p)->flush |= flush;
1380 /* We need to store of the IP ID check to be included later
1381 * when we can verify that this packet does in fact belong
1384 flush_id = (u16)(id - ntohs(iph2->id));
1386 /* This bit of code makes it much easier for us to identify
1387 * the cases where we are doing atomic vs non-atomic IP ID
1388 * checks. Specifically an atomic check can return IP ID
1389 * values 0 - 0xFFFF, while a non-atomic check can only
1390 * return 0 or 0xFFFF.
1392 if (!NAPI_GRO_CB(p)->is_atomic ||
1393 !(iph->frag_off & htons(IP_DF))) {
1394 flush_id ^= NAPI_GRO_CB(p)->count;
1395 flush_id = flush_id ? 0xFFFF : 0;
1398 /* If the previous IP ID value was based on an atomic
1399 * datagram we can overwrite the value and ignore it.
1401 if (NAPI_GRO_CB(skb)->is_atomic)
1402 NAPI_GRO_CB(p)->flush_id = flush_id;
1404 NAPI_GRO_CB(p)->flush_id |= flush_id;
1407 NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
1408 NAPI_GRO_CB(skb)->flush |= flush;
1409 skb_set_network_header(skb, off);
1410 /* The above will be needed by the transport layer if there is one
1411 * immediately following this IP hdr.
1414 /* Note : No need to call skb_gro_postpull_rcsum() here,
1415 * as we already checked checksum over ipv4 header was 0
1417 skb_gro_pull(skb, sizeof(*iph));
1418 skb_set_transport_header(skb, skb_gro_offset(skb));
1420 pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
1426 skb_gro_flush_final(skb, pp, flush);
1430 EXPORT_SYMBOL(inet_gro_receive);
1432 static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
1433 struct sk_buff *skb)
1435 if (NAPI_GRO_CB(skb)->encap_mark) {
1436 NAPI_GRO_CB(skb)->flush = 1;
1440 NAPI_GRO_CB(skb)->encap_mark = 1;
1442 return inet_gro_receive(head, skb);
#define SECONDS_PER_DAY	86400

/* inet_current_timestamp - Return IP network timestamp
 *
 * Return milliseconds since midnight in network byte order.
 */
__be32 inet_current_timestamp(void)
{
	u32 secs;
	u32 msecs;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	/* Get secs since midnight. */
	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
	/* Convert to msecs. */
	msecs = secs * MSEC_PER_SEC;
	/* Convert nsec to msec. */
	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;

	/* Convert to network byte order. */
	return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);
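
/* Worked example: at 01:02:03.004 UTC, seconds-since-midnight is 3723, so
 * the value returned above is htonl(3723 * 1000 + 4) = htonl(3723004),
 * i.e. the millisecond format the IP/ICMP timestamp options expect.
 */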
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	if (sk->sk_family == AF_INET)
		return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
	return -EINVAL;
}
1482 int inet_gro_complete(struct sk_buff *skb, int nhoff)
1484 __be16 newlen = htons(skb->len - nhoff);
1485 struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1486 const struct net_offload *ops;
1487 int proto = iph->protocol;
1490 if (skb->encapsulation) {
1491 skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1492 skb_set_inner_network_header(skb, nhoff);
1495 csum_replace2(&iph->check, iph->tot_len, newlen);
1496 iph->tot_len = newlen;
1499 ops = rcu_dereference(inet_offloads[proto]);
1500 if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1503 /* Only need to add sizeof(*iph) to get to the next hdr below
1504 * because any hdr with option will have been flushed in
1505 * inet_gro_receive().
1507 err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
1514 EXPORT_SYMBOL(inet_gro_complete);
static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return inet_gro_complete(skb, nhoff);
}
int inet_ctl_sock_create(struct sock **sk, unsigned short family,
			 unsigned short type, unsigned char protocol,
			 struct net *net)
{
	struct socket *sock;
	int rc = sock_create_kern(net, family, type, protocol, &sock);

	if (rc == 0) {
		*sk = sock->sk;
		(*sk)->sk_allocation = GFP_ATOMIC;
		/*
		 * Unhash it so that IP input processing does not even see it,
		 * we do not wish this socket to see incoming packets.
		 */
		(*sk)->sk_prot->unhash(*sk);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
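
/* Typical caller: per-netns control sockets that only transmit.  The ICMP
 * layer, for instance, creates its per-CPU sockets roughly like this
 * (simplified sketch):
 *
 *	struct sock *sk;
 *	int err = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
 *				       IPPROTO_ICMP, net);
 *
 * and then uses sk purely for sending replies; because the socket is
 * unhashed above, it can never match an incoming packet.
 */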
u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field);

unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
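
/* Example of the folding pattern (this is how the /proc/net/snmp code reads
 * a UDP counter):
 *
 *	unsigned long in =
 *		snmp_fold_field(net->mib.udp_statistics, UDP_MIB_INDATAGRAMS);
 *
 * i.e. the aggregate value of a MIB entry is the sum of its per-CPU slots.
 */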
1560 #if BITS_PER_LONG==32
1562 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1563 size_t syncp_offset)
1566 struct u64_stats_sync *syncp;
1570 bhptr = per_cpu_ptr(mib, cpu);
1571 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1573 start = u64_stats_fetch_begin_irq(syncp);
1574 v = *(((u64 *)bhptr) + offt);
1575 } while (u64_stats_fetch_retry_irq(syncp, start));
1579 EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1581 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1586 for_each_possible_cpu(cpu) {
1587 res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1591 EXPORT_SYMBOL_GPL(snmp_fold_field64);
1594 #ifdef CONFIG_IP_MULTICAST
1595 static const struct net_protocol igmp_protocol = {
1596 .handler = igmp_rcv,
1601 /* thinking of making this const? Don't.
1602 * early_demux can change based on sysctl.
1604 static struct net_protocol tcp_protocol = {
1605 .early_demux = tcp_v4_early_demux,
1606 .early_demux_handler = tcp_v4_early_demux,
1607 .handler = tcp_v4_rcv,
1608 .err_handler = tcp_v4_err,
1611 .icmp_strict_tag_validation = 1,
1614 /* thinking of making this const? Don't.
1615 * early_demux can change based on sysctl.
1617 static struct net_protocol udp_protocol = {
1618 .early_demux = udp_v4_early_demux,
1619 .early_demux_handler = udp_v4_early_demux,
1621 .err_handler = udp_err,
1626 static const struct net_protocol icmp_protocol = {
1627 .handler = icmp_rcv,
1628 .err_handler = icmp_err,
1633 static __net_init int ipv4_mib_init_net(struct net *net)
1637 net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1638 if (!net->mib.tcp_statistics)
1640 net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1641 if (!net->mib.ip_statistics)
1644 for_each_possible_cpu(i) {
1645 struct ipstats_mib *af_inet_stats;
1646 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1647 u64_stats_init(&af_inet_stats->syncp);
1650 net->mib.net_statistics = alloc_percpu(struct linux_mib);
1651 if (!net->mib.net_statistics)
1653 net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1654 if (!net->mib.udp_statistics)
1656 net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
1657 if (!net->mib.udplite_statistics)
1658 goto err_udplite_mib;
1659 net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1660 if (!net->mib.icmp_statistics)
1662 net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
1664 if (!net->mib.icmpmsg_statistics)
1665 goto err_icmpmsg_mib;
1671 free_percpu(net->mib.icmp_statistics);
1673 free_percpu(net->mib.udplite_statistics);
1675 free_percpu(net->mib.udp_statistics);
1677 free_percpu(net->mib.net_statistics);
1679 free_percpu(net->mib.ip_statistics);
1681 free_percpu(net->mib.tcp_statistics);
1686 static __net_exit void ipv4_mib_exit_net(struct net *net)
1688 kfree(net->mib.icmpmsg_statistics);
1689 free_percpu(net->mib.icmp_statistics);
1690 free_percpu(net->mib.udplite_statistics);
1691 free_percpu(net->mib.udp_statistics);
1692 free_percpu(net->mib.net_statistics);
1693 free_percpu(net->mib.ip_statistics);
1694 free_percpu(net->mib.tcp_statistics);
1697 static __net_initdata struct pernet_operations ipv4_mib_ops = {
1698 .init = ipv4_mib_init_net,
1699 .exit = ipv4_mib_exit_net,
1702 static int __init init_ipv4_mibs(void)
1704 return register_pernet_subsys(&ipv4_mib_ops);
1707 static __net_init int inet_init_net(struct net *net)
1710 * Set defaults for local port range
1712 seqlock_init(&net->ipv4.ip_local_ports.lock);
1713 net->ipv4.ip_local_ports.range[0] = 32768;
1714 net->ipv4.ip_local_ports.range[1] = 60999;
1716 seqlock_init(&net->ipv4.ping_group_range.lock);
1718 * Sane defaults - nobody may create ping sockets.
1719 * Boot scripts should set this to distro-specific group.
1721 net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
1722 net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
1724 /* Default values for sysctl-controlled parameters.
1725 * We set them here, in case sysctl is not compiled.
1727 net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
1728 net->ipv4.sysctl_ip_dynaddr = 0;
1729 net->ipv4.sysctl_ip_early_demux = 1;
1730 net->ipv4.sysctl_udp_early_demux = 1;
1731 net->ipv4.sysctl_tcp_early_demux = 1;
1732 #ifdef CONFIG_SYSCTL
1733 net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
1736 /* Some igmp sysctl, whose values are always used */
1737 net->ipv4.sysctl_igmp_max_memberships = 20;
1738 net->ipv4.sysctl_igmp_max_msf = 10;
1739 /* IGMP reports for link-local multicast groups are enabled by default */
1740 net->ipv4.sysctl_igmp_llm_reports = 1;
1741 net->ipv4.sysctl_igmp_qrv = 2;
1746 static __net_exit void inet_exit_net(struct net *net)
1750 static __net_initdata struct pernet_operations af_inet_ops = {
1751 .init = inet_init_net,
1752 .exit = inet_exit_net,
static int __init init_inet_pernet_ops(void)
{
	return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);
1763 * IP protocol layer initialiser
1766 static struct packet_offload ip_packet_offload __read_mostly = {
1767 .type = cpu_to_be16(ETH_P_IP),
1769 .gso_segment = inet_gso_segment,
1770 .gro_receive = inet_gro_receive,
1771 .gro_complete = inet_gro_complete,
1775 static const struct net_offload ipip_offload = {
1777 .gso_segment = inet_gso_segment,
1778 .gro_receive = ipip_gro_receive,
1779 .gro_complete = ipip_gro_complete,
1783 static int __init ipip_offload_init(void)
1785 return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
1788 static int __init ipv4_offload_init(void)
1793 if (udpv4_offload_init() < 0)
1794 pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
1795 if (tcpv4_offload_init() < 0)
1796 pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
1797 if (ipip_offload_init() < 0)
1798 pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);
1800 dev_add_offload(&ip_packet_offload);
1804 fs_initcall(ipv4_offload_init);
1806 static struct packet_type ip_packet_type __read_mostly = {
1807 .type = cpu_to_be16(ETH_P_IP),
1811 static int __init inet_init(void)
1813 struct inet_protosw *q;
1814 struct list_head *r;
1817 sock_skb_cb_check_size(sizeof(struct inet_skb_parm));
1819 rc = proto_register(&tcp_prot, 1);
1823 rc = proto_register(&udp_prot, 1);
1825 goto out_unregister_tcp_proto;
1827 rc = proto_register(&raw_prot, 1);
1829 goto out_unregister_udp_proto;
1831 rc = proto_register(&ping_prot, 1);
1833 goto out_unregister_raw_proto;
1836 * Tell SOCKET that we are alive...
1839 (void)sock_register(&inet_family_ops);
1841 #ifdef CONFIG_SYSCTL
1842 ip_static_sysctl_init();
1846 * Add all the base protocols.
1849 if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
1850 pr_crit("%s: Cannot add ICMP protocol\n", __func__);
1851 if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
1852 pr_crit("%s: Cannot add UDP protocol\n", __func__);
1853 if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
1854 pr_crit("%s: Cannot add TCP protocol\n", __func__);
1855 #ifdef CONFIG_IP_MULTICAST
1856 if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
1857 pr_crit("%s: Cannot add IGMP protocol\n", __func__);
1860 /* Register the socket-side information for inet_create. */
1861 for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
1864 for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
1865 inet_register_protosw(q);
1868 * Set the ARP module up
1874 * Set the IP module up
1879 /* Setup TCP slab cache for open requests. */
1882 /* Setup UDP memory threshold */
1885 /* Add UDP-Lite (RFC 3828) */
1886 udplite4_register();
1891 * Set the ICMP layer up
1894 if (icmp_init() < 0)
1895 panic("Failed to create the ICMP control socket.\n");
1898 * Initialise the multicast router
1900 #if defined(CONFIG_IP_MROUTE)
1902 pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
1905 if (init_inet_pernet_ops())
1906 pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
1908 * Initialise per-cpu ipv4 mibs
1911 if (init_ipv4_mibs())
1912 pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
1918 dev_add_pack(&ip_packet_type);
1920 ip_tunnel_core_init();
1925 out_unregister_raw_proto:
1926 proto_unregister(&raw_prot);
1927 out_unregister_udp_proto:
1928 proto_unregister(&udp_prot);
1929 out_unregister_tcp_proto:
1930 proto_unregister(&tcp_prot);
1934 fs_initcall(inet_init);
1936 /* ------------------------------------------------------------------------ */
1938 #ifdef CONFIG_PROC_FS
1939 static int __init ipv4_proc_init(void)
1943 if (raw_proc_init())
1945 if (tcp4_proc_init())
1947 if (udp4_proc_init())
1949 if (ping_proc_init())
1951 if (ip_misc_proc_init())
1968 #else /* CONFIG_PROC_FS */
1969 static int __init ipv4_proc_init(void)
1973 #endif /* CONFIG_PROC_FS */