/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "name_table.h"
#include "name_distr.h"
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_FWD_MSG		1
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4	/* ACK at 1/4 of rcv window size */
enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};
struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 */
struct tipc_sock {
	struct sock sk;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	atomic_t dupl_rcvcnt;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
};
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static int tsk_set_importance(struct tipc_sock *tsk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tsk->phdr, (u32)imp);
	return 0;
}
static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to the number of
 * advertisable blocks, taking into account the ratio truesize(len)/len.
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ.
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}
/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by the peer we
 *   fall back to message based flow control, i.e. the counter is
 *   incremented by one per message.
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}
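
/* Worked example (illustrative, assuming FLOWCTL_BLK_SZ is 1024 bytes as
 * defined in msg.h): a 4000 byte message counts as
 * tsk_blocks(4000) = 4000 / 1024 + 1 = 4 advertisable blocks, while a
 * 64 KiB receive buffer advertises tsk_adv_blocks(65536) =
 * 65536 / 1024 / 4 = 16 blocks, i.e. a quarter of the nominal size, to
 * absorb the truesize(len)/len overhead mentioned above.
 */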
/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}
/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
}
static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is a datagram socket
 *
 * Returns true if connectionless, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}
/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}
/* tipc_set_sk_state - set the sk_state of the socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
	struct sock *sk_; \
	int rc_; \
 \
	while ((rc_ = !(condition_))) { \
		DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
		sk_ = (sock_)->sk; \
		rc_ = tipc_sk_sock_err((sock_), timeo_); \
		if (rc_) \
			break; \
		prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
		release_sock(sk_); \
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep(); \
		lock_sock(sk_); \
		remove_wait_queue(sk_sleep(sk_), &wait_); \
	} \
	rc_; \
})
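
/* Usage sketch (illustrative): callers block until the condition holds or
 * a timeout/socket error fires, e.g. as done in tipc_sendmcast() below:
 *
 *	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
 *
 * Note that the macro drops the socket lock around wait_woken(), so the
 * condition is re-evaluated with the lock re-taken after each wakeup.
 */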
/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct tipc_net *tn;
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;
	tn = net_generic(sock_net(sk), tipc_net_id);

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}

	return 0;
}
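
/* Example (user space, illustrative): the socket types handled in the
 * switch above map directly onto the standard socket() call:
 *
 *	int rdm = socket(AF_TIPC, SOCK_RDM, 0);	// reliable datagram
 *	int str = socket(AF_TIPC, SOCK_STREAM, 0);	// byte stream
 *
 * Any nonzero protocol argument is rejected with EPROTONOSUPPORT, per the
 * validation at the top of tipc_sk_create().
 */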
static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}
/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = CONN_TIMEOUT_DEFAULT;
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer).
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (TIPC_SKB_CB(skb)->bytes_read) {
			kfree_skb(skb);
			continue;
		}
		if (!tipc_sk_type_connectionless(sk) &&
		    sk->sk_state != TIPC_DISCONNECTING) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			tipc_node_remove_conn(net, dnode, tsk->portid);
		}
		tipc_sk_respond(sk, skb, error);
	}

	if (tipc_sk_type_connectionless(sk))
		return;

	if (sk->sk_state != TIPC_DISCONNECTING) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
	}
}
/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}
/**
 * tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 *       access any non-constant socket information.
 */
static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
		     int uaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsk = tipc_sk(sk);
	int res = -EINVAL;

	lock_sock(sk);
	if (unlikely(!uaddr_len)) {
		res = tipc_sk_withdraw(tsk, 0, NULL);
		goto exit;
	}
	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}
	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
	    (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
	    (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
		res = -EACCES;
		goto exit;
	}

	res = (addr->scope >= 0) ?
	      tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
	      tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
	release_sock(sk);
	return res;
}
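
/* Example (user space, illustrative): publishing service type 18888,
 * instances 10 through 20, cluster-wide; the values are arbitrary.
 *
 *	struct sockaddr_tipc addr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 18888, .lower = 10, .upper = 20 },
 *	};
 *	bind(sd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Binding again with .scope = -TIPC_CLUSTER_SCOPE withdraws the same name
 * sequence, matching the negative-scope convention documented above.
 */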
/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}
/**
 * tipc_poll_mask - read pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 *
 * Returns pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits. TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
	case TIPC_CONNECTING:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		/* fall thru' */
	case TIPC_LISTEN:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}
/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen))
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}
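
/* Example (user space, illustrative): multicasting to the instance range
 * {18888, 0..99}; the type value is an arbitrary assumption.
 *
 *	struct sockaddr_tipc maddr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_MCAST,
 *		.addr.nameseq = { .type = 18888, .lower = 0, .upper = 99 },
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&maddr, sizeof(maddr));
 */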
/**
 * tipc_send_group_msg - send a message to a member in the group
 * @net: network namespace
 * @m: message to send
 * @dnode: destination node
 * @dport: destination port
 * @dlen: total length of message data
 */
static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
			       struct msghdr *m, struct tipc_member *mb,
			       u32 dnode, u32 dport, int dlen)
{
	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
	struct tipc_mc_method *method = &tsk->mc_method;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct sk_buff_head pkts;
	int mtu, rc;

	/* Complete message header */
	msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, dport);
	msg_set_destnode(hdr, dnode);
	msg_set_grp_bc_seqno(hdr, bc_snd_nxt);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(&tsk->cong_links, dnode, 0);
		tsk->cong_link_cnt++;
	}

	/* Update send window */
	tipc_group_update_member(mb, blks);

	/* A broadcast sent within next EXPIRE period must follow same path */
	method->rcast = true;
	method->mandatory = true;
	return dlen;
}
/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				!tipc_group_cong(grp, node, port, blks, &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	while (++lookups < 4) {
		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(grp, node, port, blks, &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					!tipc_group_cong(grp, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}
/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_nlist *dsts = tipc_group_dests(grp);
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
				!tipc_group_bc_cong(grp, blks));
	if (unlikely(rc))
		return rc;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
}
/**
 * tipc_send_group_mcast - send message to all members with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Returns the number of bytes sent on success, or errno
 */
static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	u32 type, inst, scope, exclude;
	struct list_head dsts;
	u32 dstcnt;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);
	exclude = tipc_group_exclude(grp);

	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
				 &dstcnt, exclude, true))
		return -EHOSTUNREACH;

	if (dstcnt == 1) {
		tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
		return tipc_send_group_unicast(sock, m, dlen, timeout);
	}

	tipc_dest_list_purge(&dsts);
	return tipc_send_group_bcast(sock, m, dlen, timeout);
}
/**
 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
 * @arrvq: queue with arriving messages, to be cloned after destination lookup
 * @inputq: queue with cloned messages, delivered to socket after dest lookup
 *
 * Multi-threaded: parallel calls with reference to same queues may occur
 */
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
		       struct sk_buff_head *inputq)
{
	u32 self = tipc_own_addr(net);
	u32 type, lower, upper, scope;
	struct sk_buff *skb, *_skb;
	u32 portid, oport, onode;
	struct sk_buff_head tmpq;
	struct list_head dports;
	struct tipc_msg *hdr;
	int user, mtyp, hlen;
	bool exact;

	__skb_queue_head_init(&tmpq);
	INIT_LIST_HEAD(&dports);

	skb = tipc_skb_peek(arrvq, &inputq->lock);
	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
		hdr = buf_msg(skb);
		user = msg_user(hdr);
		mtyp = msg_type(hdr);
		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
		oport = msg_origport(hdr);
		onode = msg_orignode(hdr);
		type = msg_nametype(hdr);

		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
			spin_lock_bh(&inputq->lock);
			if (skb_peek(arrvq) == skb) {
				__skb_dequeue(arrvq);
				__skb_queue_tail(inputq, skb);
			}
			spin_unlock_bh(&inputq->lock);
			continue;
		}

		/* Group messages require exact scope match */
		if (msg_in_group(hdr)) {
			lower = 0;
			upper = ~0;
			scope = msg_lookup_scope(hdr);
			exact = true;
		} else {
			/* TIPC_NODE_SCOPE means "any scope" in this context */
			if (onode == self)
				scope = TIPC_NODE_SCOPE;
			else
				scope = TIPC_CLUSTER_SCOPE;
			exact = false;
			lower = msg_namelower(hdr);
			upper = msg_nameupper(hdr);
		}

		/* Create destination port list: */
		tipc_nametbl_mc_lookup(net, type, lower, upper,
				       scope, exact, &dports);

		/* Clone message per destination */
		while (tipc_dest_pop(&dports, NULL, &portid)) {
			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
			if (_skb) {
				msg_set_destport(buf_msg(_skb), portid);
				__skb_queue_tail(&tmpq, _skb);
				continue;
			}
			pr_warn("Failed to clone mcast rcv buffer\n");
		}
		/* Append to inputq if not already done by other thread */
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			skb_queue_splice_tail_init(&tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(&tmpq);
		kfree_skb(skb);
	}
	tipc_sk_rcv(net, inputq);
}
/**
 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
 * @tsk: receiving socket
 * @skb: pointer to message buffer.
 */
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
				   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 onode = tsk_own_node(tsk);
	struct sock *sk = &tsk->sk;
	int mtyp = msg_type(hdr);
	bool conn_cong;

	/* Ignore if connection cannot be validated: */
	if (!tsk_peer_msg(tsk, hdr))
		goto exit;

	if (unlikely(msg_errcode(hdr))) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
				      tsk_peer_port(tsk));
		sk->sk_state_change(sk);
		goto exit;
	}

	tsk->probe_unacked = false;

	if (mtyp == CONN_PROBE) {
		msg_set_type(hdr, CONN_PROBE_REPLY);
		if (tipc_msg_reverse(onode, &skb, TIPC_OK))
			__skb_queue_tail(xmitq, skb);
		return;
	} else if (mtyp == CONN_ACK) {
		conn_cong = tsk_conn_cong(tsk);
		tsk->snt_unacked -= msg_conn_ack(hdr);
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);
		if (conn_cong)
			sk->sk_write_space(sk);
	} else if (mtyp != CONN_PROBE_REPLY) {
		pr_warn("Received unknown CONN_PROTO msg\n");
	}
exit:
	kfree_skb(skb);
}
/**
 * tipc_sendmsg - send message in connectionless manner
 * @sock: socket structure
 * @m: message to send
 * @dsz: amount of user data to be sent
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_sendmsg(struct socket *sock,
			struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendmsg(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct list_head *clinks = &tsk->cong_links;
	bool syn = !tipc_sk_type_connectionless(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq *seq;
	struct sk_buff_head pkts;
	u32 dport, dnode = 0;
	u32 type, inst;
	int mtu, rc;

	if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
		return -EMSGSIZE;

	if (likely(dest)) {
		if (unlikely(m->msg_namelen < sizeof(*dest)))
			return -EINVAL;
		if (unlikely(dest->family != AF_TIPC))
			return -EINVAL;
	}

	if (grp) {
		if (!dest)
			return tipc_send_group_bcast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_NAME)
			return tipc_send_group_anycast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_ID)
			return tipc_send_group_unicast(sock, m, dlen, timeout);
		if (dest->addrtype == TIPC_ADDR_MCAST)
			return tipc_send_group_mcast(sock, m, dlen, timeout);
		return -EINVAL;
	}

	if (unlikely(!dest)) {
		dest = &tsk->peer;
		if (!syn || dest->family != AF_TIPC)
			return -EDESTADDRREQ;
	}

	if (unlikely(syn)) {
		if (sk->sk_state == TIPC_LISTEN)
			return -EPIPE;
		if (sk->sk_state != TIPC_OPEN)
			return -EISCONN;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsk->conn_type = dest->addr.name.name.type;
			tsk->conn_instance = dest->addr.name.name.instance;
		}
	}

	seq = &dest->addr.nameseq;
	if (dest->addrtype == TIPC_ADDR_MCAST)
		return tipc_sendmcast(sock, seq, m, dlen, timeout);

	if (dest->addrtype == TIPC_ADDR_NAME) {
		type = dest->addr.name.name.type;
		inst = dest->addr.name.name.instance;
		dnode = dest->addr.name.domain;
		msg_set_type(hdr, TIPC_NAMED_MSG);
		msg_set_hdr_sz(hdr, NAMED_H_SIZE);
		msg_set_nametype(hdr, type);
		msg_set_nameinst(hdr, inst);
		msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
		dport = tipc_nametbl_translate(net, type, inst, &dnode);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dport);
		if (unlikely(!dport && !dnode))
			return -EHOSTUNREACH;
	} else if (dest->addrtype == TIPC_ADDR_ID) {
		dnode = dest->addr.id.node;
		msg_set_type(hdr, TIPC_DIRECT_MSG);
		msg_set_lookup_scope(hdr, 0);
		msg_set_destnode(hdr, dnode);
		msg_set_destport(hdr, dest->addr.id.ref);
		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
	}

	/* Block or return if destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(clinks, dnode, 0));
	if (unlikely(rc))
		return rc;

	skb_queue_head_init(&pkts);
	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
	if (unlikely(rc == -ELINKCONG)) {
		tipc_dest_push(clinks, dnode, 0);
		tsk->cong_link_cnt++;
		rc = 0;
	}

	if (unlikely(syn && !rc))
		tipc_set_sk_state(sk, TIPC_CONNECTING);

	return rc ? rc : dlen;
}
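
/* Example (user space, illustrative): connectionless send to a named
 * service; the type/instance values are arbitrary assumptions.
 *
 *	struct sockaddr_tipc daddr = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name = { .type = 18888, .instance = 17 },
 *		.addr.name.domain = 0,	// 0 = cluster-wide lookup
 *	};
 *	sendto(sd, buf, len, 0, (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * The name is translated to a port via tipc_nametbl_translate() above;
 * an unresolvable name fails with EHOSTUNREACH.
 */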
/**
 * tipc_sendstream - send stream-oriented data
 * @sock: socket structure
 * @m: data to send
 * @dsz: total length of data to be transmitted
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
{
	struct sock *sk = sock->sk;
	int ret;

	lock_sock(sk);
	ret = __tipc_sendstream(sock, m, dsz);
	release_sock(sk);

	return ret;
}
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	struct sk_buff_head pkts;
	u32 dnode = tsk_peer_node(tsk);
	int send, sent = 0;
	int rc = 0;

	skb_queue_head_init(&pkts);

	if (unlikely(dlen > INT_MAX))
		return -EMSGSIZE;

	/* Handle implicit connection setup */
	if (unlikely(dest)) {
		rc = __tipc_sendmsg(sock, m, dlen);
		if (dlen && (dlen == rc))
			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
		return rc;
	}

	do {
		rc = tipc_wait_for_cond(sock, &timeout,
					(!tsk->cong_link_cnt &&
					 !tsk_conn_cong(tsk) &&
					 tipc_sk_connected(sk)));
		if (unlikely(rc))
			break;

		send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
		if (unlikely(rc != send))
			break;

		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
		if (unlikely(rc == -ELINKCONG)) {
			tsk->cong_link_cnt = 1;
			rc = 0;
		}
		if (likely(!rc)) {
			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
			sent += send;
		}
	} while (sent < dlen && !rc);

	return sent ? sent : rc;
}
/**
 * tipc_send_packet - send a connection-oriented message
 * @sock: socket structure
 * @m: message to send
 * @dsz: length of data to be transmitted
 *
 * Used for SOCK_SEQPACKET messages.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
	if (dsz > TIPC_MAX_USER_MSG_SIZE)
		return -EMSGSIZE;

	return tipc_sendstream(sock, m, dsz);
}
/* tipc_sk_finish_conn - complete the setup of a connection
 */
static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
				u32 peer_node)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *msg = &tsk->phdr;

	msg_set_destnode(msg, peer_node);
	msg_set_destport(msg, peer_port);
	msg_set_type(msg, TIPC_CONN_MSG);
	msg_set_lookup_scope(msg, 0);
	msg_set_hdr_sz(msg, SHORT_H_SIZE);

	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
		return;

	/* Fall back to message based flow control */
	tsk->rcv_win = FLOWCTL_MSG_WIN;
	tsk->snd_win = FLOWCTL_MSG_WIN;
}
/**
 * tipc_sk_set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @skb: received message buffer
 *
 * Note: Address is not captured if not requested by receiver.
 */
static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
	DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
	struct tipc_msg *hdr = buf_msg(skb);

	if (!srcaddr)
		return;

	srcaddr->sock.family = AF_TIPC;
	srcaddr->sock.addrtype = TIPC_ADDR_ID;
	srcaddr->sock.scope = 0;
	srcaddr->sock.addr.id.ref = msg_origport(hdr);
	srcaddr->sock.addr.id.node = msg_orignode(hdr);
	srcaddr->sock.addr.name.domain = 0;
	m->msg_namelen = sizeof(struct sockaddr_tipc);

	if (!msg_in_group(hdr))
		return;

	/* Group message users may also want to know sending member's id */
	srcaddr->member.family = AF_TIPC;
	srcaddr->member.addrtype = TIPC_ADDR_NAME;
	srcaddr->member.scope = 0;
	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
	srcaddr->member.addr.name.domain = 0;
	m->msg_namelen = sizeof(*srcaddr);
}
/**
 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tsk: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				 struct tipc_sock *tsk)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tsk->conn_type != 0);
		anc_data[0] = tsk->conn_type;
		anc_data[1] = tsk->conn_instance;
		anc_data[2] = tsk->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
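
/* Example (user space, illustrative): reading the TIPC_DESTNAME cmsg that
 * the switch above fills in for named messages.
 *
 *	char cbuf[CMSG_SPACE(12)];
 *	struct msghdr mh = { .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	// ...after recvmsg(sd, &mh, 0)...
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	if (cm && cm->cmsg_level == SOL_TIPC &&
 *	    cm->cmsg_type == TIPC_DESTNAME) {
 *		__u32 *name = (__u32 *)CMSG_DATA(cm);  // {type, lower, upper}
 *	}
 */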
static void tipc_sk_send_ack(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	struct tipc_msg *msg;
	u32 peer_port = tsk_peer_port(tsk);
	u32 dnode = tsk_peer_node(tsk);

	if (!tipc_sk_connected(sk))
		return;
	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			      dnode, tsk_own_node(tsk), peer_port,
			      tsk->portid, TIPC_OK);
	if (!skb)
		return;
	msg = buf_msg(skb);
	msg_set_conn_ack(msg, tsk->rcv_unacked);
	tsk->rcv_unacked = 0;

	/* Adjust to and advertise the correct window limit */
	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
		msg_set_adv_win(msg, tsk->rcv_win);
	}
	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
}
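
/* Flow control sketch (grounded in the code above and in tipc_recvmsg()
 * below): with TIPC_ACK_RATE == 4, a receiver acks once rcv_unacked
 * reaches a quarter of rcv_win, e.g. a 16-block window triggers a
 * CONN_ACK after 4 consumed blocks, and the ack also re-advertises the
 * window derived from sk_rcvbuf via tsk_adv_blocks().
 */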
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	long timeo = *timeop;
	int err = sock_error(sk);

	if (err)
		return err;

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			if (sk->sk_shutdown & RCV_SHUTDOWN) {
				err = -ENOTCONN;
				break;
			}
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = sock_error(sk);
		if (err)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	*timeop = timeo;
	return err;
}
/**
 * tipc_recvmsg - receive packet-oriented message
 * @m: descriptor for message info
 * @buflen: length of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
			size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	bool connected = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	int rc, err, hlen, dlen, copy;
	struct sk_buff_head xmitq;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	bool grp_evt;
	long timeout;

	/* Catch invalid receive requests */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	/* Step rcv queue to first msg with data or error; wait if necessary */
	do {
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			goto exit;
		skb = skb_peek(&sk->sk_receive_queue);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);
		grp_evt = msg_is_grp_evt(hdr);
		if (likely(dlen || err))
			break;
		tsk_advance_rx_queue(sk);
	} while (1);

	/* Collect msg meta data, including error code and rejected data */
	tipc_sk_set_orig_addr(m, skb);
	rc = tipc_sk_anc_data_recv(m, hdr, tsk);
	if (unlikely(rc))
		goto exit;

	/* Capture data if non-error msg, otherwise just set return value */
	if (likely(!err)) {
		copy = min_t(int, dlen, buflen);
		if (unlikely(copy != dlen))
			m->msg_flags |= MSG_TRUNC;
		rc = skb_copy_datagram_msg(skb, hlen, m, copy);
	} else {
		copy = 0;
		rc = 0;
		if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
			rc = -ECONNRESET;
	}
	if (unlikely(rc))
		goto exit;

	/* Mark message as group event if applicable */
	if (unlikely(grp_evt)) {
		if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
			m->msg_flags |= MSG_EOR;
		m->msg_flags |= MSG_OOB;
		copy = 0;
	}

	/* Capture of data or error code/rejected data was successful */
	if (unlikely(flags & MSG_PEEK))
		goto exit;

	/* Send group flow control advertisement when applicable */
	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
		skb_queue_head_init(&xmitq);
		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
					  msg_orignode(hdr), msg_origport(hdr),
					  &xmitq);
		tipc_node_distr_xmit(sock_net(sk), &xmitq);
	}

	tsk_advance_rx_queue(sk);

	if (likely(!connected))
		goto exit;

	/* Send connection flow control advertisement when applicable */
	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
		tipc_sk_send_ack(tsk);
exit:
	release_sock(sk);
	return rc ? rc : copy;
}
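
/* Example (user space, illustrative): packet-oriented receive; a message
 * larger than the buffer is truncated and MSG_TRUNC is set, per the copy
 * logic above.
 *
 *	char buf[256];
 *	struct sockaddr_tipc src;
 *	socklen_t alen = sizeof(src);
 *	ssize_t n = recvfrom(sd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&src, &alen);
 */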
/**
 * tipc_recvstream - receive stream-oriented data
 * @m: descriptor for message info
 * @buflen: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available,
 * it will optionally wait for more; it never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
			   size_t buflen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct tipc_skb_cb *skb_cb;
	bool peek = flags & MSG_PEEK;
	int offset, required, copy, copied = 0;
	int hlen, dlen, err, rc;
	long timeout;

	/* Catch invalid receive attempts */
	if (unlikely(!buflen))
		return -EINVAL;

	lock_sock(sk);
	if (unlikely(sk->sk_state == TIPC_OPEN)) {
		rc = -ENOTCONN;
		goto exit;
	}
	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Look at first msg in receive queue; wait if necessary */
		rc = tipc_wait_for_rcvmsg(sock, &timeout);
		if (unlikely(rc))
			break;
		skb = skb_peek(&sk->sk_receive_queue);
		skb_cb = TIPC_SKB_CB(skb);
		hdr = buf_msg(skb);
		dlen = msg_data_sz(hdr);
		hlen = msg_hdr_sz(hdr);
		err = msg_errcode(hdr);

		/* Discard any empty non-errored (SYN-) message */
		if (unlikely(!dlen && !err)) {
			tsk_advance_rx_queue(sk);
			continue;
		}

		/* Collect msg meta data, incl. error code and rejected data */
		if (!copied) {
			tipc_sk_set_orig_addr(m, skb);
			rc = tipc_sk_anc_data_recv(m, hdr, tsk);
			if (rc)
				break;
		}

		/* Copy data if msg ok, otherwise return error/partial data */
		if (likely(!err)) {
			offset = skb_cb->bytes_read;
			copy = min_t(int, dlen - offset, buflen - copied);
			rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
			if (unlikely(rc))
				break;
			copied += copy;
			offset += copy;
			if (unlikely(offset < dlen)) {
				if (!peek)
					skb_cb->bytes_read = offset;
				break;
			}
		} else {
			rc = 0;
			if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
				rc = -ECONNRESET;
			if (copied || rc)
				break;
		}

		if (unlikely(peek))
			break;

		tsk_advance_rx_queue(sk);

		/* Send connection flow control advertisement when applicable */
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
			tipc_sk_send_ack(tsk);

		/* Exit if all requested data or FIN/error received */
		if (copied == buflen || err)
			break;

	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
exit:
	release_sock(sk);
	return copied ? copied : rc;
}
/**
 * tipc_write_space - wake up thread if port congestion is released
 * @sk: socket
 */
static void tipc_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
	rcu_read_unlock();
}

/**
 * tipc_data_ready - wake up threads to indicate messages have been received
 * @sk: socket
 */
static void tipc_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
						EPOLLRDNORM | EPOLLRDBAND);
	rcu_read_unlock();
}

static void tipc_sock_destruct(struct sock *sk)
{
	__skb_queue_purge(&sk->sk_receive_queue);
}
static void tipc_sk_proto_rcv(struct sock *sk,
			      struct sk_buff_head *inputq,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = __skb_dequeue(inputq);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_group *grp = tsk->group;
	bool wakeup = false;

	switch (msg_user(hdr)) {
	case CONN_MANAGER:
		tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
		return;
	case SOCK_WAKEUP:
		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
		tsk->cong_link_cnt--;
		wakeup = true;
		break;
	case GROUP_PROTOCOL:
		tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
		break;
	case TOP_SRV:
		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
				      hdr, inputq, xmitq);
		break;
	default:
		break;
	}

	if (wakeup)
		sk->sk_write_space(sk);

	kfree_skb(skb);
}
/**
 * tipc_sk_filter_connect - Handle incoming message for a connection-based socket
 * @tsk: TIPC socket
 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
 *
 * Returns true if everything ok, false otherwise
 */
static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_msg *hdr = buf_msg(skb);
	u32 pport = msg_origport(hdr);
	u32 pnode = msg_orignode(hdr);

	if (unlikely(msg_mcast(hdr)))
		return false;

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
		/* Accept only ACK or NACK message */
		if (unlikely(!msg_connected(hdr))) {
			if (pport != tsk_peer_port(tsk) ||
			    pnode != tsk_peer_node(tsk))
				return false;

			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = ECONNREFUSED;
			sk->sk_state_change(sk);
			return true;
		}

		if (unlikely(!msg_isdata(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			sk->sk_err = EINVAL;
			sk->sk_state_change(sk);
			return true;
		}

		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
		msg_set_importance(&tsk->phdr, msg_importance(hdr));

		/* If 'ACK+' message, add to socket receive queue */
		if (msg_data_sz(hdr))
			return true;

		/* If empty 'ACK-' message, wake up sleeping connect() */
		sk->sk_data_ready(sk);

		/* 'ACK-' message is neither accepted nor rejected: */
		msg_set_dest_droppable(hdr, 1);
		return false;

	case TIPC_OPEN:
	case TIPC_DISCONNECTING:
		break;
	case TIPC_LISTEN:
		/* Accept only SYN message */
		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
			return true;
		break;
	case TIPC_ESTABLISHED:
		/* Accept only connection-based messages sent by peer */
		if (unlikely(!tsk_peer_msg(tsk, hdr)))
			return false;

		if (unlikely(msg_errcode(hdr))) {
			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
			/* Let timer expire on its own */
			tipc_node_remove_conn(net, tsk_peer_node(tsk),
					      tsk->portid);
			sk->sk_state_change(sk);
		}
		return true;
	default:
		pr_err("Unknown sk_state %u\n", sk->sk_state);
	}

	return false;
}
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 *
 * For connection oriented messages, irrespective of importance,
 * the default queue limit is 2 MB.
 *
 * For connectionless messages, queue limits are based on message
 * importance as follows:
 *
 * TIPC_LOW_IMPORTANCE       (2 MB)
 * TIPC_MEDIUM_IMPORTANCE    (4 MB)
 * TIPC_HIGH_IMPORTANCE      (8 MB)
 * TIPC_CRITICAL_IMPORTANCE  (16 MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = buf_msg(skb);

	if (unlikely(msg_in_group(hdr)))
		return sk->sk_rcvbuf;

	if (unlikely(!msg_connected(hdr)))
		return sk->sk_rcvbuf << msg_importance(hdr);

	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return sk->sk_rcvbuf;

	return FLOWCTL_MSG_LIM;
}
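
/* Worked example (illustrative, assuming the documented 2 MB default for
 * sk_rcvbuf): a connectionless message of TIPC_CRITICAL_IMPORTANCE
 * (importance level 3) gets a limit of 2 MB << 3 = 16 MB, matching the
 * table above, while a connected peer supporting TIPC_BLOCK_FLOWCTL is
 * limited to sk_rcvbuf itself.
 */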
/**
 * tipc_sk_filter_rcv - validate incoming message
 * @sk: socket
 * @skb: pointer to message.
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken
 */
static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	bool sk_conn = !tipc_sk_type_connectionless(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = buf_msg(skb);
	struct net *net = sock_net(sk);
	struct sk_buff_head inputq;
	int limit, err = TIPC_OK;

	TIPC_SKB_CB(skb)->bytes_read = 0;
	__skb_queue_head_init(&inputq);
	__skb_queue_tail(&inputq, skb);

	if (unlikely(!msg_isdata(hdr)))
		tipc_sk_proto_rcv(sk, &inputq, xmitq);

	if (unlikely(grp))
		tipc_group_filter_msg(grp, &inputq, xmitq);

	/* Validate and add to receive buffer if there is space */
	while ((skb = __skb_dequeue(&inputq))) {
		hdr = buf_msg(skb);
		limit = rcvbuf_limit(sk, skb);
		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
		    (!sk_conn && msg_connected(hdr)) ||
		    (!grp && msg_in_group(hdr)))
			err = TIPC_ERR_NO_PORT;
		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
			atomic_inc(&sk->sk_drops);
			err = TIPC_ERR_OVERLOAD;
		}

		if (unlikely(err)) {
			tipc_skb_reject(net, err, skb, xmitq);
			err = TIPC_OK;
			continue;
		}
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk);
	}
}
/**
 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
 *
 * Caller must hold socket lock
 */
static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	unsigned int before = sk_rmem_alloc_get(sk);
	struct sk_buff_head xmitq;
	unsigned int added;

	__skb_queue_head_init(&xmitq);

	tipc_sk_filter_rcv(sk, skb, &xmitq);
	added = sk_rmem_alloc_get(sk) - before;
	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);

	/* Send pending response/rejected messages, if any */
	tipc_node_distr_xmit(sock_net(sk), &xmitq);
	return 0;
}
/**
 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
 *                   inputq and try adding them to socket or backlog queue
 * @inputq: list of incoming buffers with potentially different destinations
 * @sk: socket where the buffers should be enqueued
 * @dport: port number for the socket
 *
 * Caller must hold socket lock
 */
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
			    u32 dport, struct sk_buff_head *xmitq)
{
	unsigned long time_limit = jiffies + 2;
	struct sk_buff *skb;
	unsigned int lim;
	atomic_t *dcnt;
	u32 onode;

	while (skb_queue_len(inputq)) {
		if (unlikely(time_after_eq(jiffies, time_limit)))
			return;

		skb = tipc_skb_dequeue(inputq, dport);
		if (unlikely(!skb))
			return;

		/* Add message directly to receive queue if possible */
		if (!sock_owned_by_user(sk)) {
			tipc_sk_filter_rcv(sk, skb, xmitq);
			continue;
		}

		/* Try backlog, compensating for double-counted bytes */
		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
		if (!sk->sk_backlog.len)
			atomic_set(dcnt, 0);
		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
		if (likely(!sk_add_backlog(sk, skb, lim)))
			continue;

		/* Overload => reject message back to sender */
		onode = tipc_own_addr(sock_net(sk));
		atomic_inc(&sk->sk_drops);
		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
			__skb_queue_tail(xmitq, skb);
		break;
	}
}
/**
 * tipc_sk_rcv - handle a chain of incoming buffers
 * @inputq: buffer list containing the buffers
 * Consumes all buffers in list until inputq is empty
 * Note: may be called in multiple threads referring to the same queue
 */
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
	struct sk_buff_head xmitq;
	u32 dnode, dport = 0;
	int err;
	struct tipc_sock *tsk;
	struct sock *sk;
	struct sk_buff *skb;

	__skb_queue_head_init(&xmitq);
	while (skb_queue_len(inputq)) {
		dport = tipc_skb_peek_port(inputq, dport);
		tsk = tipc_sk_lookup(net, dport);

		if (likely(tsk)) {
			sk = &tsk->sk;
			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
				spin_unlock_bh(&sk->sk_lock.slock);
			}
			/* Send pending response/rejected messages, if any */
			tipc_node_distr_xmit(sock_net(sk), &xmitq);
			sock_put(sk);
			continue;
		}
		/* No destination socket => dequeue skb if still there */
		skb = tipc_skb_dequeue(inputq, dport);
		if (!skb)
			return;

		/* Try secondary lookup if unresolved named message */
		err = TIPC_ERR_NO_PORT;
		if (tipc_msg_lookup_dest(net, skb, &err))
			goto xmit;

		/* Prepare for message rejection */
		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
			continue;
xmit:
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, dport);
	}
}
static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk;
	int done;

	do {
		int err = sock_error(sk);

		if (err)
			return err;
		if (!*timeo_p)
			return -ETIMEDOUT;
		if (signal_pending(current))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		done = sk_wait_event(sk, timeo_p,
				     sk->sk_state != TIPC_CONNECTING, &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
	} while (!done);
	return 0;
}

/**
 * tipc_connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_connect(struct socket *sock, struct sockaddr *dest,
			int destlen, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
	int previous;
	int res = 0;

	if (destlen != sizeof(struct sockaddr_tipc))
		return -EINVAL;

	lock_sock(sk);

	if (tsk->group) {
		res = -EINVAL;
		goto exit;
	}

	if (dst->family == AF_UNSPEC) {
		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
		if (!tipc_sk_type_connectionless(sk))
			res = -EINVAL;
		goto exit;
	} else if (dst->family != AF_TIPC) {
		res = -EINVAL;
	}
	if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
		res = -EINVAL;
	if (res)
		goto exit;

	/* DGRAM/RDM connect(), just save the destaddr */
	if (tipc_sk_type_connectionless(sk)) {
		memcpy(&tsk->peer, dest, destlen);
		goto exit;
	}

	previous = sk->sk_state;

	switch (sk->sk_state) {
	case TIPC_OPEN:
		/* Send a 'SYN-' to destination */
		m.msg_name = dest;
		m.msg_namelen = destlen;

		/* If connect() is non-blocking, set MSG_DONTWAIT so that
		 * __tipc_sendmsg() never blocks.
		 */
		if (!timeout)
			m.msg_flags = MSG_DONTWAIT;

		res = __tipc_sendmsg(sock, &m, 0);
		if ((res < 0) && (res != -EWOULDBLOCK))
			goto exit;

		/* Just entered TIPC_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		res = -EINPROGRESS;
		/* fall through */
	case TIPC_CONNECTING:
		if (!timeout) {
			if (previous == TIPC_CONNECTING)
				res = -EALREADY;
			goto exit;
		}
		timeout = msecs_to_jiffies(timeout);
		/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
		res = tipc_wait_for_connect(sock, &timeout);
		break;
	case TIPC_ESTABLISHED:
		res = -EISCONN;
		break;
	default:
		res = -EINVAL;
	}

exit:
	release_sock(sk);
	return res;
}

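/* Illustrative user-space sketch (not part of this file's build): a
 * blocking connect() to a named TIPC service. The type/instance values
 * below are hypothetical; domain 0 requests a cluster-wide lookup:
 *
 *	struct sockaddr_tipc peer = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAME,
 *		.addr.name.name.type = 4242,
 *		.addr.name.name.instance = 1,
 *		.addr.name.domain = 0,
 *	};
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *	if (connect(sd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *		perror("connect");
 */
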
/**
 * tipc_listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);
	res = tipc_set_sk_state(sk, TIPC_LISTEN);
	release_sock(sk);

	return res;
}

static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
	struct sock *sk = sock->sk;
	DEFINE_WAIT(wait);
	int err;

	/* True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
			release_sock(sk);
			timeo = schedule_timeout(timeo);
			lock_sock(sk);
		}
		err = 0;
		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/**
 * tipc_accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_msg *msg;
	long timeo;
	int res;

	lock_sock(sk);

	if (sk->sk_state != TIPC_LISTEN) {
		res = -EINVAL;
		goto exit;
	}
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	res = tipc_wait_for_accept(sock, timeo);
	if (res)
		goto exit;

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
	if (res)
		goto exit;
	security_sk_clone(sock->sk, new_sock->sk);

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	tsk_rej_rx_queue(new_sk);

	/* Connect new socket to its peer */
	tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));

	tsk_set_importance(new_tsock, msg_importance(msg));
	if (msg_named(msg)) {
		new_tsock->conn_type = msg_nametype(msg);
		new_tsock->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		tsk_advance_rx_queue(sk);
		__tipc_sendstream(new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);
exit:
	release_sock(sk);
	return res;
}

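/* Illustrative user-space sketch (not part of this file's build): a
 * minimal accept loop on a bound, listening TIPC socket "sd";
 * handle_peer() is a hypothetical application helper:
 *
 *	listen(sd, 32);
 *	for (;;) {
 *		int peer = accept(sd, NULL, NULL);
 *
 *		if (peer < 0)
 *			break;
 *		handle_peer(peer);
 *	}
 */
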
/**
 * tipc_shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
	sk->sk_shutdown = SEND_SHUTDOWN;

	if (sk->sk_state == TIPC_DISCONNECTING) {
		/* Discard any unreceived messages */
		__skb_queue_purge(&sk->sk_receive_queue);

		/* Wake up anyone sleeping in poll */
		sk->sk_state_change(sk);
		res = 0;
	} else {
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}

static void tipc_sk_timeout(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct tipc_sock *tsk = tipc_sk(sk);
	u32 peer_port = tsk_peer_port(tsk);
	u32 peer_node = tsk_peer_node(tsk);
	u32 own_node = tsk_own_node(tsk);
	u32 own_port = tsk->portid;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;

	bh_lock_sock(sk);
	if (!tipc_sk_connected(sk))
		goto exit;

	/* Try again later if socket is busy */
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		goto exit;
	}

	if (tsk->probe_unacked) {
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, peer_node, peer_port);
		sk->sk_state_change(sk);
		goto exit;
	}
	/* Send new probe */
	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			      peer_node, own_node, peer_port, own_port,
			      TIPC_OK);
	tsk->probe_unacked = true;
	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
exit:
	bh_unlock_sock(sk);
	if (skb)
		tipc_node_xmit_skb(net, skb, peer_node, own_port);
	sock_put(sk);
}

static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_name_seq const *seq)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct publication *publ;
	u32 key;

	if (scope != TIPC_NODE_SCOPE)
		scope = TIPC_CLUSTER_SCOPE;

	if (tipc_sk_connected(sk))
		return -EINVAL;
	key = tsk->portid + tsk->pub_count + 1;
	if (key == tsk->portid)
		return -EADDRINUSE;

	publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
				    scope, tsk->portid, key);
	if (unlikely(!publ))
		return -EINVAL;

	list_add(&publ->binding_sock, &tsk->publications);
	tsk->pub_count++;
	tsk->published = 1;
	return 0;
}

static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_name_seq const *seq)
{
	struct net *net = sock_net(&tsk->sk);
	struct publication *publ;
	struct publication *safe;
	int rc = -EINVAL;

	if (scope != TIPC_NODE_SCOPE)
		scope = TIPC_CLUSTER_SCOPE;

	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
		if (seq) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			if (publ->upper != seq->upper)
				continue;
			tipc_nametbl_withdraw(net, publ->type, publ->lower,
					      publ->upper, publ->key);
			rc = 0;
			break;
		}
		tipc_nametbl_withdraw(net, publ->type, publ->lower,
				      publ->upper, publ->key);
		rc = 0;
	}
	if (list_empty(&tsk->publications))
		tsk->published = 0;
	return rc;
}

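/* Illustrative user-space sketch (not part of this file's build): these
 * helpers are reached through bind(). Publishing a name sequence inserts
 * it in the name table; by TIPC user API convention, rebinding with a
 * negated scope withdraws it again. Values below are hypothetical:
 *
 *	struct sockaddr_tipc name = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_ADDR_NAMESEQ,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *		.addr.nameseq = { .type = 4242, .lower = 0, .upper = 99 },
 *	};
 *
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));
 *	name.scope = -TIPC_CLUSTER_SCOPE;
 *	bind(sd, (struct sockaddr *)&name, sizeof(name));
 */
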
/* tipc_sk_reinit: set non-zero address in all existing sockets
 * when we go from standalone to network mode.
 */
void tipc_sk_reinit(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	rhashtable_walk_enter(&tn->sk_rht, &iter);

	do {
		rhashtable_walk_start(&iter);

		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			msg = &tsk->phdr;
			msg_set_prevnode(msg, tipc_own_addr(net));
			msg_set_orignode(msg, tipc_own_addr(net));
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}

		rhashtable_walk_stop(&iter);
	} while (tsk == ERR_PTR(-EAGAIN));
}

static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_sock *tsk;

	rcu_read_lock();
	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
	if (tsk)
		sock_hold(&tsk->sk);
	rcu_read_unlock();

	return tsk;
}

static int tipc_sk_insert(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct net *net = sock_net(sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
	u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;

	while (remaining--) {
		portid++;
		if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
			portid = TIPC_MIN_PORT;
		tsk->portid = portid;
		sock_hold(&tsk->sk);
		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
						   tsk_rht_params))
			return 0;
		sock_put(&tsk->sk);
	}

	return -1;
}

static void tipc_sk_remove(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;
	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);

	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
}

static const struct rhashtable_params tsk_rht_params = {
	.nelem_hint = 192,
	.head_offset = offsetof(struct tipc_sock, node),
	.key_offset = offsetof(struct tipc_sock, portid),
	.key_len = sizeof(u32), /* portid */
	.max_size = 1048576,
	.min_size = 256,
	.automatic_shrinking = true,
};

int tipc_sk_rht_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
}

void tipc_sk_rht_destroy(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Wait for socket readers to complete */
	synchronize_net();

	rhashtable_destroy(&tn->sk_rht);
}

static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_name_seq seq;
	int rc;

	if (mreq->type < TIPC_RESERVED_TYPES)
		return -EACCES;
	if (mreq->scope > TIPC_NODE_SCOPE)
		return -EINVAL;
	if (grp)
		return -EACCES;
	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
	if (!grp)
		return -ENOMEM;
	tsk->group = grp;
	msg_set_lookup_scope(hdr, mreq->scope);
	msg_set_nametype(hdr, mreq->type);
	msg_set_dest_droppable(hdr, true);
	seq.type = mreq->type;
	seq.lower = mreq->instance;
	seq.upper = seq.lower;
	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
	if (rc) {
		tipc_group_delete(net, grp);
		tsk->group = NULL;
		return rc;
	}
	/* Eliminate any risk that a broadcast overtakes sent JOINs */
	tsk->mc_method.rcast = true;
	tsk->mc_method.mandatory = true;
	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
	return rc;
}

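/* Illustrative user-space sketch (not part of this file's build):
 * tipc_sk_join() is reached via setsockopt(TIPC_GROUP_JOIN) with a
 * struct tipc_group_req from <linux/tipc.h>. Values are hypothetical:
 *
 *	struct tipc_group_req mreq = {
 *		.type = 4711,
 *		.instance = 17,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq)))
 *		perror("group join");
 */
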
static int tipc_sk_leave(struct tipc_sock *tsk)
{
	struct net *net = sock_net(&tsk->sk);
	struct tipc_group *grp = tsk->group;
	struct tipc_name_seq seq;
	int scope;

	if (!grp)
		return -EINVAL;
	tipc_group_self(grp, &seq, &scope);
	tipc_group_delete(net, grp);
	tsk->group = NULL;
	tipc_sk_withdraw(tsk, scope, &seq);
	return 0;
}

/**
 * tipc_setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_group_req mreq;
	u32 value = 0;
	int res = 0;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;

	switch (opt) {
	case TIPC_IMPORTANCE:
	case TIPC_SRC_DROPPABLE:
	case TIPC_DEST_DROPPABLE:
	case TIPC_CONN_TIMEOUT:
		if (ol < sizeof(value))
			return -EINVAL;
		if (get_user(value, (u32 __user *)ov))
			return -EFAULT;
		break;
	case TIPC_GROUP_JOIN:
		if (ol < sizeof(mreq))
			return -EINVAL;
		if (copy_from_user(&mreq, ov, sizeof(mreq)))
			return -EFAULT;
		break;
	default:
		if (ov || ol)
			return -EINVAL;
	}

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tsk_set_importance(tsk, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			tsk_set_unreliable(tsk, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		tsk_set_unreturnable(tsk, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		break;
	case TIPC_MCAST_BROADCAST:
		tsk->mc_method.rcast = false;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_MCAST_REPLICAST:
		tsk->mc_method.rcast = true;
		tsk->mc_method.mandatory = true;
		break;
	case TIPC_GROUP_JOIN:
		res = tipc_sk_join(tsk, &mreq);
		break;
	case TIPC_GROUP_LEAVE:
		res = tipc_sk_leave(tsk);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}

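/* Illustrative user-space sketch (not part of this file's build): the
 * u32-valued options above are set with a plain 4-byte value, e.g. a
 * 10-second connect timeout and elevated message importance:
 *
 *	__u32 timeout = 10000;
 *	__u32 imp = TIPC_HIGH_IMPORTANCE;
 *
 *	setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout, sizeof(timeout));
 *	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp));
 */
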
/**
 * tipc_getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
			   char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_name_seq seq;
	int len, scope;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		value = tsk_importance(tsk);
		break;
	case TIPC_SRC_DROPPABLE:
		value = tsk_unreliable(tsk);
		break;
	case TIPC_DEST_DROPPABLE:
		value = tsk_unreturnable(tsk);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tsk->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = 0; /* was tipc_queue_size, now obsolete */
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	case TIPC_GROUP_JOIN:
		seq.type = 0;
		if (tsk->group)
			tipc_group_self(tsk->group, &seq, &scope);
		value = seq.type;
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}

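/* Illustrative user-space sketch (not part of this file's build): reading
 * the current receive queue depth of a TIPC socket:
 *
 *	__u32 depth;
 *	socklen_t len = sizeof(depth);
 *
 *	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len);
 */
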
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct net *net = sock_net(sock->sk);
	struct tipc_sioc_nodeid_req nr = {0};
	struct tipc_sioc_ln_req lnr;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case SIOCGETLINKNAME:
		if (copy_from_user(&lnr, argp, sizeof(lnr)))
			return -EFAULT;
		if (!tipc_node_get_linkname(net,
					    lnr.bearer_id & 0xffff, lnr.peer,
					    lnr.linkname, TIPC_MAX_LINK_NAME)) {
			if (copy_to_user(argp, &lnr, sizeof(lnr)))
				return -EFAULT;
			return 0;
		}
		return -EADDRNOTAVAIL;
	case SIOCGETNODEID:
		if (copy_from_user(&nr, argp, sizeof(nr)))
			return -EFAULT;
		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
			return -EADDRNOTAVAIL;
		if (copy_to_user(argp, &nr, sizeof(nr)))
			return -EFAULT;
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}

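/* Illustrative user-space sketch (not part of this file's build):
 * resolving a link name via SIOCGETLINKNAME; the peer node value below
 * is hypothetical:
 *
 *	struct tipc_sioc_ln_req lnr = {
 *		.peer = 0x01001002,
 *		.bearer_id = 0,
 *	};
 *
 *	if (!ioctl(sd, SIOCGETLINKNAME, &lnr))
 *		printf("link: %s\n", lnr.linkname);
 */
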
static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
{
	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
	u32 onode = tipc_own_addr(sock_net(sock1->sk));

	tsk1->peer.family = AF_TIPC;
	tsk1->peer.addrtype = TIPC_ADDR_ID;
	tsk1->peer.scope = TIPC_NODE_SCOPE;
	tsk1->peer.addr.id.ref = tsk2->portid;
	tsk1->peer.addr.id.node = onode;
	tsk2->peer.family = AF_TIPC;
	tsk2->peer.addrtype = TIPC_ADDR_ID;
	tsk2->peer.scope = TIPC_NODE_SCOPE;
	tsk2->peer.addr.id.ref = tsk1->portid;
	tsk2->peer.addr.id.node = onode;

	tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
	tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
	return 0;
}

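/* Illustrative user-space sketch (not part of this file's build): AF_TIPC
 * supports socketpair() for connection-oriented types, wiring the two
 * sockets to each other as done in tipc_socketpair() above:
 *
 *	int sv[2];
 *
 *	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv))
 *		perror("socketpair");
 */
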
/* Protocol switches for the various types of TIPC sockets */

static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= sock_no_accept,
	.getname	= tipc_getname,
	.poll_mask	= tipc_poll_mask,
	.ioctl		= tipc_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendmsg,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll_mask	= tipc_poll_mask,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_send_packet,
	.recvmsg	= tipc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= tipc_release,
	.bind		= tipc_bind,
	.connect	= tipc_connect,
	.socketpair	= tipc_socketpair,
	.accept		= tipc_accept,
	.getname	= tipc_getname,
	.poll_mask	= tipc_poll_mask,
	.ioctl		= tipc_ioctl,
	.listen		= tipc_listen,
	.shutdown	= tipc_shutdown,
	.setsockopt	= tipc_setsockopt,
	.getsockopt	= tipc_getsockopt,
	.sendmsg	= tipc_sendstream,
	.recvmsg	= tipc_recvstream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_sk_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock),
	.sysctl_rmem	= sysctl_tipc_rmem
};

/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		pr_err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		pr_err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}
 out:
	return res;
}

/**
 * tipc_socket_stop - stop TIPC socket interface
 */
void tipc_socket_stop(void)
{
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
	u32 peer_node;
	u32 peer_port;
	struct nlattr *nest;

	peer_node = tsk_peer_node(tsk);
	peer_port = tsk_peer_port(tsk);

	nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);

	if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
		goto msg_full;
	if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
		goto msg_full;

	if (tsk->conn_type != 0) {
		if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
			goto msg_full;
		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
			goto msg_full;
	}
	nla_nest_end(skb, nest);

	return 0;

msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
				 *tsk)
{
	struct net *net = sock_net(skb->sk);
	struct sock *sk = &tsk->sk;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
		return -EMSGSIZE;

	if (tipc_sk_connected(sk)) {
		if (__tipc_nl_add_sk_con(skb, tsk))
			return -EMSGSIZE;
	} else if (!list_empty(&tsk->publications)) {
		if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
			return -EMSGSIZE;
	}
	return 0;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
			    struct tipc_sock *tsk)
{
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto genlmsg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
		    int (*skb_handler)(struct sk_buff *skb,
				       struct netlink_callback *cb,
				       struct tipc_sock *tsk))
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	const struct bucket_table *tbl;
	u32 prev_portid = cb->args[1];
	u32 tbl_id = cb->args[0];
	struct rhash_head *pos;
	struct tipc_sock *tsk;
	int err;

	rcu_read_lock();
	tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
	for (; tbl_id < tbl->size; tbl_id++) {
		rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
			spin_lock_bh(&tsk->sk.sk_lock.slock);
			if (prev_portid && prev_portid != tsk->portid) {
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				continue;
			}

			err = skb_handler(skb, cb, tsk);
			if (err) {
				prev_portid = tsk->portid;
				spin_unlock_bh(&tsk->sk.sk_lock.slock);
				goto out;
			}

			prev_portid = 0;
			spin_unlock_bh(&tsk->sk.sk_lock.slock);
		}
	}
out:
	rcu_read_unlock();
	cb->args[0] = tbl_id;
	cb->args[1] = prev_portid;

	return skb->len;
}
EXPORT_SYMBOL(tipc_nl_sk_walk);

int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tipc_sock *tsk, u32 sk_filter_state,
			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
{
	struct sock *sk = &tsk->sk;
	struct nlattr *attrs;
	struct nlattr *stat;

	/* Filter response w.r.t. sk_state */
	if (!(sk_filter_state & (1 << sk->sk_state)))
		return 0;

	attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
	if (!attrs)
		goto msg_cancel;

	if (__tipc_nl_add_sk_info(skb, tsk))
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
					 sock_i_uid(sk))) ||
	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
			      tipc_diag_gen_cookie(sk),
			      TIPC_NLA_SOCK_PAD))
		goto attr_msg_cancel;

	stat = nla_nest_start(skb, TIPC_NLA_SOCK_STAT);
	if (!stat)
		goto attr_msg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
			skb_queue_len(&sk->sk_receive_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
			skb_queue_len(&sk->sk_write_queue)) ||
	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
			atomic_read(&sk->sk_drops)))
		goto stat_msg_cancel;

	if (tsk->cong_link_cnt &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
		goto stat_msg_cancel;

	if (tsk_conn_cong(tsk) &&
	    nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
		goto stat_msg_cancel;

	nla_nest_end(skb, stat);
	nla_nest_end(skb, attrs);

	return 0;

stat_msg_cancel:
	nla_nest_cancel(skb, stat);
attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
msg_cancel:
	return -EMSGSIZE;
}
EXPORT_SYMBOL(tipc_sk_fill_sock_diag);

int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct publication *publ)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
	if (!hdr)
		goto msg_cancel;

	attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
	if (!attrs)
		goto genlmsg_cancel;

	if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
		goto attr_msg_cancel;
	if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
		goto attr_msg_cancel;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, hdr);

	return 0;

attr_msg_cancel:
	nla_nest_cancel(skb, attrs);
genlmsg_cancel:
	genlmsg_cancel(skb, hdr);
msg_cancel:
	return -EMSGSIZE;
}

/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct tipc_sock *tsk, u32 *last_publ)
{
	int err;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &tsk->publications, binding_sock) {
			if (p->key == *last_publ)
				break;
		}
		if (p->key != *last_publ) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			*last_publ = 0;
			return -EPIPE;
		}
	} else {
		p = list_first_entry(&tsk->publications, struct publication,
				     binding_sock);
	}

	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
		err = __tipc_nl_add_sk_publ(skb, cb, p);
		if (err) {
			*last_publ = p->key;
			return err;
		}
	}
	*last_publ = 0;

	return 0;
}

int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	u32 tsk_portid = cb->args[0];
	u32 last_publ = cb->args[1];
	u32 done = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_sock *tsk;

	if (!tsk_portid) {
		struct nlattr **attrs;
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		err = tipc_nlmsg_parse(cb->nlh, &attrs);
		if (err)
			return err;

		if (!attrs[TIPC_NLA_SOCK])
			return -EINVAL;

		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				       attrs[TIPC_NLA_SOCK],
				       tipc_nl_sock_policy, NULL);
		if (err)
			return err;

		if (!sock[TIPC_NLA_SOCK_REF])
			return -EINVAL;

		tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
	}

	if (done)
		return 0;

	tsk = tipc_sk_lookup(net, tsk_portid);
	if (!tsk)
		return -EINVAL;

	lock_sock(&tsk->sk);
	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
	if (!err)
		done = 1;
	release_sock(&tsk->sk);
	sock_put(&tsk->sk);

	cb->args[0] = tsk_portid;
	cb->args[1] = last_publ;
	cb->args[2] = done;

	return skb->len;
}