/*
 * net/tipc/server.c: TIPC server infrastructure
 *
 * Copyright (c) 2012-2013, Wind River Systems
 * Copyright (c) 2017, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "server.h"
#include "socket.h"
#include "addr.h"
#include "msg.h"
#include "subscr.h"
#include <net/sock.h>
#include <linux/module.h>

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT	25
#define MAX_RECV_MSG_COUNT	25
#define CF_CONNECTED		1

#define TIPC_SERVER_NAME_LEN	32

/**
 * struct tipc_server - TIPC server structure
 * @conn_idr: identifier set of connections
 * @idr_lock: protect the connection identifier set
 * @idr_in_use: number of allocated identifier entries
 * @net: network namespace instance
 * @awork: accept work item
 * @rcv_wq: receive workqueue
 * @send_wq: send workqueue
 * @max_rcvbuf_size: maximum permitted receive message length
 * @listener: topology server listener socket
 * @name: server name
 */
struct tipc_server {
        struct idr conn_idr;
        spinlock_t idr_lock; /* for idr list */
        int idr_in_use;
        struct net *net;
        struct work_struct awork;
        struct workqueue_struct *rcv_wq;
        struct workqueue_struct *send_wq;
        int max_rcvbuf_size;
        struct socket *listener;
        char name[TIPC_SERVER_NAME_LEN];
};

/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handle associated with connection
 * @flags: indicates connection state
 * @server: pointer to connected server
 * @sub_list: list of all subscriptions pertaining to this connection
 * @sub_lock: lock protecting the subscription list
 * @rwork: receive work item
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
        struct kref kref;
        int conid;
        struct socket *sock;
        unsigned long flags;
        struct tipc_server *server;
        struct list_head sub_list;
        spinlock_t sub_lock; /* for subscription list */
        struct work_struct rwork;
        struct list_head outqueue;
        spinlock_t outqueue_lock;
        struct work_struct swork;
};

/* An entry waiting to be sent */
struct outqueue_entry {
        bool inactive;
        struct tipc_event evt;
        struct list_head list;
};

static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);

static bool connected(struct tipc_conn *con)
{
        return con && test_bit(CF_CONNECTED, &con->flags);
}

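/* tipc_conn_kref_release - last reference to the connection was dropped
 * Remove the connection from the server's idr, release its socket and
 * free any events still waiting in its outqueue
 */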
static void tipc_conn_kref_release(struct kref *kref)
{
        struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
        struct tipc_server *s = con->server;
        struct outqueue_entry *e, *safe;

        spin_lock_bh(&s->idr_lock);
        idr_remove(&s->conn_idr, con->conid);
        s->idr_in_use--;
        spin_unlock_bh(&s->idr_lock);
        if (con->sock)
                sock_release(con->sock);

        spin_lock_bh(&con->outqueue_lock);
        list_for_each_entry_safe(e, safe, &con->outqueue, list) {
                list_del(&e->list);
                kfree(e);
        }
        spin_unlock_bh(&con->outqueue_lock);
        kfree(con);
}

static void conn_put(struct tipc_conn *con)
{
        kref_put(&con->kref, tipc_conn_kref_release);
}

static void conn_get(struct tipc_conn *con)
{
        kref_get(&con->kref);
}

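/* tipc_conn_lookup - map connection id to connection
 * Returns the connection with an additional reference held, or NULL
 * if it does not exist or is no longer connected
 */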
static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
{
        struct tipc_conn *con;

        spin_lock_bh(&s->idr_lock);
        con = idr_find(&s->conn_idr, conid);
        if (!connected(con) || !kref_get_unless_zero(&con->kref))
                con = NULL;
        spin_unlock_bh(&s->idr_lock);
        return con;
}

/* sock_data_ready - interrupt callback indicating the socket has data to read
 * The queued work is launched into tipc_recv_work()->tipc_receive_from_sock()
 */
static void sock_data_ready(struct sock *sk)
{
        struct tipc_conn *con;

        read_lock_bh(&sk->sk_callback_lock);
        con = sk->sk_user_data;
        if (connected(con)) {
                conn_get(con);
                if (!queue_work(con->server->rcv_wq, &con->rwork))
                        conn_put(con);
        }
        read_unlock_bh(&sk->sk_callback_lock);
}

/* sock_write_space - interrupt callback after a sendmsg EAGAIN
 * Indicates that there now is more space in the socket's send buffer
 * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
 */
static void sock_write_space(struct sock *sk)
{
        struct tipc_conn *con;

        read_lock_bh(&sk->sk_callback_lock);
        con = sk->sk_user_data;
        if (connected(con)) {
                conn_get(con);
                if (!queue_work(con->server->send_wq, &con->swork))
                        conn_put(con);
        }
        read_unlock_bh(&sk->sk_callback_lock);
}

/* tipc_con_delete_sub - delete a specific or all subscriptions
 * for a given subscriber
 */
static void tipc_con_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
{
        struct list_head *sub_list = &con->sub_list;
        struct tipc_net *tn = tipc_net(con->server->net);
        struct tipc_subscription *sub, *tmp;

        spin_lock_bh(&con->sub_lock);
        list_for_each_entry_safe(sub, tmp, sub_list, sub_list) {
                if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
                        tipc_sub_unsubscribe(sub);
                        atomic_dec(&tn->subscription_count);
                } else if (s) {
                        break;
                }
        }
        spin_unlock_bh(&con->sub_lock);
}

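/* tipc_close_conn - disconnect the connection and drop all its subscriptions
 * Only the caller that actually clears CF_CONNECTED performs the socket
 * shutdown, so concurrent calls from the send and receive paths are safe
 */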
static void tipc_close_conn(struct tipc_conn *con)
{
        struct sock *sk = con->sock->sk;
        bool disconnect = false;

        write_lock_bh(&sk->sk_callback_lock);
        disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
        if (disconnect) {
                sk->sk_user_data = NULL;
                tipc_con_delete_sub(con, NULL);
        }
        write_unlock_bh(&sk->sk_callback_lock);

        /* Handle concurrent calls from sending and receiving threads */
        if (!disconnect)
                return;

        /* Don't flush pending works, just let them expire */
        kernel_sock_shutdown(con->sock, SHUT_RDWR);
        conn_put(con);
}

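/* tipc_alloc_conn - allocate and initialize a connection instance and
 * assign it a connection id from the server's idr
 */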
static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
{
        struct tipc_conn *con;
        int ret;

        con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
        if (!con)
                return ERR_PTR(-ENOMEM);

        kref_init(&con->kref);
        INIT_LIST_HEAD(&con->outqueue);
        INIT_LIST_HEAD(&con->sub_list);
        spin_lock_init(&con->outqueue_lock);
        spin_lock_init(&con->sub_lock);
        INIT_WORK(&con->swork, tipc_send_work);
        INIT_WORK(&con->rwork, tipc_recv_work);

        spin_lock_bh(&s->idr_lock);
        ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
        if (ret < 0) {
                kfree(con);
                spin_unlock_bh(&s->idr_lock);
                return ERR_PTR(-ENOMEM);
        }
        con->conid = ret;
        s->idr_in_use++;
        spin_unlock_bh(&s->idr_lock);

        set_bit(CF_CONNECTED, &con->flags);
        con->server = s;

        return con;
}

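/* tipc_con_rcv_sub - process a subscription request arriving on a connection
 * A cancel request deletes the matching subscription; otherwise a new
 * subscription is created, counted and added to the connection's list
 */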
static int tipc_con_rcv_sub(struct tipc_server *srv,
                            struct tipc_conn *con,
                            struct tipc_subscr *s)
{
        struct tipc_net *tn = tipc_net(srv->net);
        struct tipc_subscription *sub;

        if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
                tipc_con_delete_sub(con, s);
                return 0;
        }
        if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCR) {
                pr_warn("Subscription rejected, max (%u)\n", TIPC_MAX_SUBSCR);
                return -1;
        }
        sub = tipc_sub_subscribe(srv->net, s, con->conid);
        if (!sub)
                return -1;
        atomic_inc(&tn->subscription_count);
        spin_lock_bh(&con->sub_lock);
        list_add(&sub->sub_list, &con->sub_list);
        spin_unlock_bh(&con->sub_lock);
        return 0;
}

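/* tipc_receive_from_sock - read one subscription request from the connection
 * socket and hand it to tipc_con_rcv_sub(); close the connection on error
 */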
static int tipc_receive_from_sock(struct tipc_conn *con)
{
        struct tipc_server *srv = con->server;
        struct sock *sk = con->sock->sk;
        struct msghdr msg = {};
        struct tipc_subscr s;
        struct kvec iov;
        int ret;

        iov.iov_base = &s;
        iov.iov_len = sizeof(s);
        msg.msg_name = NULL;
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
        ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
        if (ret == -EWOULDBLOCK)
                return -EWOULDBLOCK;
        if (ret > 0) {
                read_lock_bh(&sk->sk_callback_lock);
                ret = tipc_con_rcv_sub(srv, con, &s);
                read_unlock_bh(&sk->sk_callback_lock);
        }
        if (ret < 0)
                tipc_close_conn(con);

        return ret;
}

/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
 * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
 */
void tipc_conn_queue_evt(struct net *net, int conid,
                         u32 event, struct tipc_event *evt)
{
        struct tipc_server *srv = tipc_topsrv(net);
        struct outqueue_entry *e;
        struct tipc_conn *con;

        con = tipc_conn_lookup(srv, conid);
        if (!con)
                return;

        if (!connected(con))
                goto err;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (!e)
                goto err;
        e->inactive = (event == TIPC_SUBSCR_TIMEOUT);
        memcpy(&e->evt, evt, sizeof(*evt));
        spin_lock_bh(&con->outqueue_lock);
        list_add_tail(&e->list, &con->outqueue);
        spin_unlock_bh(&con->outqueue_lock);

        if (queue_work(srv->send_wq, &con->swork))
                return;
err:
        conn_put(con);
}

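/* tipc_topsrv_kern_subscr - create a subscription on behalf of an in-kernel
 * subscriber identified by port number; no socket is attached to the
 * connection, so events are delivered via tipc_send_kern_top_evt()
 */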
bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
                             u32 upper, u32 filter, int *conid)
{
        struct tipc_subscr sub;
        struct tipc_conn *con;
        int rc;

        sub.seq.type = type;
        sub.seq.lower = lower;
        sub.seq.upper = upper;
        sub.timeout = TIPC_WAIT_FOREVER;
        sub.filter = filter;
        *(u32 *)&sub.usr_handle = port;

        con = tipc_alloc_conn(tipc_topsrv(net));
        if (IS_ERR(con))
                return false;

        *conid = con->conid;
        con->sock = NULL;
        rc = tipc_con_rcv_sub(tipc_topsrv(net), con, &sub);
        if (rc < 0)
                tipc_close_conn(con);
        return !rc;
}

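/* tipc_topsrv_kern_unsubscr - drop all subscriptions held by an in-kernel
 * subscriber and release the corresponding connection; both the lookup
 * reference and the connection's original reference are dropped here
 */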
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
{
        struct tipc_conn *con;

        con = tipc_conn_lookup(tipc_topsrv(net), conid);
        if (!con)
                return;

        test_and_clear_bit(CF_CONNECTED, &con->flags);
        tipc_con_delete_sub(con, NULL);
        conn_put(con);
        conn_put(con);
}

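/* tipc_send_kern_top_evt - deliver a subscription event to an in-kernel
 * subscriber by wrapping it in a TIPC message addressed to the
 * subscriber's port
 */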
static void tipc_send_kern_top_evt(struct net *net, struct tipc_event *evt)
{
        u32 port = *(u32 *)&evt->s.usr_handle;
        u32 self = tipc_own_addr(net);
        struct sk_buff_head evtq;
        struct sk_buff *skb;

        skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
                              self, self, port, port, 0);
        if (!skb)
                return;
        msg_set_dest_droppable(buf_msg(skb), true);
        memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
        skb_queue_head_init(&evtq);
        __skb_queue_tail(&evtq, skb);
        tipc_sk_rcv(net, &evtq);
}

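/* tipc_send_to_sock - drain the connection's outqueue
 * Events are sent on the connection socket, or delivered directly to an
 * in-kernel subscriber when no socket is attached
 */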
static void tipc_send_to_sock(struct tipc_conn *con)
{
        struct list_head *queue = &con->outqueue;
        struct tipc_server *srv = con->server;
        struct outqueue_entry *e;
        struct tipc_event *evt;
        struct msghdr msg;
        struct kvec iov;
        int count = 0;
        int ret;

        spin_lock_bh(&con->outqueue_lock);

        while (!list_empty(queue)) {
                e = list_first_entry(queue, struct outqueue_entry, list);
                evt = &e->evt;
                spin_unlock_bh(&con->outqueue_lock);

                if (e->inactive)
                        tipc_con_delete_sub(con, &evt->s);

                memset(&msg, 0, sizeof(msg));
                msg.msg_flags = MSG_DONTWAIT;
                iov.iov_base = evt;
                iov.iov_len = sizeof(*evt);
                msg.msg_name = NULL;

                if (con->sock) {
                        ret = kernel_sendmsg(con->sock, &msg, &iov,
                                             1, sizeof(*evt));
                        if (ret == -EWOULDBLOCK || ret == 0) {
                                cond_resched();
                                return;
                        } else if (ret < 0) {
                                return tipc_close_conn(con);
                        }
                } else {
                        tipc_send_kern_top_evt(srv->net, evt);
                }

                /* Don't starve users filling buffers */
                if (++count >= MAX_SEND_MSG_COUNT) {
                        cond_resched();
                        count = 0;
                }
                spin_lock_bh(&con->outqueue_lock);
                list_del(&e->list);
                kfree(e);
        }
        spin_unlock_bh(&con->outqueue_lock);
}

static void tipc_recv_work(struct work_struct *work)
{
        struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
        int count = 0;

        while (connected(con)) {
                if (tipc_receive_from_sock(con))
                        break;

                /* Don't flood Rx machine */
                if (++count >= MAX_RECV_MSG_COUNT) {
                        cond_resched();
                        count = 0;
                }
        }
        conn_put(con);
}

static void tipc_send_work(struct work_struct *work)
{
        struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

        if (connected(con))
                tipc_send_to_sock(con);

        conn_put(con);
}

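/* tipc_accept_from_sock - accept new connections on the listener socket
 * Runs in workqueue context; each accepted socket gets a connection
 * instance with its socket callbacks registered
 */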
static void tipc_accept_from_sock(struct work_struct *work)
{
        struct tipc_server *srv = container_of(work, struct tipc_server, awork);
        struct socket *lsock = srv->listener;
        struct socket *newsock;
        struct tipc_conn *con;
        struct sock *newsk;
        int ret;

        while (1) {
                ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
                if (ret < 0)
                        return;
                con = tipc_alloc_conn(srv);
                if (IS_ERR(con)) {
                        ret = PTR_ERR(con);
                        sock_release(newsock);
                        return;
                }
                /* Register callbacks */
                newsk = newsock->sk;
                write_lock_bh(&newsk->sk_callback_lock);
                newsk->sk_data_ready = sock_data_ready;
                newsk->sk_write_space = sock_write_space;
                newsk->sk_user_data = con;
                con->sock = newsock;
                write_unlock_bh(&newsk->sk_callback_lock);

                /* Wake up receive process in case of 'SYN+' message */
                newsk->sk_data_ready(newsk);
        }
}

/* listener_sock_data_ready - interrupt callback indicating a new connection
 * The queued work is launched into tipc_accept_from_sock()
 */
static void listener_sock_data_ready(struct sock *sk)
{
        struct tipc_server *srv;

        read_lock_bh(&sk->sk_callback_lock);
        srv = sk->sk_user_data;
        if (srv->listener)
                queue_work(srv->rcv_wq, &srv->awork);
        read_unlock_bh(&sk->sk_callback_lock);
}

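/* tipc_create_listener_sock - create, bind and listen on the topology
 * server socket, published as {TIPC_TOP_SRV, TIPC_TOP_SRV} with node scope
 */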
static int tipc_create_listener_sock(struct tipc_server *srv)
{
        int imp = TIPC_CRITICAL_IMPORTANCE;
        struct socket *lsock = NULL;
        struct sockaddr_tipc saddr;
        struct sock *sk;
        int rc;

        rc = sock_create_kern(srv->net, AF_TIPC, SOCK_SEQPACKET, 0, &lsock);
        if (rc < 0)
                return rc;

        srv->listener = lsock;
        sk = lsock->sk;
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_data_ready = listener_sock_data_ready;
        sk->sk_user_data = srv;
        write_unlock_bh(&sk->sk_callback_lock);

        rc = kernel_setsockopt(lsock, SOL_TIPC, TIPC_IMPORTANCE,
                               (char *)&imp, sizeof(imp));
        if (rc < 0)
                goto err;

        saddr.family = AF_TIPC;
        saddr.addrtype = TIPC_ADDR_NAMESEQ;
        saddr.addr.nameseq.type = TIPC_TOP_SRV;
        saddr.addr.nameseq.lower = TIPC_TOP_SRV;
        saddr.addr.nameseq.upper = TIPC_TOP_SRV;
        saddr.scope = TIPC_NODE_SCOPE;

        rc = kernel_bind(lsock, (struct sockaddr *)&saddr, sizeof(saddr));
        if (rc < 0)
                goto err;
        rc = kernel_listen(lsock, 0);
        if (rc < 0)
                goto err;

        /* The owner and creator of the listening socket is the TIPC module
         * itself, so we must drop the module reference count right after the
         * socket has been created; otherwise the refcount could never reach
         * zero again and "rmmod" would be unable to remove the module.
         *
         * sock_create_kern() takes the reference twice: once for the owner
         * of the socket's proto_ops struct and once for the owner of its
         * proto struct. We therefore call module_put() twice here, and take
         * the two references back with __module_get() before the socket is
         * released in tipc_topsrv_stop().
         */
        module_put(lsock->ops->owner);
        module_put(sk->sk_prot_creator->owner);

        return 0;
err:
        sock_release(lsock);
        return -EINVAL;
}

static int tipc_work_start(struct tipc_server *s)
{
        s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
        if (!s->rcv_wq) {
                pr_err("can't start tipc receive workqueue\n");
                return -ENOMEM;
        }

        s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
        if (!s->send_wq) {
                pr_err("can't start tipc send workqueue\n");
                destroy_workqueue(s->rcv_wq);
                return -ENOMEM;
        }

        return 0;
}

static void tipc_work_stop(struct tipc_server *s)
{
        destroy_workqueue(s->rcv_wq);
        destroy_workqueue(s->send_wq);
}

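/* tipc_topsrv_start - create the topology server instance for a network
 * namespace, start its workqueues and create the listener socket
 */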
int tipc_topsrv_start(struct net *net)
{
        struct tipc_net *tn = tipc_net(net);
        const char name[] = "topology_server";
        struct tipc_server *srv;
        int ret;

        srv = kzalloc(sizeof(*srv), GFP_ATOMIC);
        if (!srv)
                return -ENOMEM;

        srv->net = net;
        srv->max_rcvbuf_size = sizeof(struct tipc_subscr);
        INIT_WORK(&srv->awork, tipc_accept_from_sock);

        strncpy(srv->name, name, strlen(name) + 1);
        tn->topsrv = srv;
        atomic_set(&tn->subscription_count, 0);

        spin_lock_init(&srv->idr_lock);
        idr_init(&srv->conn_idr);
        srv->idr_in_use = 0;

        ret = tipc_work_start(srv);
        if (ret < 0)
                return ret;

        ret = tipc_create_listener_sock(srv);
        if (ret < 0)
                tipc_work_stop(srv);

        return ret;
}

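/* tipc_topsrv_stop - close all connections, re-take the module references
 * dropped when the listener was created, and release the server instance
 */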
void tipc_topsrv_stop(struct net *net)
{
        struct tipc_server *srv = tipc_topsrv(net);
        struct socket *lsock = srv->listener;
        struct tipc_conn *con;
        int id;

        spin_lock_bh(&srv->idr_lock);
        for (id = 0; srv->idr_in_use; id++) {
                con = idr_find(&srv->conn_idr, id);
                if (con) {
                        spin_unlock_bh(&srv->idr_lock);
                        tipc_close_conn(con);
                        spin_lock_bh(&srv->idr_lock);
                }
        }
        __module_get(lsock->ops->owner);
        __module_get(lsock->sk->sk_prot_creator->owner);
        srv->listener = NULL;
        spin_unlock_bh(&srv->idr_lock);
        sock_release(lsock);
        tipc_work_stop(srv);
        idr_destroy(&srv->conn_idr);
        kfree(srv);
}