1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
9 *******************************************************************************
10 ******************************************************************************/
15 * This is the "low-level" comms layer.
17 * It is responsible for sending/receiving messages
18 * from other nodes in the cluster.
20 * Cluster nodes are referred to by their nodeids. nodeids are
21 * simply 32 bit numbers to the locking module - if they need to
22 * be expanded for the cluster infrastructure then that is its
23 * responsibility. It is this layer's
24 * responsibility to resolve these into IP addresses or
25 * whatever it needs for inter-node communication.
27 * The comms level is two kernel threads that deal mainly with
28 * the receiving of messages from other nodes and passing them
29 * up to the mid-level comms layer (which understands the
30 * message format) for execution by the locking core, and
31 * a send thread which does all the setting up of connections
32 * to remote nodes and the sending of data. Threads are not allowed
33 * to send their own data because it may cause them to wait in times
34 * of high load. Also, this way, the sending thread can collect together
35 * messages bound for one node and send them in one block.
37 * lowcomms will choose to use either TCP or SCTP as its transport layer
38 * depending on the configuration variable 'protocol'. This should be set
39 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
40 * cluster-wide mechanism as it must be the same on all nodes of the cluster
41 * for the DLM to function.
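*
* As an illustration only (assuming the usual dlm configfs layout that
* fs/dlm/config.c exports under /sys/kernel/config), a cluster manager
* would typically write the value before any lockspace is joined:
*
*   echo 1 > /sys/kernel/config/dlm/cluster/protocol    (select SCTP)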
45 #include <asm/ioctls.h>
48 #include <linux/pagemap.h>
49 #include <linux/file.h>
50 #include <linux/mutex.h>
51 #include <linux/sctp.h>
52 #include <linux/slab.h>
53 #include <net/sctp/sctp.h>
56 #include "dlm_internal.h"
61 #define NEEDED_RMEM (4*1024*1024)
62 #define CONN_HASH_SIZE 32
64 /* Number of messages to send before rescheduling */
65 #define MAX_SEND_MSG_COUNT 25
66 #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
69 struct socket *sock; /* NULL if not connected */
70 uint32_t nodeid; /* So we know who we are in the list */
71 struct mutex sock_mutex;
73 #define CF_READ_PENDING 1
74 #define CF_WRITE_PENDING 2
75 #define CF_INIT_PENDING 4
76 #define CF_IS_OTHERCON 5
77 #define CF_CLOSE 6
78 #define CF_APP_LIMITED 7
79 #define CF_CLOSING 8
80 #define CF_SHUTDOWN 9
81 #define CF_CONNECTED 10
82 struct list_head writequeue; /* List of outgoing writequeue_entries */
83 spinlock_t writequeue_lock;
84 void (*connect_action) (struct connection *); /* What to do to connect */
85 void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
87 #define MAX_CONNECT_RETRIES 3
88 struct hlist_node list;
89 struct connection *othercon;
90 struct work_struct rwork; /* Receive workqueue */
91 struct work_struct swork; /* Send workqueue */
92 wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
93 unsigned char *rx_buf;
98 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
100 struct listen_connection {
102 struct work_struct rwork;
105 /* An entry waiting to be sent */
106 struct writequeue_entry {
107 struct list_head list;
113 struct connection *con;
116 struct dlm_node_addr {
117 struct list_head list;
121 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
124 static struct listen_sock_callbacks {
125 void (*sk_error_report)(struct sock *);
126 void (*sk_data_ready)(struct sock *);
127 void (*sk_state_change)(struct sock *);
128 void (*sk_write_space)(struct sock *);
131 static LIST_HEAD(dlm_node_addrs);
132 static DEFINE_SPINLOCK(dlm_node_addrs_spin);
134 static struct listen_connection listen_con;
135 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
136 static int dlm_local_count;
137 static int dlm_allow_conn;
140 static struct workqueue_struct *recv_workqueue;
141 static struct workqueue_struct *send_workqueue;
143 static struct hlist_head connection_hash[CONN_HASH_SIZE];
144 static DEFINE_SPINLOCK(connections_lock);
145 DEFINE_STATIC_SRCU(connections_srcu);
147 static void process_recv_sockets(struct work_struct *work);
148 static void process_send_sockets(struct work_struct *work);
150 static void sctp_connect_to_sock(struct connection *con);
151 static void tcp_connect_to_sock(struct connection *con);
152 static void dlm_tcp_shutdown(struct connection *con);
154 /* This is deliberately very simple because most clusters have simple
155 sequential nodeids, so we should be able to go straight to a connection
156 struct in the array */
157 static inline int nodeid_hash(int nodeid)
159 return nodeid & (CONN_HASH_SIZE-1);
162 static struct connection *__find_con(int nodeid)
165 struct connection *con;
167 r = nodeid_hash(nodeid);
169 idx = srcu_read_lock(&connections_srcu);
170 hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
171 if (con->nodeid == nodeid) {
172 srcu_read_unlock(&connections_srcu, idx);
176 srcu_read_unlock(&connections_srcu, idx);
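/* Basic initialisation of a connection: allocate the receive buffer and
* set up its lock, work items and the protocol specific connect/shutdown
* callbacks (TCP when dlm_config.ci_protocol is 0, otherwise SCTP).
*/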
181 static int dlm_con_init(struct connection *con, int nodeid)
183 con->rx_buflen = dlm_config.ci_buffer_size;
184 con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
188 con->nodeid = nodeid;
189 mutex_init(&con->sock_mutex);
190 INIT_LIST_HEAD(&con->writequeue);
191 spin_lock_init(&con->writequeue_lock);
192 INIT_WORK(&con->swork, process_send_sockets);
193 INIT_WORK(&con->rwork, process_recv_sockets);
194 init_waitqueue_head(&con->shutdown_wait);
196 if (dlm_config.ci_protocol == 0) {
197 con->connect_action = tcp_connect_to_sock;
198 con->shutdown_action = dlm_tcp_shutdown;
200 con->connect_action = sctp_connect_to_sock;
207 * If 'alloc' is zero then we don't attempt to create a new
208 * connection structure for this node.
210 static struct connection *nodeid2con(int nodeid, gfp_t alloc)
212 struct connection *con, *tmp;
215 con = __find_con(nodeid);
219 con = kzalloc(sizeof(*con), alloc);
223 ret = dlm_con_init(con, nodeid);
229 r = nodeid_hash(nodeid);
231 spin_lock(&connections_lock);
232 /* Because multiple workqueues/threads call this function it can
233 * race on multiple CPUs. Instead of locking the hot path __find_con()
234 * we simply re-check for recently added nodes in the rare case,
235 * under protection of connections_lock. If the node was added in the
236 * meantime we abort our connection creation and return the existing one.
238 tmp = __find_con(nodeid);
240 spin_unlock(&connections_lock);
246 hlist_add_head_rcu(&con->list, &connection_hash[r]);
247 spin_unlock(&connections_lock);
252 /* Loop round all connections */
253 static void foreach_conn(void (*conn_func)(struct connection *c))
256 struct connection *con;
258 idx = srcu_read_lock(&connections_srcu);
259 for (i = 0; i < CONN_HASH_SIZE; i++) {
260 hlist_for_each_entry_rcu(con, &connection_hash[i], list)
263 srcu_read_unlock(&connections_srcu, idx);
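/* Look up the address list entry for a nodeid; callers must hold
* dlm_node_addrs_spin.
*/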
266 static struct dlm_node_addr *find_node_addr(int nodeid)
268 struct dlm_node_addr *na;
270 list_for_each_entry(na, &dlm_node_addrs, list) {
271 if (na->nodeid == nodeid)
277 static int addr_compare(const struct sockaddr_storage *x,
278 const struct sockaddr_storage *y)
280 switch (x->ss_family) {
282 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
283 struct sockaddr_in *siny = (struct sockaddr_in *)y;
284 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
286 if (sinx->sin_port != siny->sin_port)
291 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
292 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
293 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
295 if (sinx->sin6_port != siny->sin6_port)
305 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
306 struct sockaddr *sa_out, bool try_new_addr)
308 struct sockaddr_storage sas;
309 struct dlm_node_addr *na;
311 if (!dlm_local_count)
314 spin_lock(&dlm_node_addrs_spin);
315 na = find_node_addr(nodeid);
316 if (na && na->addr_count) {
317 memcpy(&sas, na->addr[na->curr_addr_index],
318 sizeof(struct sockaddr_storage));
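/* advance curr_addr_index (wrapping around) so a subsequent lookup
* can return a different stored peer address
*/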
321 na->curr_addr_index++;
322 if (na->curr_addr_index == na->addr_count)
323 na->curr_addr_index = 0;
326 spin_unlock(&dlm_node_addrs_spin);
335 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
340 if (dlm_local_addr[0]->ss_family == AF_INET) {
341 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
342 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
343 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
345 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
346 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
347 ret6->sin6_addr = in6->sin6_addr;
353 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
355 struct dlm_node_addr *na;
359 spin_lock(&dlm_node_addrs_spin);
360 list_for_each_entry(na, &dlm_node_addrs, list) {
364 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
365 if (addr_compare(na->addr[addr_i], addr)) {
366 *nodeid = na->nodeid;
373 spin_unlock(&dlm_node_addrs_spin);
377 /* caller needs to hold the dlm_node_addrs_spin lock */
378 static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
379 const struct sockaddr_storage *addr)
383 for (i = 0; i < na->addr_count; i++) {
384 if (addr_compare(na->addr[i], addr))
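/* Register one address for a cluster node; called from the dlm configfs
* code for each address userspace writes. Up to DLM_MAX_ADDR_COUNT
* addresses are kept per node (more than one is only useful for SCTP
* multi-homing).
*/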
391 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
393 struct sockaddr_storage *new_addr;
394 struct dlm_node_addr *new_node, *na;
397 new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
401 new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
407 memcpy(new_addr, addr, len);
409 spin_lock(&dlm_node_addrs_spin);
410 na = find_node_addr(nodeid);
412 new_node->nodeid = nodeid;
413 new_node->addr[0] = new_addr;
414 new_node->addr_count = 1;
415 list_add(&new_node->list, &dlm_node_addrs);
416 spin_unlock(&dlm_node_addrs_spin);
420 ret = dlm_lowcomms_na_has_addr(na, addr);
422 spin_unlock(&dlm_node_addrs_spin);
428 if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
429 spin_unlock(&dlm_node_addrs_spin);
435 na->addr[na->addr_count++] = new_addr;
436 spin_unlock(&dlm_node_addrs_spin);
441 /* Data available on socket or listen socket received a connect */
442 static void lowcomms_data_ready(struct sock *sk)
444 struct connection *con;
446 read_lock_bh(&sk->sk_callback_lock);
448 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
449 queue_work(recv_workqueue, &con->rwork);
450 read_unlock_bh(&sk->sk_callback_lock);
453 static void lowcomms_listen_data_ready(struct sock *sk)
455 queue_work(recv_workqueue, &listen_con.rwork);
458 static void lowcomms_write_space(struct sock *sk)
460 struct connection *con;
462 read_lock_bh(&sk->sk_callback_lock);
467 if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
468 log_print("successful connected to node %d", con->nodeid);
469 queue_work(send_workqueue, &con->swork);
473 clear_bit(SOCK_NOSPACE, &con->sock->flags);
475 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
476 con->sock->sk->sk_write_pending--;
477 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
480 queue_work(send_workqueue, &con->swork);
482 read_unlock_bh(&sk->sk_callback_lock);
485 static inline void lowcomms_connect_sock(struct connection *con)
487 if (test_bit(CF_CLOSE, &con->flags))
489 queue_work(send_workqueue, &con->swork);
493 static void lowcomms_state_change(struct sock *sk)
495 /* SCTP layer is not calling sk_data_ready when the connection
496 * is done, so we catch the signal through here. Also, it
497 * doesn't switch socket state when entering shutdown, so we
498 * skip the write in that case.
500 if (sk->sk_shutdown) {
501 if (sk->sk_shutdown == RCV_SHUTDOWN)
502 lowcomms_data_ready(sk);
503 } else if (sk->sk_state == TCP_ESTABLISHED) {
504 lowcomms_write_space(sk);
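/* Make sure a connection struct exists for @nodeid and queue its send
* work so a socket gets established; a node never connects to itself.
*/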
508 int dlm_lowcomms_connect_node(int nodeid)
510 struct connection *con;
512 if (nodeid == dlm_our_nodeid())
515 con = nodeid2con(nodeid, GFP_NOFS);
518 lowcomms_connect_sock(con);
522 static void lowcomms_error_report(struct sock *sk)
524 struct connection *con;
525 struct sockaddr_storage saddr;
526 void (*orig_report)(struct sock *) = NULL;
528 read_lock_bh(&sk->sk_callback_lock);
533 orig_report = listen_sock.sk_error_report;
534 if (con->sock == NULL ||
535 kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
536 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
537 "sending to node %d, port %d, "
538 "sk_err=%d/%d\n", dlm_our_nodeid(),
539 con->nodeid, dlm_config.ci_tcp_port,
540 sk->sk_err, sk->sk_err_soft);
541 } else if (saddr.ss_family == AF_INET) {
542 struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
544 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
545 "sending to node %d at %pI4, port %d, "
546 "sk_err=%d/%d\n", dlm_our_nodeid(),
547 con->nodeid, &sin4->sin_addr.s_addr,
548 dlm_config.ci_tcp_port, sk->sk_err,
551 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
553 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
554 "sending to node %d at %pI6c, "
555 "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
556 con->nodeid, &sin6->sin6_addr,
560 dlm_config.ci_tcp_port, sk->sk_err,
564 read_unlock_bh(&sk->sk_callback_lock);
569 /* Note: sk_callback_lock must be locked before calling this function. */
570 static void save_listen_callbacks(struct socket *sock)
572 struct sock *sk = sock->sk;
574 listen_sock.sk_data_ready = sk->sk_data_ready;
575 listen_sock.sk_state_change = sk->sk_state_change;
576 listen_sock.sk_write_space = sk->sk_write_space;
577 listen_sock.sk_error_report = sk->sk_error_report;
580 static void restore_callbacks(struct socket *sock)
582 struct sock *sk = sock->sk;
584 write_lock_bh(&sk->sk_callback_lock);
585 sk->sk_user_data = NULL;
586 sk->sk_data_ready = listen_sock.sk_data_ready;
587 sk->sk_state_change = listen_sock.sk_state_change;
588 sk->sk_write_space = listen_sock.sk_write_space;
589 sk->sk_error_report = listen_sock.sk_error_report;
590 write_unlock_bh(&sk->sk_callback_lock);
593 static void add_listen_sock(struct socket *sock, struct listen_connection *con)
595 struct sock *sk = sock->sk;
597 write_lock_bh(&sk->sk_callback_lock);
598 save_listen_callbacks(sock);
601 sk->sk_user_data = con;
602 sk->sk_allocation = GFP_NOFS;
603 /* Install a data_ready callback */
604 sk->sk_data_ready = lowcomms_listen_data_ready;
605 write_unlock_bh(&sk->sk_callback_lock);
608 /* Make a socket active */
609 static void add_sock(struct socket *sock, struct connection *con)
611 struct sock *sk = sock->sk;
613 write_lock_bh(&sk->sk_callback_lock);
616 sk->sk_user_data = con;
617 /* Install a data_ready callback */
618 sk->sk_data_ready = lowcomms_data_ready;
619 sk->sk_write_space = lowcomms_write_space;
620 sk->sk_state_change = lowcomms_state_change;
621 sk->sk_allocation = GFP_NOFS;
622 sk->sk_error_report = lowcomms_error_report;
623 write_unlock_bh(&sk->sk_callback_lock);
626 /* Add the port number to an IPv6 or 4 sockaddr and return the address length */
628 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
631 saddr->ss_family = dlm_local_addr[0]->ss_family;
632 if (saddr->ss_family == AF_INET) {
633 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
634 in4_addr->sin_port = cpu_to_be16(port);
635 *addr_len = sizeof(struct sockaddr_in);
636 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
638 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
639 in6_addr->sin6_port = cpu_to_be16(port);
640 *addr_len = sizeof(struct sockaddr_in6);
642 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
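/* Detach our callbacks from a socket before it is released, so no further
* lowcomms callbacks can run on it.
*/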
645 static void dlm_close_sock(struct socket **sock)
648 restore_callbacks(*sock);
654 /* Close a remote connection and tidy up */
655 static void close_connection(struct connection *con, bool and_other,
658 bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
660 if (tx && !closing && cancel_work_sync(&con->swork)) {
661 log_print("canceled swork for node %d", con->nodeid);
662 clear_bit(CF_WRITE_PENDING, &con->flags);
664 if (rx && !closing && cancel_work_sync(&con->rwork)) {
665 log_print("canceled rwork for node %d", con->nodeid);
666 clear_bit(CF_READ_PENDING, &con->flags);
669 mutex_lock(&con->sock_mutex);
670 dlm_close_sock(&con->sock);
672 if (con->othercon && and_other) {
673 /* Will only re-enter once. */
674 close_connection(con->othercon, false, true, true);
677 con->rx_leftover = 0;
679 clear_bit(CF_CONNECTED, &con->flags);
680 mutex_unlock(&con->sock_mutex);
681 clear_bit(CF_CLOSING, &con->flags);
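/* Graceful shutdown of a TCP connection: stop the send worker, send
* SHUT_WR and wait up to DLM_SHUTDOWN_WAIT_TIMEOUT for the peer to close
* (the receive path clears CF_SHUTDOWN on EOF); on error or timeout fall
* back to a forced close_connection().
*/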
684 static void shutdown_connection(struct connection *con)
688 if (cancel_work_sync(&con->swork)) {
689 log_print("canceled swork for node %d", con->nodeid);
690 clear_bit(CF_WRITE_PENDING, &con->flags);
693 mutex_lock(&con->sock_mutex);
694 /* nothing to shutdown */
696 mutex_unlock(&con->sock_mutex);
700 set_bit(CF_SHUTDOWN, &con->flags);
701 ret = kernel_sock_shutdown(con->sock, SHUT_WR);
702 mutex_unlock(&con->sock_mutex);
704 log_print("Connection %p failed to shutdown: %d will force close",
708 ret = wait_event_timeout(con->shutdown_wait,
709 !test_bit(CF_SHUTDOWN, &con->flags),
710 DLM_SHUTDOWN_WAIT_TIMEOUT);
712 log_print("Connection %p shutdown timed out, will force close",
721 clear_bit(CF_SHUTDOWN, &con->flags);
722 close_connection(con, false, true, true);
725 static void dlm_tcp_shutdown(struct connection *con)
728 shutdown_connection(con->othercon);
729 shutdown_connection(con);
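/* Resize the per-connection receive buffer to the newly configured size,
* preserving any leftover partial message from the last receive.
*/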
732 static int con_realloc_receive_buf(struct connection *con, int newlen)
734 unsigned char *newbuf;
736 newbuf = kmalloc(newlen, GFP_NOFS);
740 /* copy any leftover from last receive */
741 if (con->rx_leftover)
742 memmove(newbuf, con->rx_buf, con->rx_leftover);
744 /* swap to new buffer space */
746 con->rx_buflen = newlen;
747 con->rx_buf = newbuf;
752 /* Data received from remote end */
753 static int receive_from_sock(struct connection *con)
755 int call_again_soon = 0;
760 mutex_lock(&con->sock_mutex);
762 if (con->sock == NULL) {
767 /* reallocate if a new receive buffer size was configured */
768 buflen = dlm_config.ci_buffer_size;
769 if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
770 ret = con_realloc_receive_buf(con, buflen);
775 /* set up the receive position in the buffer, accounting for any
776 * leftover bytes from the last receive
778 iov.iov_base = con->rx_buf + con->rx_leftover;
779 iov.iov_len = con->rx_buflen - con->rx_leftover;
781 memset(&msg, 0, sizeof(msg));
782 msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
783 ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
787 else if (ret == iov.iov_len)
790 /* new buflen according to the bytes read plus leftover from the last receive */
791 buflen = ret + con->rx_leftover;
792 ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
796 /* move any unprocessed (leftover) bytes to the beginning of
797 * the receive buffer, so that the next receive completes the
798 * partial message at the start of the buffer.
800 con->rx_leftover = buflen - ret;
801 if (con->rx_leftover) {
802 memmove(con->rx_buf, con->rx_buf + ret,
804 call_again_soon = true;
810 mutex_unlock(&con->sock_mutex);
814 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
815 queue_work(recv_workqueue, &con->rwork);
816 mutex_unlock(&con->sock_mutex);
820 mutex_unlock(&con->sock_mutex);
821 if (ret != -EAGAIN) {
822 /* Reconnect when there is something to send */
823 close_connection(con, false, true, false);
825 log_print("connection %p got EOF from %d",
827 /* handling for tcp shutdown */
828 clear_bit(CF_SHUTDOWN, &con->flags);
829 wake_up(&con->shutdown_wait);
830 /* signal the receive worker to break out */
837 /* Listening socket is busy, accept a connection */
838 static int accept_from_sock(struct listen_connection *con)
841 struct sockaddr_storage peeraddr;
842 struct socket *newsock;
845 struct connection *newcon;
846 struct connection *addcon;
849 if (!dlm_allow_conn) {
856 result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
860 /* Get the connected socket's peer */
861 memset(&peeraddr, 0, sizeof(peeraddr));
862 len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
864 result = -ECONNABORTED;
868 /* Get the new node's NODEID */
869 make_sockaddr(&peeraddr, 0, &len);
870 if (addr_to_nodeid(&peeraddr, &nodeid)) {
871 unsigned char *b = (unsigned char *)&peeraddr;
872 log_print("connect from non cluster node");
873 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
874 b, sizeof(struct sockaddr_storage));
875 sock_release(newsock);
879 dlm_comm_mark(nodeid, &mark);
880 sock_set_mark(newsock->sk, mark);
882 log_print("got connection from %d", nodeid);
884 /* Check to see if we already have a connection to this node. This
885 * could happen if the two nodes initiate a connection at roughly
886 * the same time and the connections cross on the wire.
887 * In this case we store the incoming one in "othercon"
889 newcon = nodeid2con(nodeid, GFP_NOFS);
895 mutex_lock(&newcon->sock_mutex);
897 struct connection *othercon = newcon->othercon;
900 othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
902 log_print("failed to allocate incoming socket");
903 mutex_unlock(&newcon->sock_mutex);
908 result = dlm_con_init(othercon, nodeid);
914 newcon->othercon = othercon;
916 /* close other sock con if we have something new */
917 close_connection(othercon, false, true, false);
920 mutex_lock_nested(&othercon->sock_mutex, 1);
921 add_sock(newsock, othercon);
923 mutex_unlock(&othercon->sock_mutex);
926 /* accept copies the sk after we've saved the callbacks, so we
927 don't want to save them a second time or comm errors will
928 result in calling sk_error_report recursively. */
929 add_sock(newsock, newcon);
933 mutex_unlock(&newcon->sock_mutex);
936 * Add it to the active queue in case we got data
937 * between processing the accept and adding the socket
938 * to the read_sockets list
940 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
941 queue_work(recv_workqueue, &addcon->rwork);
947 sock_release(newsock);
949 if (result != -EAGAIN)
950 log_print("error accepting connection from node: %d", result);
954 static void free_entry(struct writequeue_entry *e)
956 __free_page(e->page);
961 * writequeue_entry_complete - try to delete and free write queue entry
962 * @e: write queue entry to try to delete
963 * @completed: bytes completed
965 * writequeue_lock must be held.
967 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
969 e->offset += completed;
972 if (e->len == 0 && e->users == 0) {
979 * sctp_bind_addrs - bind a SCTP socket to all our addresses
981 static int sctp_bind_addrs(struct socket *sock, uint16_t port)
983 struct sockaddr_storage localaddr;
984 struct sockaddr *addr = (struct sockaddr *)&localaddr;
985 int i, addr_len, result = 0;
987 for (i = 0; i < dlm_local_count; i++) {
988 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
989 make_sockaddr(&localaddr, port, &addr_len);
992 result = kernel_bind(sock, addr, addr_len);
994 result = sock_bind_add(sock->sk, addr, addr_len);
997 log_print("Can't bind to %d addr number %d, %d.\n",
998 port, i + 1, result);
1005 /* Initiate an SCTP association.
1006 This is a special case of send_to_sock() in that we don't yet have a
1007 socket for this association, so we create one, bind it to all our
1008 local addresses and connect to the primary address of the remote node.
1010 static void sctp_connect_to_sock(struct connection *con)
1012 struct sockaddr_storage daddr;
1015 struct socket *sock;
1018 dlm_comm_mark(con->nodeid, &mark);
1020 mutex_lock(&con->sock_mutex);
1022 /* Some odd races can cause double-connects, ignore them */
1023 if (con->retries++ > MAX_CONNECT_RETRIES)
1027 log_print("node %d already connected.", con->nodeid);
1031 memset(&daddr, 0, sizeof(daddr));
1032 result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
1034 log_print("no address for nodeid %d", con->nodeid);
1038 /* Create a socket to communicate with */
1039 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1040 SOCK_STREAM, IPPROTO_SCTP, &sock);
1044 sock_set_mark(sock->sk, mark);
1046 add_sock(sock, con);
1048 /* Bind to all addresses. */
1049 if (sctp_bind_addrs(con->sock, 0))
1052 make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
1054 log_print("connecting to %d", con->nodeid);
1056 /* Turn off Nagle's algorithm */
1057 sctp_sock_set_nodelay(sock->sk);
1060 * Make sock->ops->connect() return within the specified timeout,
1061 * since the O_NONBLOCK argument to connect() does not work here.
1062 * Afterwards, restore the default value of this attribute.
1064 sock_set_sndtimeo(sock->sk, 5);
1065 result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
1067 sock_set_sndtimeo(sock->sk, 0);
1069 if (result == -EINPROGRESS)
1072 if (!test_and_set_bit(CF_CONNECTED, &con->flags))
1073 log_print("successful connected to node %d", con->nodeid);
1083 * Some errors are fatal and this list might need adjusting. For other
1084 * errors we try again until the max number of retries is reached.
1086 if (result != -EHOSTUNREACH &&
1087 result != -ENETUNREACH &&
1088 result != -ENETDOWN &&
1089 result != -EINVAL &&
1090 result != -EPROTONOSUPPORT) {
1091 log_print("connect %d try %d error %d", con->nodeid,
1092 con->retries, result);
1093 mutex_unlock(&con->sock_mutex);
1095 lowcomms_connect_sock(con);
1100 mutex_unlock(&con->sock_mutex);
1103 /* Connect a new socket to its peer */
1104 static void tcp_connect_to_sock(struct connection *con)
1106 struct sockaddr_storage saddr, src_addr;
1108 struct socket *sock = NULL;
1112 dlm_comm_mark(con->nodeid, &mark);
1114 mutex_lock(&con->sock_mutex);
1115 if (con->retries++ > MAX_CONNECT_RETRIES)
1118 /* Some odd races can cause double-connects, ignore them */
1122 /* Create a socket to communicate with */
1123 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1124 SOCK_STREAM, IPPROTO_TCP, &sock);
1128 sock_set_mark(sock->sk, mark);
1130 memset(&saddr, 0, sizeof(saddr));
1131 result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
1133 log_print("no address for nodeid %d", con->nodeid);
1137 add_sock(sock, con);
1139 /* Bind to our cluster-known address when connecting, to avoid routing problems */
1141 memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
1142 make_sockaddr(&src_addr, 0, &addr_len);
1143 result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
1146 log_print("could not bind for connect: %d", result);
1147 /* This *may* not indicate a critical error */
1150 make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
1152 log_print("connecting to %d", con->nodeid);
1154 /* Turn off Nagle's algorithm */
1155 tcp_sock_set_nodelay(sock->sk);
1157 result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
1159 if (result == -EINPROGRESS)
1166 sock_release(con->sock);
1172 * Some errors are fatal and this list might need adjusting. For other
1173 * errors we try again until the max number of retries is reached.
1175 if (result != -EHOSTUNREACH &&
1176 result != -ENETUNREACH &&
1177 result != -ENETDOWN &&
1178 result != -EINVAL &&
1179 result != -EPROTONOSUPPORT) {
1180 log_print("connect %d try %d error %d", con->nodeid,
1181 con->retries, result);
1182 mutex_unlock(&con->sock_mutex);
1184 lowcomms_connect_sock(con);
1188 mutex_unlock(&con->sock_mutex);
1192 /* On error caller must run dlm_close_sock() for the
1193 * listen connection socket.
1195 static int tcp_create_listen_sock(struct listen_connection *con,
1196 struct sockaddr_storage *saddr)
1198 struct socket *sock = NULL;
1202 if (dlm_local_addr[0]->ss_family == AF_INET)
1203 addr_len = sizeof(struct sockaddr_in);
1205 addr_len = sizeof(struct sockaddr_in6);
1207 /* Create a socket to communicate with */
1208 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1209 SOCK_STREAM, IPPROTO_TCP, &sock);
1211 log_print("Can't create listening comms socket");
1215 sock_set_mark(sock->sk, dlm_config.ci_mark);
1217 /* Turn off Nagle's algorithm */
1218 tcp_sock_set_nodelay(sock->sk);
1220 sock_set_reuseaddr(sock->sk);
1222 add_listen_sock(sock, con);
1224 /* Bind to our port */
1225 make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
1226 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
1228 log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
1231 sock_set_keepalive(sock->sk);
1233 result = sock->ops->listen(sock, 5);
1235 log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
1245 /* Get local addresses */
1246 static void init_local(void)
1248 struct sockaddr_storage sas, *addr;
1251 dlm_local_count = 0;
1252 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1253 if (dlm_our_addr(&sas, i))
1256 addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
1259 dlm_local_addr[dlm_local_count++] = addr;
1263 static void deinit_local(void)
1267 for (i = 0; i < dlm_local_count; i++)
1268 kfree(dlm_local_addr[i]);
1271 /* Initialise SCTP socket and bind to all interfaces
1272 * On error caller must run dlm_close_sock() for the
1273 * listen connection socket.
1275 static int sctp_listen_for_all(struct listen_connection *con)
1277 struct socket *sock = NULL;
1278 int result = -EINVAL;
1280 log_print("Using SCTP for communications");
1282 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1283 SOCK_STREAM, IPPROTO_SCTP, &sock);
1285 log_print("Can't create comms socket, check SCTP is loaded");
1289 sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
1290 sock_set_mark(sock->sk, dlm_config.ci_mark);
1291 sctp_sock_set_nodelay(sock->sk);
1293 add_listen_sock(sock, con);
1295 /* Bind to all addresses. */
1296 result = sctp_bind_addrs(con->sock, dlm_config.ci_tcp_port);
1300 result = sock->ops->listen(sock, 5);
1302 log_print("Can't set socket listening");
1312 static int tcp_listen_for_all(void)
1314 /* We don't support multi-homed hosts */
1315 if (dlm_local_count > 1) {
1316 log_print("TCP protocol can't handle multi-homed hosts, "
1321 log_print("Using TCP for communications");
1323 return tcp_create_listen_sock(&listen_con, dlm_local_addr[0]);
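/* Allocate a writequeue entry backed by a single page; outgoing messages
* for one node are packed into it so they can be sent in one block.
*/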
1328 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1331 struct writequeue_entry *entry;
1333 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
1337 entry->page = alloc_page(allocation);
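/* dlm_lowcomms_get_buffer() reserves @len bytes in the tail writequeue
* entry for @nodeid (adding a new page-backed entry if there is not enough
* room) and returns an opaque handle; *ppc points at the reserved space
* for the caller to fill in. The handle is then passed to
* dlm_lowcomms_commit_buffer() to make the message eligible for sending.
* A minimal, purely illustrative caller:
*
*	char *mb;
*	void *mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_NOFS, &mb);
*	if (!mh)
*		return -ENOBUFS;
*	memcpy(mb, msg, mb_len);
*	dlm_lowcomms_commit_buffer(mh);
*/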
1352 void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
1354 struct connection *con;
1355 struct writequeue_entry *e;
1358 if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
1359 BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
1360 log_print("failed to allocate a buffer of size %d", len);
1364 con = nodeid2con(nodeid, allocation);
1368 spin_lock(&con->writequeue_lock);
1369 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1370 if ((&e->list == &con->writequeue) ||
1371 (PAGE_SIZE - e->end < len)) {
1378 spin_unlock(&con->writequeue_lock);
1382 *ppc = page_address(e->page) + offset;
1386 e = new_writequeue_entry(con, allocation);
1388 spin_lock(&con->writequeue_lock);
1392 list_add_tail(&e->list, &con->writequeue);
1393 spin_unlock(&con->writequeue_lock);
1399 void dlm_lowcomms_commit_buffer(void *mh)
1401 struct writequeue_entry *e = (struct writequeue_entry *)mh;
1402 struct connection *con = e->con;
1405 spin_lock(&con->writequeue_lock);
1409 e->len = e->end - e->offset;
1410 spin_unlock(&con->writequeue_lock);
1412 queue_work(send_workqueue, &con->swork);
1416 spin_unlock(&con->writequeue_lock);
1420 /* Send a message */
1421 static void send_to_sock(struct connection *con)
1424 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1425 struct writequeue_entry *e;
1429 mutex_lock(&con->sock_mutex);
1430 if (con->sock == NULL)
1433 spin_lock(&con->writequeue_lock);
1435 e = list_entry(con->writequeue.next, struct writequeue_entry,
1437 if ((struct list_head *) e == &con->writequeue)
1442 BUG_ON(len == 0 && e->users == 0);
1443 spin_unlock(&con->writequeue_lock);
1447 ret = kernel_sendpage(con->sock, e->page, offset, len,
1449 if (ret == -EAGAIN || ret == 0) {
1450 if (ret == -EAGAIN &&
1451 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1452 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1453 /* Notify TCP that we're limited by the
1454 * application window size.
1456 set_bit(SOCK_NOSPACE, &con->sock->flags);
1457 con->sock->sk->sk_write_pending++;
1465 /* Don't starve people filling buffers */
1466 if (++count >= MAX_SEND_MSG_COUNT) {
1471 spin_lock(&con->writequeue_lock);
1472 writequeue_entry_complete(e, ret);
1474 spin_unlock(&con->writequeue_lock);
1476 mutex_unlock(&con->sock_mutex);
1480 mutex_unlock(&con->sock_mutex);
1481 close_connection(con, false, false, true);
1482 /* Requeue the send work. When the work daemon runs again, it will try
1483 a new connection, then call this function again. */
1484 queue_work(send_workqueue, &con->swork);
1488 mutex_unlock(&con->sock_mutex);
1489 queue_work(send_workqueue, &con->swork);
1493 static void clean_one_writequeue(struct connection *con)
1495 struct writequeue_entry *e, *safe;
1497 spin_lock(&con->writequeue_lock);
1498 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1502 spin_unlock(&con->writequeue_lock);
1505 /* Called from recovery when it knows that a node has left the cluster */
1507 int dlm_lowcomms_close(int nodeid)
1509 struct connection *con;
1510 struct dlm_node_addr *na;
1512 log_print("closing connection to node %d", nodeid);
1513 con = nodeid2con(nodeid, 0);
1515 set_bit(CF_CLOSE, &con->flags);
1516 close_connection(con, true, true, true);
1517 clean_one_writequeue(con);
1519 clean_one_writequeue(con->othercon);
1522 spin_lock(&dlm_node_addrs_spin);
1523 na = find_node_addr(nodeid);
1525 list_del(&na->list);
1526 while (na->addr_count--)
1527 kfree(na->addr[na->addr_count]);
1530 spin_unlock(&dlm_node_addrs_spin);
1535 /* Receive workqueue function */
1536 static void process_recv_sockets(struct work_struct *work)
1538 struct connection *con = container_of(work, struct connection, rwork);
1541 clear_bit(CF_READ_PENDING, &con->flags);
1543 err = receive_from_sock(con);
1547 static void process_listen_recv_socket(struct work_struct *work)
1549 accept_from_sock(&listen_con);
1552 /* Send workqueue function */
1553 static void process_send_sockets(struct work_struct *work)
1555 struct connection *con = container_of(work, struct connection, swork);
1557 clear_bit(CF_WRITE_PENDING, &con->flags);
1558 if (con->sock == NULL) /* not mutex protected so check it inside too */
1559 con->connect_action(con);
1560 if (!list_empty(&con->writequeue))
1564 static void work_stop(void)
1567 destroy_workqueue(recv_workqueue);
1569 destroy_workqueue(send_workqueue);
1572 static int work_start(void)
1574 recv_workqueue = alloc_workqueue("dlm_recv",
1575 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1576 if (!recv_workqueue) {
1577 log_print("can't start dlm_recv");
1581 send_workqueue = alloc_workqueue("dlm_send",
1582 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1583 if (!send_workqueue) {
1584 log_print("can't start dlm_send");
1585 destroy_workqueue(recv_workqueue);
1592 static void _stop_conn(struct connection *con, bool and_other)
1594 mutex_lock(&con->sock_mutex);
1595 set_bit(CF_CLOSE, &con->flags);
1596 set_bit(CF_READ_PENDING, &con->flags);
1597 set_bit(CF_WRITE_PENDING, &con->flags);
1598 if (con->sock && con->sock->sk) {
1599 write_lock_bh(&con->sock->sk->sk_callback_lock);
1600 con->sock->sk->sk_user_data = NULL;
1601 write_unlock_bh(&con->sock->sk->sk_callback_lock);
1603 if (con->othercon && and_other)
1604 _stop_conn(con->othercon, false);
1605 mutex_unlock(&con->sock_mutex);
1608 static void stop_conn(struct connection *con)
1610 _stop_conn(con, true);
1613 static void shutdown_conn(struct connection *con)
1615 if (con->shutdown_action)
1616 con->shutdown_action(con);
1619 static void connection_release(struct rcu_head *rcu)
1621 struct connection *con = container_of(rcu, struct connection, rcu);
1627 static void free_conn(struct connection *con)
1629 close_connection(con, true, true, true);
1630 spin_lock(&connections_lock);
1631 hlist_del_rcu(&con->list);
1632 spin_unlock(&connections_lock);
1633 if (con->othercon) {
1634 clean_one_writequeue(con->othercon);
1635 call_srcu(&connections_srcu, &con->othercon->rcu,
1636 connection_release);
1638 clean_one_writequeue(con);
1639 call_srcu(&connections_srcu, &con->rcu, connection_release);
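/* Drain all pending work: repeatedly stop every connection (which sets
* CF_READ_PENDING/CF_WRITE_PENDING), flush both workqueues and loop until
* no worker has cleared those bits again, i.e. nothing is still running.
*/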
1642 static void work_flush(void)
1646 struct connection *con;
1650 foreach_conn(stop_conn);
1652 flush_workqueue(recv_workqueue);
1654 flush_workqueue(send_workqueue);
1655 idx = srcu_read_lock(&connections_srcu);
1656 for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1657 hlist_for_each_entry_rcu(con, &connection_hash[i],
1659 ok &= test_bit(CF_READ_PENDING, &con->flags);
1660 ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1661 if (con->othercon) {
1662 ok &= test_bit(CF_READ_PENDING,
1663 &con->othercon->flags);
1664 ok &= test_bit(CF_WRITE_PENDING,
1665 &con->othercon->flags);
1669 srcu_read_unlock(&connections_srcu, idx);
1673 void dlm_lowcomms_stop(void)
1675 /* Set all the flags to prevent any messages being sent to the other nodes */
1681 flush_workqueue(recv_workqueue);
1683 flush_workqueue(send_workqueue);
1685 dlm_close_sock(&listen_con.sock);
1687 foreach_conn(shutdown_conn);
1689 foreach_conn(free_conn);
1694 int dlm_lowcomms_start(void)
1696 int error = -EINVAL;
1699 for (i = 0; i < CONN_HASH_SIZE; i++)
1700 INIT_HLIST_HEAD(&connection_hash[i]);
1703 if (!dlm_local_count) {
1705 log_print("no local IP address has been set");
1709 INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1711 error = work_start();
1717 /* Start listening */
1718 if (dlm_config.ci_protocol == 0)
1719 error = tcp_listen_for_all();
1721 error = sctp_listen_for_all(&listen_con);
1729 dlm_close_sock(&listen_con.sock);
1734 void dlm_lowcomms_exit(void)
1736 struct dlm_node_addr *na, *safe;
1738 spin_lock(&dlm_node_addrs_spin);
1739 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
1740 list_del(&na->list);
1741 while (na->addr_count--)
1742 kfree(na->addr[na->addr_count]);
1745 spin_unlock(&dlm_node_addrs_spin);