// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's responsibility to resolve
 * these into IP addresses or whatever it needs for inter-node
 * communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 */
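
/*
 * As a concrete sketch (assuming the standard dlm configfs layout is
 * mounted; the userland cluster manager normally writes this, and the
 * exact path is an assumption here, not something this file defines):
 *
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol	(use SCTP)
 *	echo 0 > /sys/kernel/config/dlm/cluster/protocol	(use TCP)
 *
 * The value is read below as dlm_config.ci_protocol and must match on
 * every node before any lockspace is created.
 */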

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size - 1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
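
/*
 * A minimal sketch of how the helpers above cooperate (values assumed
 * for illustration): with a 4096-byte page, cbuf_init() sets
 * mask = 4095.  After cbuf_add(cb, 100), cbuf_data() returns
 * (base + len) & mask = 100, i.e. the next free slot; cbuf_eat(cb, 60)
 * then consumes 60 bytes from the front, advancing base to 60 and
 * shrinking len to 40.  The '& mask' arithmetic is what makes the
 * buffer wrap around the page.
 */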

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
	struct list_head writequeue;	/* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;
	struct connection *othercon;
	struct work_struct rwork;	/* Receive workqueue */
	struct work_struct swork;	/* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)
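
/*
 * sock2con() relies on add_sock() having stored the connection pointer
 * in sk->sk_user_data under sk_callback_lock; every socket callback
 * below (data ready, write space, state change, error report) uses it
 * to get back from a struct sock to our per-node state.
 */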

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_allow_conn;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}
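
/*
 * Example: with CONN_HASH_SIZE 32, nodeid 35 hashes to 35 & 31 = 3, so
 * nodes 3 and 35 share hash bucket 3 and are distinguished by the
 * nodeid check in __find_con() below.
 */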

static struct connection *__find_con(int nodeid)
{
	int r;
	struct connection *con;

	r = nodeid_hash(nodeid);

	hlist_for_each_entry(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}
	return NULL;
}

/*
 * If 'allocation' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = nodeid_hash(nodeid);
	hlist_add_head(&con->list, &connection_hash[r]);

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	return con;
}

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i;
	struct hlist_node *n;
	struct connection *con;

	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
			conn_func(con);
	}
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	mutex_lock(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return con;
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}
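
/*
 * Note on failover: curr_addr_index round-robins through a node's known
 * addresses.  Passing try_new_addr = true (as the SCTP connect path
 * does) advances the index, so each successive connect attempt tries
 * the node's next address; the TCP path passes false and sticks to one.
 */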

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}
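
/*
 * dlm_lowcomms_addr() is the entry point through which the cluster
 * configuration (dlm's configfs interface, written by the userland
 * cluster manager) hands us each node's addresses before any lockspace
 * traffic starts.
 */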

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void lowcomms_state_change(struct sock *sk)
{
	/* SCTP layer is not calling sk_data_ready when the connection
	 * is done, so we catch the signal through here. Also, it
	 * doesn't switch socket state when entering shutdown, so we
	 * skip the write in that case.
	 */
	if (sk->sk_shutdown) {
		if (sk->sk_shutdown == RCV_SHUTDOWN)
			lowcomms_data_ready(sk);
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		lowcomms_write_space(sk);
	}
}

int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con;
	struct sockaddr_storage saddr;
	void (*orig_report)(struct sock *) = NULL;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con == NULL)
		goto out;

	orig_report = listen_sock.sk_error_report;
	if (con->sock == NULL ||
	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, dlm_config.ci_tcp_port,
				   sk->sk_err, sk->sk_err_soft);
	} else if (saddr.ss_family == AF_INET) {
		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin4->sin_addr.s_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %u.%u.%u.%u, "
				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
				   sin6->sin6_addr.s6_addr32[1],
				   sin6->sin6_addr.s6_addr32[2],
				   sin6->sin6_addr.s6_addr32[3],
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (orig_report)
		orig_report(sk);
}

/* Note: sk_callback_lock must be locked before calling this function. */
static void save_listen_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	listen_sock.sk_data_ready = sk->sk_data_ready;
	listen_sock.sk_state_change = sk->sk_state_change;
	listen_sock.sk_write_space = sk->sk_write_space;
	listen_sock.sk_error_report = sk->sk_error_report;
}

static void restore_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	con->sock = sock;

	sk->sk_user_data = con;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_error_report = lowcomms_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}
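
/*
 * The callbacks installed here are undone by restore_callbacks() when a
 * socket is closed; for sockets accepted from the listening socket, the
 * originals were stashed in listen_sock by save_listen_callbacks().
 */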

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}
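
/*
 * Example (values assumed): for dlm's default port 21064 on IPv4,
 * cpu_to_be16(21064) stores 0x5248 in network byte order and *addr_len
 * comes back as sizeof(struct sockaddr_in); the trailing memset zeroes
 * the unused tail of the sockaddr_storage so addr_compare()-style
 * memory comparisons see deterministic bytes.
 */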

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other,
			     bool tx, bool rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);

	if (tx && !closing && cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (rx && !closing && cancel_work_sync(&con->rwork)) {
		log_print("canceled rwork for node %d", con->nodeid);
		clear_bit(CF_READ_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	if (con->sock) {
		restore_callbacks(con->sock);
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, true, true);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}
	if (con->nodeid == 0) {
		ret = -EINVAL;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_SIZE);
	}

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;
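	/*
	 * Worked example (numbers assumed): with base = 1000 and len = 500
	 * on a 4096-byte page, cbuf_data() = 1500, so iov[0] covers bytes
	 * 1500..4095 and iov[1] covers bytes 0..999; the two kvecs hand the
	 * free space of the circular buffer to sock_recvmsg() in one call.
	 */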
	iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);

	r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;
	else if (ret == len)
		call_again_soon = 1;

	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
			  page_address(con->rx_page), con->cb.base,
			  con->cb.len, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, true, true, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}

/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock = NULL;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	if (!con->sock) {
		mutex_unlock(&con->sock_mutex);
		return -ENOTCONN;
	}

	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
	if (len < 0) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b = (unsigned char *)&peeraddr;
		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_LIST_HEAD(&othercon->writequeue);
			spin_lock_init(&othercon->writequeue_lock);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct connection *con, uint16_t port)
{
	struct sockaddr_storage localaddr;
	struct sockaddr *addr = (struct sockaddr *)&localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(con->sock, addr, addr_len);
		else
			result = sock_bind_add(con->sock->sk, addr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}
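
/*
 * The first local address is attached with kernel_bind(); each
 * additional one is added to the same socket with sock_bind_add(),
 * which is what gives the SCTP transport its multihoming.
 */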

/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage daddr;
	int result;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);

	/* Some odd races can cause double-connects, ignore them */
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	if (con->sock) {
		log_print("node %d already connected.", con->nodeid);
		goto out;
	}

	memset(&daddr, 0, sizeof(daddr));
	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0)
		goto socket_err;

	con->rx_action = receive_from_sock;
	con->connect_action = sctp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, 0))
		goto bind_err;

	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	sctp_sock_set_nodelay(sock->sk);

	/*
	 * Make sock->ops->connect() function return in specified time,
	 * since O_NONBLOCK argument in connect() function does not work here,
	 * then, we should restore the default value of this attribute.
	 */
	sock_set_sndtimeo(sock->sk, 5);
	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
				    0);
	sock_set_sndtimeo(sock->sk, 0);

	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

bind_err:
	con->sock = NULL;
	sock_release(sock);

socket_err:
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}

out:
	mutex_unlock(&con->sock_mutex);
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock = NULL;
	int result;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock)
		goto out;

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out_err;
	}

	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to our cluster-known address when connecting to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	} else if (sock) {
		sock_release(sock);
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}
out:
	mutex_unlock(&con->sock_mutex);
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

	sock_set_reuseaddr(sock->sk);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->rx_action = accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	sock_set_keepalive(sock->sk);

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
		if (!addr)
			break;
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	int result = -EINVAL;
	struct connection *con = nodeid2con(0, GFP_NOFS);

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
	sctp_sock_set_nodelay(sock->sk);

	write_lock_bh(&sock->sk->sk_callback_lock);
	/* Init con struct */
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = accept_from_sock;
	con->connect_action = sctp_connect_to_sock;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
		goto create_delsock;

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	} else {
		result = -EADDRINUSE;
	}

	return result;
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	spin_unlock(&con->writequeue_lock);

	queue_work(send_workqueue, &con->swork);
	return;

out:
	spin_unlock(&con->writequeue_lock);
}
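
/*
 * Typical caller pattern, as a sketch (the real callers live elsewhere
 * in dlm, where messages and rcom packets are built):
 *
 *	char *p;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
 *	if (!mh)
 *		return -ENOMEM;
 *	// build the message in place at p, then:
 *	dlm_lowcomms_commit_buffer(mh);	// hands it to the send worker
 *
 * get_buffer reserves space on the tail writequeue entry; commit_buffer
 * drops the reference and, once the last user commits, queues swork.
 */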

/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;
	int count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = kernel_sendpage(con->sock, e->page, offset, len,
					      msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				if (ret == -EAGAIN &&
				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
					/* Notify TCP that we're limited by the
					 * application window size.
					 */
					set_bit(SOCK_NOSPACE, &con->sock->flags);
					con->sock->sk->sk_write_pending++;
				}
				cond_resched();
				goto out;
			} else if (ret < 0)
				goto send_error;
		}

		/* Don't starve people filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, true, false, true);
	/* Requeue the send work. When the work daemon runs again, it will try
	   a new connection, then call this function again. */
	queue_work(send_workqueue, &con->swork);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	queue_work(send_workqueue, &con->swork);
}

static void clean_one_writequeue(struct connection *con)
{
	struct writequeue_entry *e, *safe;

	spin_lock(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	struct dlm_node_addr *na;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		set_bit(CF_CLOSE, &con->flags);
		close_connection(con, true, true, true);
		clean_one_writequeue(con);
	}

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);

	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	clear_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock == NULL) /* not mutex protected so check it inside too */
		con->connect_action(con);
	if (!list_empty(&con->writequeue))
		send_to_sock(con);
}

/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	foreach_conn(clean_one_writequeue);
}

static void work_stop(void)
{
	if (recv_workqueue)
		destroy_workqueue(recv_workqueue);
	if (send_workqueue)
		destroy_workqueue(send_workqueue);
}

static int work_start(void)
{
	recv_workqueue = alloc_workqueue("dlm_recv",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = alloc_workqueue("dlm_send",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

static void _stop_conn(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);
	set_bit(CF_CLOSE, &con->flags);
	set_bit(CF_READ_PENDING, &con->flags);
	set_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock && con->sock->sk) {
		write_lock_bh(&con->sock->sk->sk_callback_lock);
		con->sock->sk->sk_user_data = NULL;
		write_unlock_bh(&con->sock->sk->sk_callback_lock);
	}
	if (con->othercon && and_other)
		_stop_conn(con->othercon, false);
	mutex_unlock(&con->sock_mutex);
}

static void stop_conn(struct connection *con)
{
	_stop_conn(con, true);
}

static void free_conn(struct connection *con)
{
	close_connection(con, true, true, true);
	if (con->othercon)
		kmem_cache_free(con_cache, con->othercon);
	hlist_del(&con->list);
	kmem_cache_free(con_cache, con);
}

static void work_flush(void)
{
	int ok;
	int i;
	struct hlist_node *n;
	struct connection *con;

	if (recv_workqueue)
		flush_workqueue(recv_workqueue);
	if (send_workqueue)
		flush_workqueue(send_workqueue);
	do {
		ok = 1;
		foreach_conn(stop_conn);
		if (recv_workqueue)
			flush_workqueue(recv_workqueue);
		if (send_workqueue)
			flush_workqueue(send_workqueue);
		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
			hlist_for_each_entry_safe(con, n,
						  &connection_hash[i], list) {
				ok &= test_bit(CF_READ_PENDING, &con->flags);
				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
				if (con->othercon) {
					ok &= test_bit(CF_READ_PENDING,
						       &con->othercon->flags);
					ok &= test_bit(CF_WRITE_PENDING,
						       &con->othercon->flags);
				}
			}
		}
	} while (!ok);
}

void dlm_lowcomms_stop(void)
{
	/* Set all the flags to prevent any
	   socket activity.
	*/
	mutex_lock(&connections_lock);
	dlm_allow_conn = 0;
	mutex_unlock(&connections_lock);
	work_flush();
	clean_writequeues();
	foreach_conn(free_conn);
	work_stop();

	kmem_cache_destroy(con_cache);
}

int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto fail;

	error = work_start();
	if (error)
		goto fail_destroy;

	dlm_allow_conn = 1;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	dlm_allow_conn = 0;
	con = nodeid2con(0, 0);
	if (con) {
		close_connection(con, false, true, true);
		kmem_cache_free(con_cache, con);
	}
fail_destroy:
	kmem_cache_destroy(con_cache);
fail:
	return error;
}

void dlm_lowcomms_exit(void)
{
	struct dlm_node_addr *na, *safe;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);
}