1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
17 * This is the "low-level" comms layer.
19 * It is responsible for sending messages to, and receiving
20 * messages from, other nodes in the cluster.
22 * Cluster nodes are referred to by their nodeids. Nodeids are
23 * simply 32-bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is its
25 * responsibility. It is this layer's
26 * responsibility to resolve these into IP addresses or
27 * whatever else it needs for inter-node communication.
29 * The comms level consists of two kernel threads (implemented below as
30 * the dlm_recv and dlm_send workqueues): a receive side that takes
31 * messages arriving from other nodes and passes them up to the
32 * mid-level comms layer (which understands the message format) for
33 * execution by the locking core, and a send side which sets up
34 * connections to remote nodes and sends the queued data. Other threads
35 * are not allowed to send their own data because it may cause them to
36 * wait in times of high load. Also, this way, the send side can collect
37 * together messages bound for one node and send them in one block.
39 * lowcomms will choose to use either TCP or SCTP as its transport layer
40 * depending on the configuration variable 'protocol'. This should be set
41 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
42 * cluster-wide mechanism as it must be the same on all nodes of the cluster
43 * for the DLM to function.
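 *
 * Editor's note: in practice 'protocol' (like the port used throughout
 * this file) is set through the dlm configfs interface by the cluster
 * manager before any lockspace is created, e.g. something like
 * "echo 1 > /sys/kernel/config/dlm/cluster/protocol" to select SCTP
 * (path shown for illustration; see the dlm_controld documentation for
 * the authoritative interface).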
47 #include <asm/ioctls.h>
50 #include <linux/pagemap.h>
51 #include <linux/file.h>
52 #include <linux/mutex.h>
53 #include <linux/sctp.h>
54 #include <linux/slab.h>
55 #include <net/sctp/sctp.h>
58 #include "dlm_internal.h"
63 #define NEEDED_RMEM (4*1024*1024)
64 #define CONN_HASH_SIZE 32
66 /* Number of messages to send before rescheduling */
67 #define MAX_SEND_MSG_COUNT 25
75 static void cbuf_add(struct cbuf *cb, int n)
80 static int cbuf_data(struct cbuf *cb)
82 return ((cb->base + cb->len) & cb->mask);
85 static void cbuf_init(struct cbuf *cb, int size)
87 cb->base = cb->len = 0;
91 static void cbuf_eat(struct cbuf *cb, int n)
98 static bool cbuf_empty(struct cbuf *cb)
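/*
 * Editor's sketch: the struct cbuf definition and most helper bodies are
 * elided above.  Assuming the conventional base/len/mask ring-buffer
 * layout (buffer size a power of two), the helpers reduce to the
 * bookkeeping below.  This is a reconstruction for illustration, not
 * verbatim kernel source.
 */
struct cbuf_sketch {
	unsigned int base;	/* offset of the first valid byte */
	unsigned int len;	/* number of valid bytes */
	unsigned int mask;	/* size - 1, for cheap wraparound */
};

static void cbuf_sketch_init(struct cbuf_sketch *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size - 1;
}

/* offset one past the last valid byte, i.e. where new data lands */
static int cbuf_sketch_data(struct cbuf_sketch *cb)
{
	return (cb->base + cb->len) & cb->mask;
}

static void cbuf_sketch_add(struct cbuf_sketch *cb, int n)
{
	cb->len += n;
}

static void cbuf_sketch_eat(struct cbuf_sketch *cb, int n)
{
	cb->len -= n;
	cb->base = (cb->base + n) & cb->mask;
}

static bool cbuf_sketch_empty(struct cbuf_sketch *cb)
{
	return cb->len == 0;
}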
104 struct socket *sock; /* NULL if not connected */
105 uint32_t nodeid; /* So we know who we are in the list */
106 struct mutex sock_mutex;
108 #define CF_READ_PENDING 1
109 #define CF_WRITE_PENDING 2
110 #define CF_INIT_PENDING 4
111 #define CF_IS_OTHERCON 5
113 #define CF_APP_LIMITED 7
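/*
 * Editor's note: the CF_* values above are bit numbers used with
 * set_bit()/test_bit() on con->flags, not masks.  CF_CLOSE and
 * CF_CLOSING, which appear later in this file, are defined on lines
 * elided here (bits 6 and 8 in the upstream source).
 */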
115 struct list_head writequeue; /* List of outgoing writequeue_entries */
116 spinlock_t writequeue_lock;
117 int (*rx_action) (struct connection *); /* What to do when active */
118 void (*connect_action) (struct connection *); /* What to do to connect */
119 struct page *rx_page;
122 #define MAX_CONNECT_RETRIES 3
123 struct hlist_node list;
124 struct connection *othercon;
125 struct work_struct rwork; /* Receive workqueue */
126 struct work_struct swork; /* Send workqueue */
128 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
130 /* An entry waiting to be sent */
131 struct writequeue_entry {
132 struct list_head list;
138 struct connection *con;
141 struct dlm_node_addr {
142 struct list_head list;
146 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
149 static struct listen_sock_callbacks {
150 void (*sk_error_report)(struct sock *);
151 void (*sk_data_ready)(struct sock *);
152 void (*sk_state_change)(struct sock *);
153 void (*sk_write_space)(struct sock *);
156 static LIST_HEAD(dlm_node_addrs);
157 static DEFINE_SPINLOCK(dlm_node_addrs_spin);
159 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
160 static int dlm_local_count;
161 static int dlm_allow_conn;
164 static struct workqueue_struct *recv_workqueue;
165 static struct workqueue_struct *send_workqueue;
167 static struct hlist_head connection_hash[CONN_HASH_SIZE];
168 static DEFINE_MUTEX(connections_lock);
169 static struct kmem_cache *con_cache;
171 static void process_recv_sockets(struct work_struct *work);
172 static void process_send_sockets(struct work_struct *work);
175 /* This is deliberately very simple because most clusters have simple
176    sequential nodeids, so we should be able to go straight to a connection
177    struct in the hash table */
178 static inline int nodeid_hash(int nodeid)
180 return nodeid & (CONN_HASH_SIZE-1);
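/* Editor's note: with CONN_HASH_SIZE = 32 this is simply nodeid % 32, so
   e.g. nodeid 35 lands in bucket 3, and small sequential nodeids each
   get their own bucket. */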
183 static struct connection *__find_con(int nodeid)
186 struct connection *con;
188 r = nodeid_hash(nodeid);
190 hlist_for_each_entry(con, &connection_hash[r], list) {
191 if (con->nodeid == nodeid)
198 * If 'alloc' is zero then we don't attempt to create a new
199 * connection structure for this node.
201 static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
203 struct connection *con = NULL;
206 con = __find_con(nodeid);
210 con = kmem_cache_zalloc(con_cache, alloc);
214 r = nodeid_hash(nodeid);
215 hlist_add_head(&con->list, &connection_hash[r]);
217 con->nodeid = nodeid;
218 mutex_init(&con->sock_mutex);
219 INIT_LIST_HEAD(&con->writequeue);
220 spin_lock_init(&con->writequeue_lock);
221 INIT_WORK(&con->swork, process_send_sockets);
222 INIT_WORK(&con->rwork, process_recv_sockets);
224 /* Set up action pointers for child sockets */
226 struct connection *zerocon = __find_con(0);
228 con->connect_action = zerocon->connect_action;
230 con->rx_action = zerocon->rx_action;
236 /* Loop round all connections */
237 static void foreach_conn(void (*conn_func)(struct connection *c))
240 struct hlist_node *n;
241 struct connection *con;
243 for (i = 0; i < CONN_HASH_SIZE; i++) {
244 hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
249 static struct connection *nodeid2con(int nodeid, gfp_t allocation)
251 struct connection *con;
253 mutex_lock(&connections_lock);
254 con = __nodeid2con(nodeid, allocation);
255 mutex_unlock(&connections_lock);
260 static struct dlm_node_addr *find_node_addr(int nodeid)
262 struct dlm_node_addr *na;
264 list_for_each_entry(na, &dlm_node_addrs, list) {
265 if (na->nodeid == nodeid)
271 static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
273 switch (x->ss_family) {
275 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
276 struct sockaddr_in *siny = (struct sockaddr_in *)y;
277 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
279 if (sinx->sin_port != siny->sin_port)
284 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
285 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
286 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
288 if (sinx->sin6_port != siny->sin6_port)
298 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
299 struct sockaddr *sa_out, bool try_new_addr)
301 struct sockaddr_storage sas;
302 struct dlm_node_addr *na;
304 if (!dlm_local_count)
307 spin_lock(&dlm_node_addrs_spin);
308 na = find_node_addr(nodeid);
309 if (na && na->addr_count) {
310 memcpy(&sas, na->addr[na->curr_addr_index],
311 sizeof(struct sockaddr_storage));
314 na->curr_addr_index++;
315 if (na->curr_addr_index == na->addr_count)
316 na->curr_addr_index = 0;
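/* Editor's note: this rotation (guarded by an elided try_new_addr
 * check) round-robins through a multi-homed node's addresses, so a
 * failed connect attempt can retry with the node's next address. */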
319 spin_unlock(&dlm_node_addrs_spin);
328 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
333 if (dlm_local_addr[0]->ss_family == AF_INET) {
334 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
335 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
336 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
338 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
339 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
340 ret6->sin6_addr = in6->sin6_addr;
346 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
348 struct dlm_node_addr *na;
352 spin_lock(&dlm_node_addrs_spin);
353 list_for_each_entry(na, &dlm_node_addrs, list) {
357 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
358 if (addr_compare(na->addr[addr_i], addr)) {
359 *nodeid = na->nodeid;
366 spin_unlock(&dlm_node_addrs_spin);
370 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
372 struct sockaddr_storage *new_addr;
373 struct dlm_node_addr *new_node, *na;
375 new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
379 new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
385 memcpy(new_addr, addr, len);
387 spin_lock(&dlm_node_addrs_spin);
388 na = find_node_addr(nodeid);
390 new_node->nodeid = nodeid;
391 new_node->addr[0] = new_addr;
392 new_node->addr_count = 1;
393 list_add(&new_node->list, &dlm_node_addrs);
394 spin_unlock(&dlm_node_addrs_spin);
398 if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
399 spin_unlock(&dlm_node_addrs_spin);
405 na->addr[na->addr_count++] = new_addr;
406 spin_unlock(&dlm_node_addrs_spin);
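/*
 * Editor's sketch of a caller registering a node's address.  In the real
 * tree this is driven from the dlm configfs code on behalf of
 * dlm_controld; the function name and values below are made up for
 * illustration.
 */
static int example_register_node(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;

	memset(&ss, 0, sizeof(ss));
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = cpu_to_be32(0xc0a80102);	/* 192.168.1.2 */
	/* the port is filled in later by make_sockaddr() */

	return dlm_lowcomms_addr(2 /* nodeid */, &ss, sizeof(ss));
}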
411 /* Data available on socket, or listen socket received a connect */
412 static void lowcomms_data_ready(struct sock *sk)
414 struct connection *con;
416 read_lock_bh(&sk->sk_callback_lock);
418 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
419 queue_work(recv_workqueue, &con->rwork);
420 read_unlock_bh(&sk->sk_callback_lock);
423 static void lowcomms_write_space(struct sock *sk)
425 struct connection *con;
427 read_lock_bh(&sk->sk_callback_lock);
432 clear_bit(SOCK_NOSPACE, &con->sock->flags);
434 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
435 con->sock->sk->sk_write_pending--;
436 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
439 queue_work(send_workqueue, &con->swork);
441 read_unlock_bh(&sk->sk_callback_lock);
444 static inline void lowcomms_connect_sock(struct connection *con)
446 if (test_bit(CF_CLOSE, &con->flags))
448 queue_work(send_workqueue, &con->swork);
452 static void lowcomms_state_change(struct sock *sk)
454 /* SCTP layer is not calling sk_data_ready when the connection
455 * is done, so we catch the signal through here. Also, it
456 * doesn't switch socket state when entering shutdown, so we
457 * skip the write in that case.
459 if (sk->sk_shutdown) {
460 if (sk->sk_shutdown == RCV_SHUTDOWN)
461 lowcomms_data_ready(sk);
462 } else if (sk->sk_state == TCP_ESTABLISHED) {
463 lowcomms_write_space(sk);
467 int dlm_lowcomms_connect_node(int nodeid)
469 struct connection *con;
471 if (nodeid == dlm_our_nodeid())
474 con = nodeid2con(nodeid, GFP_NOFS);
477 lowcomms_connect_sock(con);
481 static void lowcomms_error_report(struct sock *sk)
483 struct connection *con;
484 struct sockaddr_storage saddr;
486 void (*orig_report)(struct sock *) = NULL;
488 read_lock_bh(&sk->sk_callback_lock);
493 orig_report = listen_sock.sk_error_report;
494 if (con->sock == NULL ||
495 kernel_getpeername(con->sock, (struct sockaddr *)&saddr, &buflen)) {
496 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
497 "sending to node %d, port %d, "
498 "sk_err=%d/%d\n", dlm_our_nodeid(),
499 con->nodeid, dlm_config.ci_tcp_port,
500 sk->sk_err, sk->sk_err_soft);
501 } else if (saddr.ss_family == AF_INET) {
502 struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
504 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
505 "sending to node %d at %pI4, port %d, "
506 "sk_err=%d/%d\n", dlm_our_nodeid(),
507 con->nodeid, &sin4->sin_addr.s_addr,
508 dlm_config.ci_tcp_port, sk->sk_err,
511 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
513 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
514 "sending to node %d at %u.%u.%u.%u, "
515 "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
516 con->nodeid, sin6->sin6_addr.s6_addr32[0],
517 sin6->sin6_addr.s6_addr32[1],
518 sin6->sin6_addr.s6_addr32[2],
519 sin6->sin6_addr.s6_addr32[3],
520 dlm_config.ci_tcp_port, sk->sk_err,
524 read_unlock_bh(&sk->sk_callback_lock);
529 /* Note: sk_callback_lock must be locked before calling this function. */
530 static void save_listen_callbacks(struct socket *sock)
532 struct sock *sk = sock->sk;
534 listen_sock.sk_data_ready = sk->sk_data_ready;
535 listen_sock.sk_state_change = sk->sk_state_change;
536 listen_sock.sk_write_space = sk->sk_write_space;
537 listen_sock.sk_error_report = sk->sk_error_report;
540 static void restore_callbacks(struct socket *sock)
542 struct sock *sk = sock->sk;
544 write_lock_bh(&sk->sk_callback_lock);
545 sk->sk_user_data = NULL;
546 sk->sk_data_ready = listen_sock.sk_data_ready;
547 sk->sk_state_change = listen_sock.sk_state_change;
548 sk->sk_write_space = listen_sock.sk_write_space;
549 sk->sk_error_report = listen_sock.sk_error_report;
550 write_unlock_bh(&sk->sk_callback_lock);
553 /* Make a socket active */
554 static void add_sock(struct socket *sock, struct connection *con)
556 struct sock *sk = sock->sk;
558 write_lock_bh(&sk->sk_callback_lock);
561 sk->sk_user_data = con;
562 /* Install a data_ready callback */
563 sk->sk_data_ready = lowcomms_data_ready;
564 sk->sk_write_space = lowcomms_write_space;
565 sk->sk_state_change = lowcomms_state_change;
566 sk->sk_allocation = GFP_NOFS;
567 sk->sk_error_report = lowcomms_error_report;
568 write_unlock_bh(&sk->sk_callback_lock);
571 /* Add the port number to an IPv4 or IPv6 sockaddr and return the address length */
573 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
576 saddr->ss_family = dlm_local_addr[0]->ss_family;
577 if (saddr->ss_family == AF_INET) {
578 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
579 in4_addr->sin_port = cpu_to_be16(port);
580 *addr_len = sizeof(struct sockaddr_in);
581 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
583 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
584 in6_addr->sin6_port = cpu_to_be16(port);
585 *addr_len = sizeof(struct sockaddr_in6);
587 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
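/*
 * Editor's note: e.g. for an IPv4 local address,
 * make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len) leaves saddr
 * as an AF_INET sockaddr with sin_port = htons(21064) (the usual default
 * port), sets addr_len = sizeof(struct sockaddr_in), and zeroes the rest
 * of the storage so whole-structure comparisons behave predictably.
 */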
590 /* Close a remote connection and tidy up */
591 static void close_connection(struct connection *con, bool and_other,
594 bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
596 if (tx && !closing && cancel_work_sync(&con->swork))
597 log_print("canceled swork for node %d", con->nodeid);
598 if (rx && !closing && cancel_work_sync(&con->rwork))
599 log_print("canceled rwork for node %d", con->nodeid);
601 mutex_lock(&con->sock_mutex);
603 restore_callbacks(con->sock);
604 sock_release(con->sock);
607 if (con->othercon && and_other) {
608 /* Will only re-enter once. */
609 close_connection(con->othercon, false, true, true);
612 __free_page(con->rx_page);
617 mutex_unlock(&con->sock_mutex);
618 clear_bit(CF_CLOSING, &con->flags);
621 /* Data received from remote end */
622 static int receive_from_sock(struct connection *con)
625 struct msghdr msg = {};
629 int call_again_soon = 0;
632 mutex_lock(&con->sock_mutex);
634 if (con->sock == NULL) {
638 if (con->nodeid == 0) {
643 if (con->rx_page == NULL) {
645 * This doesn't need to be atomic, but I think it should
646 * improve performance if it is.
648 con->rx_page = alloc_page(GFP_ATOMIC);
649 if (con->rx_page == NULL)
651 cbuf_init(&con->cb, PAGE_SIZE);
655 * iov[0] is the bit of the circular buffer between the current end
656 * point (cb.base + cb.len) and the end of the buffer.
658 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
659 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
664 * iov[1] is the bit of the circular buffer between the start of the
665 * buffer and the start of the currently used section (cb.base)
667 if (cbuf_data(&con->cb) >= con->cb.base) {
668 iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
669 iov[1].iov_len = con->cb.base;
670 iov[1].iov_base = page_address(con->rx_page);
673 len = iov[0].iov_len + iov[1].iov_len;
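/*
 * Worked example (editor's note): with PAGE_SIZE = 4096,
 * cb.base = 3000 and cb.len = 500, cbuf_data() is 3500, so iov[0]
 * covers bytes 3500..4095 (596 bytes at the top of the page) and
 * iov[1] covers bytes 0..2999 (3000 bytes below base): 3596 free
 * bytes in total, i.e. PAGE_SIZE minus the 500 queued bytes.
 */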
675 r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
676 MSG_DONTWAIT | MSG_NOSIGNAL);
682 cbuf_add(&con->cb, ret);
683 ret = dlm_process_incoming_buffer(con->nodeid,
684 page_address(con->rx_page),
685 con->cb.base, con->cb.len,
687 if (ret == -EBADMSG) {
688 log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
689 page_address(con->rx_page), con->cb.base,
694 cbuf_eat(&con->cb, ret);
696 if (cbuf_empty(&con->cb) && !call_again_soon) {
697 __free_page(con->rx_page);
703 mutex_unlock(&con->sock_mutex);
707 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
708 queue_work(recv_workqueue, &con->rwork);
709 mutex_unlock(&con->sock_mutex);
713 mutex_unlock(&con->sock_mutex);
714 if (ret != -EAGAIN) {
715 close_connection(con, true, true, false);
716 /* Reconnect when there is something to send */
718 /* Don't return success if we really got EOF */
725 /* Listening socket is busy, accept a connection */
726 static int tcp_accept_from_sock(struct connection *con)
729 struct sockaddr_storage peeraddr;
730 struct socket *newsock;
733 struct connection *newcon;
734 struct connection *addcon;
736 mutex_lock(&connections_lock);
737 if (!dlm_allow_conn) {
738 mutex_unlock(&connections_lock);
741 mutex_unlock(&connections_lock);
743 mutex_lock_nested(&con->sock_mutex, 0);
746 mutex_unlock(&con->sock_mutex);
750 result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
754 /* Get the connected socket's peer */
755 memset(&peeraddr, 0, sizeof(peeraddr));
756 if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
758 result = -ECONNABORTED;
762 /* Get the new node's NODEID */
763 make_sockaddr(&peeraddr, 0, &len);
764 if (addr_to_nodeid(&peeraddr, &nodeid)) {
765 unsigned char *b = (unsigned char *)&peeraddr;
766 log_print("connect from non cluster node");
767 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
768 b, sizeof(struct sockaddr_storage));
769 sock_release(newsock);
770 mutex_unlock(&con->sock_mutex);
774 log_print("got connection from %d", nodeid);
776 /* Check to see if we already have a connection to this node. This
777 * could happen if the two nodes initiate a connection at roughly
778 * the same time and the connections cross on the wire.
779 * In this case we store the incoming one in "othercon"
781 newcon = nodeid2con(nodeid, GFP_NOFS);
786 mutex_lock_nested(&newcon->sock_mutex, 1);
788 struct connection *othercon = newcon->othercon;
791 othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
793 log_print("failed to allocate incoming socket");
794 mutex_unlock(&newcon->sock_mutex);
798 othercon->nodeid = nodeid;
799 othercon->rx_action = receive_from_sock;
800 mutex_init(&othercon->sock_mutex);
801 INIT_WORK(&othercon->swork, process_send_sockets);
802 INIT_WORK(&othercon->rwork, process_recv_sockets);
803 set_bit(CF_IS_OTHERCON, &othercon->flags);
805 mutex_lock_nested(&othercon->sock_mutex, 2);
806 if (!othercon->sock) {
807 newcon->othercon = othercon;
808 add_sock(newsock, othercon);
810 mutex_unlock(&othercon->sock_mutex);
813 printk("Extra connection from node %d attempted\n", nodeid);
815 mutex_unlock(&othercon->sock_mutex);
816 mutex_unlock(&newcon->sock_mutex);
821 newcon->rx_action = receive_from_sock;
822 /* accept copies the sk after we've saved the callbacks, so we
823 don't want to save them a second time or comm errors will
824 result in calling sk_error_report recursively. */
825 add_sock(newsock, newcon);
829 mutex_unlock(&newcon->sock_mutex);
832 * Add it to the active queue in case we got data
833 * between processing the accept and adding the socket
834 * to the read_sockets list
836 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
837 queue_work(recv_workqueue, &addcon->rwork);
838 mutex_unlock(&con->sock_mutex);
843 mutex_unlock(&con->sock_mutex);
845 sock_release(newsock);
847 if (result != -EAGAIN)
848 log_print("error accepting connection from node: %d", result);
852 static int sctp_accept_from_sock(struct connection *con)
854 /* Check that the new node is in the lockspace */
855 struct sctp_prim prim;
859 struct connection *newcon;
860 struct connection *addcon;
861 struct socket *newsock;
863 mutex_lock(&connections_lock);
864 if (!dlm_allow_conn) {
865 mutex_unlock(&connections_lock);
868 mutex_unlock(&connections_lock);
870 mutex_lock_nested(&con->sock_mutex, 0);
872 ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
876 memset(&prim, 0, sizeof(struct sctp_prim));
877 prim_len = sizeof(struct sctp_prim);
879 ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
880 (char *)&prim, &prim_len);
882 log_print("getsockopt/sctp_primary_addr failed: %d", ret);
886 make_sockaddr(&prim.ssp_addr, 0, &addr_len);
887 ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
889 unsigned char *b = (unsigned char *)&prim.ssp_addr;
891 log_print("reject connect from unknown addr");
892 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
893 b, sizeof(struct sockaddr_storage));
897 newcon = nodeid2con(nodeid, GFP_NOFS);
903 mutex_lock_nested(&newcon->sock_mutex, 1);
906 struct connection *othercon = newcon->othercon;
909 othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
911 log_print("failed to allocate incoming socket");
912 mutex_unlock(&newcon->sock_mutex);
916 othercon->nodeid = nodeid;
917 othercon->rx_action = receive_from_sock;
918 mutex_init(&othercon->sock_mutex);
919 INIT_WORK(&othercon->swork, process_send_sockets);
920 INIT_WORK(&othercon->rwork, process_recv_sockets);
921 set_bit(CF_IS_OTHERCON, &othercon->flags);
923 mutex_lock_nested(&othercon->sock_mutex, 2);
924 if (!othercon->sock) {
925 newcon->othercon = othercon;
926 add_sock(newsock, othercon);
928 mutex_unlock(&othercon->sock_mutex);
930 printk("Extra connection from node %d attempted\n", nodeid);
932 mutex_unlock(&othercon->sock_mutex);
933 mutex_unlock(&newcon->sock_mutex);
937 newcon->rx_action = receive_from_sock;
938 add_sock(newsock, newcon);
942 log_print("connected to %d", nodeid);
944 mutex_unlock(&newcon->sock_mutex);
947 * Add it to the active queue in case we got data
948 * between processing the accept and adding the socket
949 * to the read_sockets list
951 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
952 queue_work(recv_workqueue, &addcon->rwork);
953 mutex_unlock(&con->sock_mutex);
958 mutex_unlock(&con->sock_mutex);
960 sock_release(newsock);
962 log_print("error accepting connection from node: %d", ret);
967 static void free_entry(struct writequeue_entry *e)
969 __free_page(e->page);
974 * writequeue_entry_complete - try to delete and free write queue entry
975 * @e: write queue entry to try to delete
976 * @completed: bytes completed
978 * writequeue_lock must be held.
980 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
982 e->offset += completed;
985 if (e->len == 0 && e->users == 0) {
992 * sctp_bind_addrs - bind an SCTP socket to all our addresses
994 static int sctp_bind_addrs(struct connection *con, uint16_t port)
996 struct sockaddr_storage localaddr;
997 int i, addr_len, result = 0;
999 for (i = 0; i < dlm_local_count; i++) {
1000 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1001 make_sockaddr(&localaddr, port, &addr_len);
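/* Editor's note: the elided branch here binds the first local address
 * with kernel_bind() and attaches each further address to the socket
 * with the SCTP_SOCKOPT_BINDX_ADD option below. */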
1004 result = kernel_bind(con->sock,
1005 (struct sockaddr *)&localaddr,
1008 result = kernel_setsockopt(con->sock, SOL_SCTP,
1009 SCTP_SOCKOPT_BINDX_ADD,
1010 (char *)&localaddr, addr_len);
1013 log_print("Can't bind to %d addr number %d, %d.\n",
1014 port, i + 1, result);
1021 /* Initiate an SCTP association.
1022 This is a special case of send_to_sock() in that we don't yet have a
1023 peeled-off socket for this association, so we use the listening socket
1024 and add the primary IP address of the remote node.
1026 static void sctp_connect_to_sock(struct connection *con)
1028 struct sockaddr_storage daddr;
1032 struct socket *sock;
1034 if (con->nodeid == 0) {
1035 log_print("attempt to connect sock 0 foiled");
1039 mutex_lock(&con->sock_mutex);
1041 /* Some odd races can cause double-connects, ignore them */
1042 if (con->retries++ > MAX_CONNECT_RETRIES)
1046 log_print("node %d already connected.", con->nodeid);
1050 memset(&daddr, 0, sizeof(daddr));
1051 result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
1053 log_print("no address for nodeid %d", con->nodeid);
1057 /* Create a socket to communicate with */
1058 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1059 SOCK_STREAM, IPPROTO_SCTP, &sock);
1063 con->rx_action = receive_from_sock;
1064 con->connect_action = sctp_connect_to_sock;
1065 add_sock(sock, con);
1067 /* Bind to all addresses. */
1068 if (sctp_bind_addrs(con, 0))
1071 make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
1073 log_print("connecting to %d", con->nodeid);
1075 /* Turn off Nagle's algorithm */
1076 kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1079 result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
1081 if (result == -EINPROGRESS)
1092 * Some errors are fatal and this list might need adjusting. For other
1093 * errors we try again until the max number of retries is reached.
1095 if (result != -EHOSTUNREACH &&
1096 result != -ENETUNREACH &&
1097 result != -ENETDOWN &&
1098 result != -EINVAL &&
1099 result != -EPROTONOSUPPORT) {
1100 log_print("connect %d try %d error %d", con->nodeid,
1101 con->retries, result);
1102 mutex_unlock(&con->sock_mutex);
1104 lowcomms_connect_sock(con);
1109 mutex_unlock(&con->sock_mutex);
1112 /* Connect a new socket to its peer */
1113 static void tcp_connect_to_sock(struct connection *con)
1115 struct sockaddr_storage saddr, src_addr;
1117 struct socket *sock = NULL;
1121 if (con->nodeid == 0) {
1122 log_print("attempt to connect sock 0 foiled");
1126 mutex_lock(&con->sock_mutex);
1127 if (con->retries++ > MAX_CONNECT_RETRIES)
1130 /* Some odd races can cause double-connects, ignore them */
1134 /* Create a socket to communicate with */
1135 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1136 SOCK_STREAM, IPPROTO_TCP, &sock);
1140 memset(&saddr, 0, sizeof(saddr));
1141 result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
1143 log_print("no address for nodeid %d", con->nodeid);
1147 con->rx_action = receive_from_sock;
1148 con->connect_action = tcp_connect_to_sock;
1149 add_sock(sock, con);
1151 /* Bind to our cluster-known address when connecting, to avoid routing problems */
1153 memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
1154 make_sockaddr(&src_addr, 0, &addr_len);
1155 result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
1158 log_print("could not bind for connect: %d", result);
1159 /* This *may* not indicate a critical error */
1162 make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
1164 log_print("connecting to %d", con->nodeid);
1166 /* Turn off Nagle's algorithm */
1167 kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1170 result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
1172 if (result == -EINPROGRESS)
1179 sock_release(con->sock);
1185 * Some errors are fatal and this list might need adjusting. For other
1186 * errors we try again until the max number of retries is reached.
1188 if (result != -EHOSTUNREACH &&
1189 result != -ENETUNREACH &&
1190 result != -ENETDOWN &&
1191 result != -EINVAL &&
1192 result != -EPROTONOSUPPORT) {
1193 log_print("connect %d try %d error %d", con->nodeid,
1194 con->retries, result);
1195 mutex_unlock(&con->sock_mutex);
1197 lowcomms_connect_sock(con);
1201 mutex_unlock(&con->sock_mutex);
1205 static struct socket *tcp_create_listen_sock(struct connection *con,
1206 struct sockaddr_storage *saddr)
1208 struct socket *sock = NULL;
1213 if (dlm_local_addr[0]->ss_family == AF_INET)
1214 addr_len = sizeof(struct sockaddr_in);
1216 addr_len = sizeof(struct sockaddr_in6);
1218 /* Create a socket to communicate with */
1219 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1220 SOCK_STREAM, IPPROTO_TCP, &sock);
1222 log_print("Can't create listening comms socket");
1226 /* Turn off Nagle's algorithm */
1227 kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1230 result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
1231 (char *)&one, sizeof(one));
1234 log_print("Failed to set SO_REUSEADDR on socket: %d", result);
1236 write_lock_bh(&sock->sk->sk_callback_lock);
1237 sock->sk->sk_user_data = con;
1238 save_listen_callbacks(sock);
1239 con->rx_action = tcp_accept_from_sock;
1240 con->connect_action = tcp_connect_to_sock;
1241 write_unlock_bh(&sock->sk->sk_callback_lock);
1243 /* Bind to our port */
1244 make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
1245 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
1247 log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
1253 result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
1254 (char *)&one, sizeof(one));
1256 log_print("Set keepalive failed: %d", result);
1259 result = sock->ops->listen(sock, 5);
1261 log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
1271 /* Get local addresses */
1272 static void init_local(void)
1274 struct sockaddr_storage sas, *addr;
1277 dlm_local_count = 0;
1278 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1279 if (dlm_our_addr(&sas, i))
1282 addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
1285 dlm_local_addr[dlm_local_count++] = addr;
1289 /* Initialise SCTP socket and bind to all interfaces */
1290 static int sctp_listen_for_all(void)
1292 struct socket *sock = NULL;
1293 int result = -EINVAL;
1294 struct connection *con = nodeid2con(0, GFP_NOFS);
1295 int bufsize = NEEDED_RMEM;
1301 log_print("Using SCTP for communications");
1303 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1304 SOCK_STREAM, IPPROTO_SCTP, &sock);
1306 log_print("Can't create comms socket, check SCTP is loaded");
1310 result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
1311 (char *)&bufsize, sizeof(bufsize));
1313 log_print("Error increasing buffer space on socket %d", result);
1315 result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
1318 log_print("Could not set SCTP NODELAY error %d\n", result);
1320 write_lock_bh(&sock->sk->sk_callback_lock);
1321 /* Init con struct */
1322 sock->sk->sk_user_data = con;
1323 save_listen_callbacks(sock);
1325 con->sock->sk->sk_data_ready = lowcomms_data_ready;
1326 con->rx_action = sctp_accept_from_sock;
1327 con->connect_action = sctp_connect_to_sock;
1329 write_unlock_bh(&sock->sk->sk_callback_lock);
1331 /* Bind to all addresses. */
1332 if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
1333 goto create_delsock;
1335 result = sock->ops->listen(sock, 5);
1337 log_print("Can't set socket listening");
1338 goto create_delsock;
1350 static int tcp_listen_for_all(void)
1352 struct socket *sock = NULL;
1353 struct connection *con = nodeid2con(0, GFP_NOFS);
1354 int result = -EINVAL;
1359 /* We don't support multi-homed hosts */
1360 if (dlm_local_addr[1] != NULL) {
1361 log_print("TCP protocol can't handle multi-homed hosts, "
1366 log_print("Using TCP for communications");
1368 sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1370 add_sock(sock, con);
1374 result = -EADDRINUSE;
1382 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1385 struct writequeue_entry *entry;
1387 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
1391 entry->page = alloc_page(allocation);
1406 void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
1408 struct connection *con;
1409 struct writequeue_entry *e;
1412 con = nodeid2con(nodeid, allocation);
1416 spin_lock(&con->writequeue_lock);
1417 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1418 if ((&e->list == &con->writequeue) ||
1419 (PAGE_SIZE - e->end < len)) {
1426 spin_unlock(&con->writequeue_lock);
1430 *ppc = page_address(e->page) + offset;
1434 e = new_writequeue_entry(con, allocation);
1436 spin_lock(&con->writequeue_lock);
1440 list_add_tail(&e->list, &con->writequeue);
1441 spin_unlock(&con->writequeue_lock);
1447 void dlm_lowcomms_commit_buffer(void *mh)
1449 struct writequeue_entry *e = (struct writequeue_entry *)mh;
1450 struct connection *con = e->con;
1453 spin_lock(&con->writequeue_lock);
1457 e->len = e->end - e->offset;
1458 spin_unlock(&con->writequeue_lock);
1460 queue_work(send_workqueue, &con->swork);
1464 spin_unlock(&con->writequeue_lock);
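/*
 * Editor's sketch of how the mid-level comms code is expected to pair
 * the two calls above: reserve space on the connection's writequeue,
 * build the message in place, then commit it.  The function and message
 * below are illustrative, not from this file.
 */
static int example_send_message(int nodeid, const void *msg, int len)
{
	char *p;
	void *mh;

	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
	if (!mh)
		return -ENOMEM;

	memcpy(p, msg, len);		/* format the message in place */
	dlm_lowcomms_commit_buffer(mh);	/* hand it to the send worker */
	return 0;
}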
1468 /* Send a message */
1469 static void send_to_sock(struct connection *con)
1472 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1473 struct writequeue_entry *e;
1477 mutex_lock(&con->sock_mutex);
1478 if (con->sock == NULL)
1481 spin_lock(&con->writequeue_lock);
1483 e = list_entry(con->writequeue.next, struct writequeue_entry,
1485 if ((struct list_head *) e == &con->writequeue)
1490 BUG_ON(len == 0 && e->users == 0);
1491 spin_unlock(&con->writequeue_lock);
1495 ret = kernel_sendpage(con->sock, e->page, offset, len,
1497 if (ret == -EAGAIN || ret == 0) {
1498 if (ret == -EAGAIN &&
1499 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1500 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1501 /* Notify TCP that we're limited by the
1502 * application window size.
1504 set_bit(SOCK_NOSPACE, &con->sock->flags);
1505 con->sock->sk->sk_write_pending++;
1513 /* Don't starve people filling buffers */
1514 if (++count >= MAX_SEND_MSG_COUNT) {
1519 spin_lock(&con->writequeue_lock);
1520 writequeue_entry_complete(e, ret);
1522 spin_unlock(&con->writequeue_lock);
1524 mutex_unlock(&con->sock_mutex);
1528 mutex_unlock(&con->sock_mutex);
1529 close_connection(con, true, false, true);
1530 /* Requeue the send work. When the work daemon runs again, it will try
1531 a new connection, then call this function again. */
1532 queue_work(send_workqueue, &con->swork);
1536 mutex_unlock(&con->sock_mutex);
1537 queue_work(send_workqueue, &con->swork);
1541 static void clean_one_writequeue(struct connection *con)
1543 struct writequeue_entry *e, *safe;
1545 spin_lock(&con->writequeue_lock);
1546 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1550 spin_unlock(&con->writequeue_lock);
1553 /* Called from recovery when it knows that a node has left the cluster */
1555 int dlm_lowcomms_close(int nodeid)
1557 struct connection *con;
1558 struct dlm_node_addr *na;
1560 log_print("closing connection to node %d", nodeid);
1561 con = nodeid2con(nodeid, 0);
1563 set_bit(CF_CLOSE, &con->flags);
1564 close_connection(con, true, true, true);
1565 clean_one_writequeue(con);
1568 spin_lock(&dlm_node_addrs_spin);
1569 na = find_node_addr(nodeid);
1571 list_del(&na->list);
1572 while (na->addr_count--)
1573 kfree(na->addr[na->addr_count]);
1576 spin_unlock(&dlm_node_addrs_spin);
1581 /* Receive workqueue function */
1582 static void process_recv_sockets(struct work_struct *work)
1584 struct connection *con = container_of(work, struct connection, rwork);
1587 clear_bit(CF_READ_PENDING, &con->flags);
1589 err = con->rx_action(con);
1593 /* Send workqueue function */
1594 static void process_send_sockets(struct work_struct *work)
1596 struct connection *con = container_of(work, struct connection, swork);
1598 clear_bit(CF_WRITE_PENDING, &con->flags);
1599 if (con->sock == NULL) /* not mutex protected so check it inside too */
1600 con->connect_action(con);
1601 if (!list_empty(&con->writequeue))
1606 /* Discard all entries on the write queues */
1607 static void clean_writequeues(void)
1609 foreach_conn(clean_one_writequeue);
1612 static void work_stop(void)
1614 destroy_workqueue(recv_workqueue);
1615 destroy_workqueue(send_workqueue);
1618 static int work_start(void)
1620 recv_workqueue = alloc_workqueue("dlm_recv",
1621 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1622 if (!recv_workqueue) {
1623 log_print("can't start dlm_recv");
1627 send_workqueue = alloc_workqueue("dlm_send",
1628 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1629 if (!send_workqueue) {
1630 log_print("can't start dlm_send");
1631 destroy_workqueue(recv_workqueue);
1638 static void _stop_conn(struct connection *con, bool and_other)
1640 mutex_lock(&con->sock_mutex);
1641 set_bit(CF_CLOSE, &con->flags);
1642 set_bit(CF_READ_PENDING, &con->flags);
1643 set_bit(CF_WRITE_PENDING, &con->flags);
1644 if (con->sock && con->sock->sk) {
1645 write_lock_bh(&con->sock->sk->sk_callback_lock);
1646 con->sock->sk->sk_user_data = NULL;
1647 write_unlock_bh(&con->sock->sk->sk_callback_lock);
1649 if (con->othercon && and_other)
1650 _stop_conn(con->othercon, false);
1651 mutex_unlock(&con->sock_mutex);
1654 static void stop_conn(struct connection *con)
1656 _stop_conn(con, true);
1659 static void free_conn(struct connection *con)
1661 close_connection(con, true, true, true);
1663 kmem_cache_free(con_cache, con->othercon);
1664 hlist_del(&con->list);
1665 kmem_cache_free(con_cache, con);
1668 static void work_flush(void)
1672 struct hlist_node *n;
1673 struct connection *con;
1675 flush_workqueue(recv_workqueue);
1676 flush_workqueue(send_workqueue);
1679 foreach_conn(stop_conn);
1680 flush_workqueue(recv_workqueue);
1681 flush_workqueue(send_workqueue);
1682 for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1683 hlist_for_each_entry_safe(con, n,
1684 &connection_hash[i], list) {
1685 ok &= test_bit(CF_READ_PENDING, &con->flags);
1686 ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1687 if (con->othercon) {
1688 ok &= test_bit(CF_READ_PENDING,
1689 &con->othercon->flags);
1690 ok &= test_bit(CF_WRITE_PENDING,
1691 &con->othercon->flags);
1698 void dlm_lowcomms_stop(void)
1700 /* Set all the flags to prevent any socket activity */
1703 mutex_lock(&connections_lock);
1705 mutex_unlock(&connections_lock);
1707 clean_writequeues();
1708 foreach_conn(free_conn);
1711 kmem_cache_destroy(con_cache);
1714 int dlm_lowcomms_start(void)
1716 int error = -EINVAL;
1717 struct connection *con;
1720 for (i = 0; i < CONN_HASH_SIZE; i++)
1721 INIT_HLIST_HEAD(&connection_hash[i]);
1724 if (!dlm_local_count) {
1726 log_print("no local IP address has been set");
1731 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1732 __alignof__(struct connection), 0,
1737 error = work_start();
1743 /* Start listening */
1744 if (dlm_config.ci_protocol == 0)
1745 error = tcp_listen_for_all();
1747 error = sctp_listen_for_all();
1755 con = nodeid2con(0, 0);
1757 close_connection(con, false, true, true);
1758 kmem_cache_free(con_cache, con);
1761 kmem_cache_destroy(con_cache);
1766 void dlm_lowcomms_exit(void)
1768 struct dlm_node_addr *na, *safe;
1770 spin_lock(&dlm_node_addrs_spin);
1771 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
1772 list_del(&na->list);
1773 while (na->addr_count--)
1774 kfree(na->addr[na->addr_count]);
1777 spin_unlock(&dlm_node_addrs_spin);