1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
17 * This is the "low-level" comms layer.
19 * It is responsible for sending messages to, and receiving messages
20 * from, other nodes in the cluster.
22 * Cluster nodes are referred to by their nodeids. nodeids are
23 * simply 32-bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is its
25 * responsibility. It is this layer's
26 * responsibility to resolve them into IP addresses or
27 * whatever else it needs for inter-node communication.
29 * The comms layer consists of two kernel threads: a receive thread that
30 * deals mainly with receiving messages from other nodes and passing them
31 * up to the mid-level comms layer (which understands the
32 * message format) for execution by the locking core, and
33 * a send thread which does all the setting up of connections
34 * to remote nodes and the sending of data. Other threads are not allowed
35 * to send their own data because it may cause them to wait in times
36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block.
39 * lowcomms will choose to use either TCP or SCTP as its transport layer
40 * depending on the configuration variable 'protocol'. This should be set
41 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
42 * cluster-wide mechanism as it must be the same on all nodes of the cluster
43 * for the DLM to function.
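 *
 * Illustrative usage sketch (not part of this file): other parts of the
 * DLM register each remote node's address with lowcomms and then ask it
 * to connect, roughly as follows.
 *
 *	struct sockaddr_storage addr;
 *
 *	(fill in addr with the remote node's address)
 *	dlm_lowcomms_addr(nodeid, &addr, sizeof(addr));
 *	dlm_lowcomms_connect_node(nodeid);
 *
 * Outgoing data is queued with dlm_lowcomms_get_buffer() and
 * dlm_lowcomms_commit_buffer(); see the sketch next to those functions
 * below.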
47 #include <asm/ioctls.h>
50 #include <linux/pagemap.h>
51 #include <linux/file.h>
52 #include <linux/mutex.h>
53 #include <linux/sctp.h>
54 #include <linux/slab.h>
55 #include <net/sctp/sctp.h>
58 #include "dlm_internal.h"
63 #define NEEDED_RMEM (4*1024*1024)
64 #define CONN_HASH_SIZE 32
66 /* Number of messages to send before rescheduling */
67 #define MAX_SEND_MSG_COUNT 25
75 static void cbuf_add(struct cbuf *cb, int n)
80 static int cbuf_data(struct cbuf *cb)
82 return ((cb->base + cb->len) & cb->mask);
85 static void cbuf_init(struct cbuf *cb, int size)
87 cb->base = cb->len = 0;
91 static void cbuf_eat(struct cbuf *cb, int n)
98 static bool cbuf_empty(struct cbuf *cb)
104 struct socket *sock; /* NULL if not connected */
105 uint32_t nodeid; /* So we know who we are in the list */
106 struct mutex sock_mutex;
108 #define CF_READ_PENDING 1
109 #define CF_WRITE_PENDING 2
110 #define CF_INIT_PENDING 4
111 #define CF_IS_OTHERCON 5
113 #define CF_APP_LIMITED 7
115 struct list_head writequeue; /* List of outgoing writequeue_entries */
116 spinlock_t writequeue_lock;
117 int (*rx_action) (struct connection *); /* What to do when active */
118 void (*connect_action) (struct connection *); /* What to do to connect */
119 struct page *rx_page;
122 #define MAX_CONNECT_RETRIES 3
123 struct hlist_node list;
124 struct connection *othercon;
125 struct work_struct rwork; /* Receive workqueue */
126 struct work_struct swork; /* Send workqueue */
128 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
130 /* An entry waiting to be sent */
131 struct writequeue_entry {
132 struct list_head list;
138 struct connection *con;
141 struct dlm_node_addr {
142 struct list_head list;
146 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
149 static struct listen_sock_callbacks {
150 void (*sk_error_report)(struct sock *);
151 void (*sk_data_ready)(struct sock *);
152 void (*sk_state_change)(struct sock *);
153 void (*sk_write_space)(struct sock *);
156 static LIST_HEAD(dlm_node_addrs);
157 static DEFINE_SPINLOCK(dlm_node_addrs_spin);
159 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
160 static int dlm_local_count;
161 static int dlm_allow_conn;
164 static struct workqueue_struct *recv_workqueue;
165 static struct workqueue_struct *send_workqueue;
167 static struct hlist_head connection_hash[CONN_HASH_SIZE];
168 static DEFINE_MUTEX(connections_lock);
169 static struct kmem_cache *con_cache;
171 static void process_recv_sockets(struct work_struct *work);
172 static void process_send_sockets(struct work_struct *work);
175 /* This is deliberately very simple because most clusters have simple
176    sequential nodeids, so we should be able to go straight to a connection
177    struct in the hash table */
178 static inline int nodeid_hash(int nodeid)
180 return nodeid & (CONN_HASH_SIZE-1);
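/* For example, with CONN_HASH_SIZE == 32 this is just the low five bits of
 * the nodeid: nodeid 3 hashes to bucket 3 and nodeid 35 to bucket
 * 35 & 31 == 3, so small sequential nodeids each get their own bucket.
 */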
183 static struct connection *__find_con(int nodeid)
186 struct connection *con;
188 r = nodeid_hash(nodeid);
190 hlist_for_each_entry(con, &connection_hash[r], list) {
191 if (con->nodeid == nodeid)
198 * If 'alloc' is zero then we don't attempt to create a new
199 * connection structure for this node.
201 static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
203 struct connection *con = NULL;
206 con = __find_con(nodeid);
210 con = kmem_cache_zalloc(con_cache, alloc);
214 r = nodeid_hash(nodeid);
215 hlist_add_head(&con->list, &connection_hash[r]);
217 con->nodeid = nodeid;
218 mutex_init(&con->sock_mutex);
219 INIT_LIST_HEAD(&con->writequeue);
220 spin_lock_init(&con->writequeue_lock);
221 INIT_WORK(&con->swork, process_send_sockets);
222 INIT_WORK(&con->rwork, process_recv_sockets);
224 /* Set up action pointers for child sockets */
226 struct connection *zerocon = __find_con(0);
228 con->connect_action = zerocon->connect_action;
230 con->rx_action = zerocon->rx_action;
236 /* Loop round all connections */
237 static void foreach_conn(void (*conn_func)(struct connection *c))
240 struct hlist_node *n;
241 struct connection *con;
243 for (i = 0; i < CONN_HASH_SIZE; i++) {
244 hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
249 static struct connection *nodeid2con(int nodeid, gfp_t allocation)
251 struct connection *con;
253 mutex_lock(&connections_lock);
254 con = __nodeid2con(nodeid, allocation);
255 mutex_unlock(&connections_lock);
260 static struct dlm_node_addr *find_node_addr(int nodeid)
262 struct dlm_node_addr *na;
264 list_for_each_entry(na, &dlm_node_addrs, list) {
265 if (na->nodeid == nodeid)
271 static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
273 switch (x->ss_family) {
275 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
276 struct sockaddr_in *siny = (struct sockaddr_in *)y;
277 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
279 if (sinx->sin_port != siny->sin_port)
284 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
285 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
286 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
288 if (sinx->sin6_port != siny->sin6_port)
298 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
299 struct sockaddr *sa_out, bool try_new_addr)
301 struct sockaddr_storage sas;
302 struct dlm_node_addr *na;
304 if (!dlm_local_count)
307 spin_lock(&dlm_node_addrs_spin);
308 na = find_node_addr(nodeid);
309 if (na && na->addr_count) {
310 memcpy(&sas, na->addr[na->curr_addr_index],
311 sizeof(struct sockaddr_storage));
314 na->curr_addr_index++;
315 if (na->curr_addr_index == na->addr_count)
316 na->curr_addr_index = 0;
319 spin_unlock(&dlm_node_addrs_spin);
328 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
333 if (dlm_local_addr[0]->ss_family == AF_INET) {
334 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
335 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
336 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
338 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
339 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
340 ret6->sin6_addr = in6->sin6_addr;
346 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
348 struct dlm_node_addr *na;
352 spin_lock(&dlm_node_addrs_spin);
353 list_for_each_entry(na, &dlm_node_addrs, list) {
357 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
358 if (addr_compare(na->addr[addr_i], addr)) {
359 *nodeid = na->nodeid;
366 spin_unlock(&dlm_node_addrs_spin);
370 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
372 struct sockaddr_storage *new_addr;
373 struct dlm_node_addr *new_node, *na;
375 new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
379 new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
385 memcpy(new_addr, addr, len);
387 spin_lock(&dlm_node_addrs_spin);
388 na = find_node_addr(nodeid);
390 new_node->nodeid = nodeid;
391 new_node->addr[0] = new_addr;
392 new_node->addr_count = 1;
393 list_add(&new_node->list, &dlm_node_addrs);
394 spin_unlock(&dlm_node_addrs_spin);
398 if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
399 spin_unlock(&dlm_node_addrs_spin);
405 na->addr[na->addr_count++] = new_addr;
406 spin_unlock(&dlm_node_addrs_spin);
411 /* Data available on socket or listen socket received a connect */
412 static void lowcomms_data_ready(struct sock *sk)
414 struct connection *con;
416 read_lock_bh(&sk->sk_callback_lock);
418 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
419 queue_work(recv_workqueue, &con->rwork);
420 read_unlock_bh(&sk->sk_callback_lock);
423 static void lowcomms_write_space(struct sock *sk)
425 struct connection *con;
427 read_lock_bh(&sk->sk_callback_lock);
432 clear_bit(SOCK_NOSPACE, &con->sock->flags);
434 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
435 con->sock->sk->sk_write_pending--;
436 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
439 queue_work(send_workqueue, &con->swork);
441 read_unlock_bh(&sk->sk_callback_lock);
444 static inline void lowcomms_connect_sock(struct connection *con)
446 if (test_bit(CF_CLOSE, &con->flags))
448 queue_work(send_workqueue, &con->swork);
452 static void lowcomms_state_change(struct sock *sk)
454 /* The SCTP layer does not call sk_data_ready when the connection
455  * is done, so we catch that signal here. Also, it
456  * doesn't switch socket state when entering shutdown, so we
457  * skip the write in that case.
459 if (sk->sk_shutdown) {
460 if (sk->sk_shutdown == RCV_SHUTDOWN)
461 lowcomms_data_ready(sk);
462 } else if (sk->sk_state == TCP_ESTABLISHED) {
463 lowcomms_write_space(sk);
467 int dlm_lowcomms_connect_node(int nodeid)
469 struct connection *con;
471 if (nodeid == dlm_our_nodeid())
474 con = nodeid2con(nodeid, GFP_NOFS);
477 lowcomms_connect_sock(con);
481 static void lowcomms_error_report(struct sock *sk)
483 struct connection *con;
484 struct sockaddr_storage saddr;
486 void (*orig_report)(struct sock *) = NULL;
488 read_lock_bh(&sk->sk_callback_lock);
493 orig_report = listen_sock.sk_error_report;
494 if (con->sock == NULL ||
495 kernel_getpeername(con->sock, (struct sockaddr *)&saddr, &buflen)) {
496 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
497 "sending to node %d, port %d, "
498 "sk_err=%d/%d\n", dlm_our_nodeid(),
499 con->nodeid, dlm_config.ci_tcp_port,
500 sk->sk_err, sk->sk_err_soft);
501 } else if (saddr.ss_family == AF_INET) {
502 struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
504 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
505 "sending to node %d at %pI4, port %d, "
506 "sk_err=%d/%d\n", dlm_our_nodeid(),
507 con->nodeid, &sin4->sin_addr.s_addr,
508 dlm_config.ci_tcp_port, sk->sk_err,
511 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
513 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
514 "sending to node %d at %u.%u.%u.%u, "
515 "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
516 con->nodeid, sin6->sin6_addr.s6_addr32[0],
517 sin6->sin6_addr.s6_addr32[1],
518 sin6->sin6_addr.s6_addr32[2],
519 sin6->sin6_addr.s6_addr32[3],
520 dlm_config.ci_tcp_port, sk->sk_err,
524 read_unlock_bh(&sk->sk_callback_lock);
529 /* Note: sk_callback_lock must be locked before calling this function. */
530 static void save_listen_callbacks(struct socket *sock)
532 struct sock *sk = sock->sk;
534 listen_sock.sk_data_ready = sk->sk_data_ready;
535 listen_sock.sk_state_change = sk->sk_state_change;
536 listen_sock.sk_write_space = sk->sk_write_space;
537 listen_sock.sk_error_report = sk->sk_error_report;
540 static void restore_callbacks(struct socket *sock)
542 struct sock *sk = sock->sk;
544 write_lock_bh(&sk->sk_callback_lock);
545 sk->sk_user_data = NULL;
546 sk->sk_data_ready = listen_sock.sk_data_ready;
547 sk->sk_state_change = listen_sock.sk_state_change;
548 sk->sk_write_space = listen_sock.sk_write_space;
549 sk->sk_error_report = listen_sock.sk_error_report;
550 write_unlock_bh(&sk->sk_callback_lock);
553 /* Make a socket active */
554 static void add_sock(struct socket *sock, struct connection *con)
556 struct sock *sk = sock->sk;
558 write_lock_bh(&sk->sk_callback_lock);
561 sk->sk_user_data = con;
562 /* Install a data_ready callback */
563 sk->sk_data_ready = lowcomms_data_ready;
564 sk->sk_write_space = lowcomms_write_space;
565 sk->sk_state_change = lowcomms_state_change;
566 sk->sk_allocation = GFP_NOFS;
567 sk->sk_error_report = lowcomms_error_report;
568 write_unlock_bh(&sk->sk_callback_lock);
571 /* Add the port number to an IPv6 or 4 sockaddr and return the address
572    length */
573 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
576 saddr->ss_family = dlm_local_addr[0]->ss_family;
577 if (saddr->ss_family == AF_INET) {
578 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
579 in4_addr->sin_port = cpu_to_be16(port);
580 *addr_len = sizeof(struct sockaddr_in);
581 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
583 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
584 in6_addr->sin6_port = cpu_to_be16(port);
585 *addr_len = sizeof(struct sockaddr_in6);
587 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
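/* Worked example (illustrative): for an AF_INET local address,
 * make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len) writes the port
 * into sin_port in network byte order, sets addr_len to
 * sizeof(struct sockaddr_in), and zeroes the rest of the sockaddr_storage.
 */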
590 /* Close a remote connection and tidy up */
591 static void close_connection(struct connection *con, bool and_other,
594 bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
596 if (tx && !closing && cancel_work_sync(&con->swork)) {
597 log_print("canceled swork for node %d", con->nodeid);
598 clear_bit(CF_WRITE_PENDING, &con->flags);
600 if (rx && !closing && cancel_work_sync(&con->rwork)) {
601 log_print("canceled rwork for node %d", con->nodeid);
602 clear_bit(CF_READ_PENDING, &con->flags);
605 mutex_lock(&con->sock_mutex);
607 restore_callbacks(con->sock);
608 sock_release(con->sock);
611 if (con->othercon && and_other) {
612 /* Will only re-enter once. */
613 close_connection(con->othercon, false, true, true);
616 __free_page(con->rx_page);
621 mutex_unlock(&con->sock_mutex);
622 clear_bit(CF_CLOSING, &con->flags);
625 /* Data received from remote end */
626 static int receive_from_sock(struct connection *con)
629 struct msghdr msg = {};
633 int call_again_soon = 0;
636 mutex_lock(&con->sock_mutex);
638 if (con->sock == NULL) {
642 if (con->nodeid == 0) {
647 if (con->rx_page == NULL) {
649 * This doesn't need to be atomic, but I think it should
650 * improve performance if it is.
652 con->rx_page = alloc_page(GFP_ATOMIC);
653 if (con->rx_page == NULL)
655 cbuf_init(&con->cb, PAGE_SIZE);
659 * iov[0] is the bit of the circular buffer between the current end
660 * point (cb.base + cb.len) and the end of the buffer.
662 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
663 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
668 * iov[1] is the bit of the circular buffer between the start of the
669 * buffer and the start of the currently used section (cb.base)
671 if (cbuf_data(&con->cb) >= con->cb.base) {
672 iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
673 iov[1].iov_len = con->cb.base;
674 iov[1].iov_base = page_address(con->rx_page);
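/* Worked example (assuming PAGE_SIZE == 4096): with cb.base == 1000 and
 * cb.len == 3000, cbuf_data() == 4000, so iov[0] covers the 96 free bytes
 * at offsets 4000..4095 and iov[1] covers the 1000 free bytes at offsets
 * 0..999; the 3000 bytes still queued at offsets 1000..3999 are untouched.
 */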
677 len = iov[0].iov_len + iov[1].iov_len;
679 r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
680 MSG_DONTWAIT | MSG_NOSIGNAL);
686 cbuf_add(&con->cb, ret);
687 ret = dlm_process_incoming_buffer(con->nodeid,
688 page_address(con->rx_page),
689 con->cb.base, con->cb.len,
691 if (ret == -EBADMSG) {
692 log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
693 page_address(con->rx_page), con->cb.base,
698 cbuf_eat(&con->cb, ret);
700 if (cbuf_empty(&con->cb) && !call_again_soon) {
701 __free_page(con->rx_page);
707 mutex_unlock(&con->sock_mutex);
711 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
712 queue_work(recv_workqueue, &con->rwork);
713 mutex_unlock(&con->sock_mutex);
717 mutex_unlock(&con->sock_mutex);
718 if (ret != -EAGAIN) {
719 close_connection(con, true, true, false);
720 /* Reconnect when there is something to send */
722 /* Don't return success if we really got EOF */
729 /* Listening socket is busy, accept a connection */
730 static int tcp_accept_from_sock(struct connection *con)
733 struct sockaddr_storage peeraddr;
734 struct socket *newsock;
737 struct connection *newcon;
738 struct connection *addcon;
740 mutex_lock(&connections_lock);
741 if (!dlm_allow_conn) {
742 mutex_unlock(&connections_lock);
745 mutex_unlock(&connections_lock);
747 mutex_lock_nested(&con->sock_mutex, 0);
750 mutex_unlock(&con->sock_mutex);
754 result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
758 /* Get the connected socket's peer */
759 memset(&peeraddr, 0, sizeof(peeraddr));
760 if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
762 result = -ECONNABORTED;
766 /* Get the new node's NODEID */
767 make_sockaddr(&peeraddr, 0, &len);
768 if (addr_to_nodeid(&peeraddr, &nodeid)) {
769 unsigned char *b=(unsigned char *)&peeraddr;
770 log_print("connect from non cluster node");
771 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
772 b, sizeof(struct sockaddr_storage));
773 sock_release(newsock);
774 mutex_unlock(&con->sock_mutex);
778 log_print("got connection from %d", nodeid);
780 /* Check to see if we already have a connection to this node. This
781 * could happen if the two nodes initiate a connection at roughly
782 * the same time and the connections cross on the wire.
783 * In this case we store the incoming one in "othercon"
785 newcon = nodeid2con(nodeid, GFP_NOFS);
790 mutex_lock_nested(&newcon->sock_mutex, 1);
792 struct connection *othercon = newcon->othercon;
795 othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
797 log_print("failed to allocate incoming socket");
798 mutex_unlock(&newcon->sock_mutex);
802 othercon->nodeid = nodeid;
803 othercon->rx_action = receive_from_sock;
804 mutex_init(&othercon->sock_mutex);
805 INIT_WORK(&othercon->swork, process_send_sockets);
806 INIT_WORK(&othercon->rwork, process_recv_sockets);
807 set_bit(CF_IS_OTHERCON, &othercon->flags);
809 mutex_lock_nested(&othercon->sock_mutex, 2);
810 if (!othercon->sock) {
811 newcon->othercon = othercon;
812 add_sock(newsock, othercon);
814 mutex_unlock(&othercon->sock_mutex);
817 printk("Extra connection from node %d attempted\n", nodeid);
819 mutex_unlock(&othercon->sock_mutex);
820 mutex_unlock(&newcon->sock_mutex);
825 newcon->rx_action = receive_from_sock;
826 /* accept copies the sk after we've saved the callbacks, so we
827 don't want to save them a second time or comm errors will
828 result in calling sk_error_report recursively. */
829 add_sock(newsock, newcon);
833 mutex_unlock(&newcon->sock_mutex);
836 * Add it to the active queue in case we got data
837 * between processing the accept and adding the socket
838 * to the read_sockets list
840 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
841 queue_work(recv_workqueue, &addcon->rwork);
842 mutex_unlock(&con->sock_mutex);
847 mutex_unlock(&con->sock_mutex);
849 sock_release(newsock);
851 if (result != -EAGAIN)
852 log_print("error accepting connection from node: %d", result);
856 static int sctp_accept_from_sock(struct connection *con)
858 /* Check that the new node is in the lockspace */
859 struct sctp_prim prim;
863 struct connection *newcon;
864 struct connection *addcon;
865 struct socket *newsock;
867 mutex_lock(&connections_lock);
868 if (!dlm_allow_conn) {
869 mutex_unlock(&connections_lock);
872 mutex_unlock(&connections_lock);
874 mutex_lock_nested(&con->sock_mutex, 0);
876 ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
880 memset(&prim, 0, sizeof(struct sctp_prim));
881 prim_len = sizeof(struct sctp_prim);
883 ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
884 (char *)&prim, &prim_len);
886 log_print("getsockopt/sctp_primary_addr failed: %d", ret);
890 make_sockaddr(&prim.ssp_addr, 0, &addr_len);
891 ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
893 unsigned char *b = (unsigned char *)&prim.ssp_addr;
895 log_print("reject connect from unknown addr");
896 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
897 b, sizeof(struct sockaddr_storage));
901 newcon = nodeid2con(nodeid, GFP_NOFS);
907 mutex_lock_nested(&newcon->sock_mutex, 1);
910 struct connection *othercon = newcon->othercon;
913 othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
915 log_print("failed to allocate incoming socket");
916 mutex_unlock(&newcon->sock_mutex);
920 othercon->nodeid = nodeid;
921 othercon->rx_action = receive_from_sock;
922 mutex_init(&othercon->sock_mutex);
923 INIT_WORK(&othercon->swork, process_send_sockets);
924 INIT_WORK(&othercon->rwork, process_recv_sockets);
925 set_bit(CF_IS_OTHERCON, &othercon->flags);
927 mutex_lock_nested(&othercon->sock_mutex, 2);
928 if (!othercon->sock) {
929 newcon->othercon = othercon;
930 add_sock(newsock, othercon);
932 mutex_unlock(&othercon->sock_mutex);
934 printk("Extra connection from node %d attempted\n", nodeid);
936 mutex_unlock(&othercon->sock_mutex);
937 mutex_unlock(&newcon->sock_mutex);
941 newcon->rx_action = receive_from_sock;
942 add_sock(newsock, newcon);
946 log_print("connected to %d", nodeid);
948 mutex_unlock(&newcon->sock_mutex);
951 * Add it to the active queue in case we got data
952 * between processing the accept and adding the socket
953 * to the read_sockets list
955 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
956 queue_work(recv_workqueue, &addcon->rwork);
957 mutex_unlock(&con->sock_mutex);
962 mutex_unlock(&con->sock_mutex);
964 sock_release(newsock);
966 log_print("error accepting connection from node: %d", ret);
971 static void free_entry(struct writequeue_entry *e)
973 __free_page(e->page);
978 * writequeue_entry_complete - try to delete and free write queue entry
979 * @e: write queue entry to try to delete
980 * @completed: bytes completed
982 * writequeue_lock must be held.
984 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
986 e->offset += completed;
989 if (e->len == 0 && e->users == 0) {
996 * sctp_bind_addrs - bind an SCTP socket to all our addresses
998 static int sctp_bind_addrs(struct connection *con, uint16_t port)
1000 struct sockaddr_storage localaddr;
1001 int i, addr_len, result = 0;
1003 for (i = 0; i < dlm_local_count; i++) {
1004 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1005 make_sockaddr(&localaddr, port, &addr_len);
1008 result = kernel_bind(con->sock,
1009 (struct sockaddr *)&localaddr,
1012 result = kernel_setsockopt(con->sock, SOL_SCTP,
1013 SCTP_SOCKOPT_BINDX_ADD,
1014 (char *)&localaddr, addr_len);
1017 log_print("Can't bind to %d addr number %d, %d.\n",
1018 port, i + 1, result);
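/* Usage in this file (see below): sctp_connect_to_sock() calls
 * sctp_bind_addrs(con, 0) so the kernel picks the source port for a new
 * association, while sctp_listen_for_all() calls
 * sctp_bind_addrs(con, dlm_config.ci_tcp_port) for the listening socket.
 * The first local address is bound with kernel_bind(); any further
 * addresses are added with the SCTP_SOCKOPT_BINDX_ADD socket option.
 */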
1025 /* Initiate an SCTP association.
1026 This is a special case of send_to_sock() in that we don't yet have a
1027 peeled-off socket for this association, so we use the listening socket
1028 and add the primary IP address of the remote node.
1030 static void sctp_connect_to_sock(struct connection *con)
1032 struct sockaddr_storage daddr;
1036 struct socket *sock;
1038 if (con->nodeid == 0) {
1039 log_print("attempt to connect sock 0 foiled");
1043 mutex_lock(&con->sock_mutex);
1045 /* Some odd races can cause double-connects, ignore them */
1046 if (con->retries++ > MAX_CONNECT_RETRIES)
1050 log_print("node %d already connected.", con->nodeid);
1054 memset(&daddr, 0, sizeof(daddr));
1055 result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
1057 log_print("no address for nodeid %d", con->nodeid);
1061 /* Create a socket to communicate with */
1062 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1063 SOCK_STREAM, IPPROTO_SCTP, &sock);
1067 con->rx_action = receive_from_sock;
1068 con->connect_action = sctp_connect_to_sock;
1069 add_sock(sock, con);
1071 /* Bind to all addresses. */
1072 if (sctp_bind_addrs(con, 0))
1075 make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
1077 log_print("connecting to %d", con->nodeid);
1079 /* Turn off Nagle's algorithm */
1080 kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1083 result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
1085 if (result == -EINPROGRESS)
1096 * Some errors are fatal and this list might need adjusting. For other
1097 * errors we try again until the max number of retries is reached.
1099 if (result != -EHOSTUNREACH &&
1100 result != -ENETUNREACH &&
1101 result != -ENETDOWN &&
1102 result != -EINVAL &&
1103 result != -EPROTONOSUPPORT) {
1104 log_print("connect %d try %d error %d", con->nodeid,
1105 con->retries, result);
1106 mutex_unlock(&con->sock_mutex);
1108 lowcomms_connect_sock(con);
1113 mutex_unlock(&con->sock_mutex);
1116 /* Connect a new socket to its peer */
1117 static void tcp_connect_to_sock(struct connection *con)
1119 struct sockaddr_storage saddr, src_addr;
1121 struct socket *sock = NULL;
1125 if (con->nodeid == 0) {
1126 log_print("attempt to connect sock 0 foiled");
1130 mutex_lock(&con->sock_mutex);
1131 if (con->retries++ > MAX_CONNECT_RETRIES)
1134 /* Some odd races can cause double-connects, ignore them */
1138 /* Create a socket to communicate with */
1139 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1140 SOCK_STREAM, IPPROTO_TCP, &sock);
1144 memset(&saddr, 0, sizeof(saddr));
1145 result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
1147 log_print("no address for nodeid %d", con->nodeid);
1151 con->rx_action = receive_from_sock;
1152 con->connect_action = tcp_connect_to_sock;
1153 add_sock(sock, con);
1155 /* Bind to our cluster-known address when connecting to avoid
1156    routing problems */
1157 memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
1158 make_sockaddr(&src_addr, 0, &addr_len);
1159 result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
1162 log_print("could not bind for connect: %d", result);
1163 /* This *may* not indicate a critical error */
1166 make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
1168 log_print("connecting to %d", con->nodeid);
1170 /* Turn off Nagle's algorithm */
1171 kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1174 result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
1176 if (result == -EINPROGRESS)
1183 sock_release(con->sock);
1189 * Some errors are fatal and this list might need adjusting. For other
1190 * errors we try again until the max number of retries is reached.
1192 if (result != -EHOSTUNREACH &&
1193 result != -ENETUNREACH &&
1194 result != -ENETDOWN &&
1195 result != -EINVAL &&
1196 result != -EPROTONOSUPPORT) {
1197 log_print("connect %d try %d error %d", con->nodeid,
1198 con->retries, result);
1199 mutex_unlock(&con->sock_mutex);
1201 lowcomms_connect_sock(con);
1205 mutex_unlock(&con->sock_mutex);
1209 static struct socket *tcp_create_listen_sock(struct connection *con,
1210 struct sockaddr_storage *saddr)
1212 struct socket *sock = NULL;
1217 if (dlm_local_addr[0]->ss_family == AF_INET)
1218 addr_len = sizeof(struct sockaddr_in);
1220 addr_len = sizeof(struct sockaddr_in6);
1222 /* Create a socket to communicate with */
1223 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1224 SOCK_STREAM, IPPROTO_TCP, &sock);
1226 log_print("Can't create listening comms socket");
1230 /* Turn off Nagle's algorithm */
1231 kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
1234 result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
1235 (char *)&one, sizeof(one));
1238 log_print("Failed to set SO_REUSEADDR on socket: %d", result);
1240 write_lock_bh(&sock->sk->sk_callback_lock);
1241 sock->sk->sk_user_data = con;
1242 save_listen_callbacks(sock);
1243 con->rx_action = tcp_accept_from_sock;
1244 con->connect_action = tcp_connect_to_sock;
1245 write_unlock_bh(&sock->sk->sk_callback_lock);
1247 /* Bind to our port */
1248 make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
1249 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
1251 log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
1257 result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
1258 (char *)&one, sizeof(one));
1260 log_print("Set keepalive failed: %d", result);
1263 result = sock->ops->listen(sock, 5);
1265 log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
1275 /* Get local addresses */
1276 static void init_local(void)
1278 struct sockaddr_storage sas, *addr;
1281 dlm_local_count = 0;
1282 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1283 if (dlm_our_addr(&sas, i))
1286 addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
1289 dlm_local_addr[dlm_local_count++] = addr;
1293 /* Initialise SCTP socket and bind to all interfaces */
1294 static int sctp_listen_for_all(void)
1296 struct socket *sock = NULL;
1297 int result = -EINVAL;
1298 struct connection *con = nodeid2con(0, GFP_NOFS);
1299 int bufsize = NEEDED_RMEM;
1305 log_print("Using SCTP for communications");
1307 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1308 SOCK_STREAM, IPPROTO_SCTP, &sock);
1310 log_print("Can't create comms socket, check SCTP is loaded");
1314 result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
1315 (char *)&bufsize, sizeof(bufsize));
1317 log_print("Error increasing buffer space on socket %d", result);
1319 result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
1322 log_print("Could not set SCTP NODELAY error %d\n", result);
1324 write_lock_bh(&sock->sk->sk_callback_lock);
1325 /* Init con struct */
1326 sock->sk->sk_user_data = con;
1327 save_listen_callbacks(sock);
1329 con->sock->sk->sk_data_ready = lowcomms_data_ready;
1330 con->rx_action = sctp_accept_from_sock;
1331 con->connect_action = sctp_connect_to_sock;
1333 write_unlock_bh(&sock->sk->sk_callback_lock);
1335 /* Bind to all addresses. */
1336 if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
1337 goto create_delsock;
1339 result = sock->ops->listen(sock, 5);
1341 log_print("Can't set socket listening");
1342 goto create_delsock;
1354 static int tcp_listen_for_all(void)
1356 struct socket *sock = NULL;
1357 struct connection *con = nodeid2con(0, GFP_NOFS);
1358 int result = -EINVAL;
1363 /* We don't support multi-homed hosts */
1364 if (dlm_local_addr[1] != NULL) {
1365 log_print("TCP protocol can't handle multi-homed hosts, "
1370 log_print("Using TCP for communications");
1372 sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1374 add_sock(sock, con);
1378 result = -EADDRINUSE;
1386 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1389 struct writequeue_entry *entry;
1391 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
1395 entry->page = alloc_page(allocation);
1410 void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
1412 struct connection *con;
1413 struct writequeue_entry *e;
1416 con = nodeid2con(nodeid, allocation);
1420 spin_lock(&con->writequeue_lock);
1421 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1422 if ((&e->list == &con->writequeue) ||
1423 (PAGE_SIZE - e->end < len)) {
1430 spin_unlock(&con->writequeue_lock);
1434 *ppc = page_address(e->page) + offset;
1438 e = new_writequeue_entry(con, allocation);
1440 spin_lock(&con->writequeue_lock);
1444 list_add_tail(&e->list, &con->writequeue);
1445 spin_unlock(&con->writequeue_lock);
1451 void dlm_lowcomms_commit_buffer(void *mh)
1453 struct writequeue_entry *e = (struct writequeue_entry *)mh;
1454 struct connection *con = e->con;
1457 spin_lock(&con->writequeue_lock);
1461 e->len = e->end - e->offset;
1462 spin_unlock(&con->writequeue_lock);
1464 queue_work(send_workqueue, &con->swork);
1468 spin_unlock(&con->writequeue_lock);
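/* Minimal usage sketch (illustrative only; example_send() is hypothetical
 * and not part of this file): a caller reserves space on the node's write
 * queue, fills it in place, then commits it so the send workqueue pushes
 * it out.
 *
 *	static int example_send(int nodeid, const void *data, int len)
 *	{
 *		void *mh;
 *		char *p;
 *
 *		mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &p);
 *		if (!mh)
 *			return -ENOMEM;
 *		memcpy(p, data, len);
 *		dlm_lowcomms_commit_buffer(mh);
 *		return 0;
 *	}
 */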
1472 /* Send a message */
1473 static void send_to_sock(struct connection *con)
1476 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1477 struct writequeue_entry *e;
1481 mutex_lock(&con->sock_mutex);
1482 if (con->sock == NULL)
1485 spin_lock(&con->writequeue_lock);
1487 e = list_entry(con->writequeue.next, struct writequeue_entry,
1489 if ((struct list_head *) e == &con->writequeue)
1494 BUG_ON(len == 0 && e->users == 0);
1495 spin_unlock(&con->writequeue_lock);
1499 ret = kernel_sendpage(con->sock, e->page, offset, len,
1501 if (ret == -EAGAIN || ret == 0) {
1502 if (ret == -EAGAIN &&
1503 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1504 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1505 /* Notify TCP that we're limited by the
1506 * application window size.
1508 set_bit(SOCK_NOSPACE, &con->sock->flags);
1509 con->sock->sk->sk_write_pending++;
1517 /* Don't starve people filling buffers */
1518 if (++count >= MAX_SEND_MSG_COUNT) {
1523 spin_lock(&con->writequeue_lock);
1524 writequeue_entry_complete(e, ret);
1526 spin_unlock(&con->writequeue_lock);
1528 mutex_unlock(&con->sock_mutex);
1532 mutex_unlock(&con->sock_mutex);
1533 close_connection(con, true, false, true);
1534 /* Requeue the send work. When the work daemon runs again, it will try
1535 a new connection, then call this function again. */
1536 queue_work(send_workqueue, &con->swork);
1540 mutex_unlock(&con->sock_mutex);
1541 queue_work(send_workqueue, &con->swork);
1545 static void clean_one_writequeue(struct connection *con)
1547 struct writequeue_entry *e, *safe;
1549 spin_lock(&con->writequeue_lock);
1550 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1554 spin_unlock(&con->writequeue_lock);
1557 /* Called from recovery when it knows that a node has
1558    left the cluster */
1559 int dlm_lowcomms_close(int nodeid)
1561 struct connection *con;
1562 struct dlm_node_addr *na;
1564 log_print("closing connection to node %d", nodeid);
1565 con = nodeid2con(nodeid, 0);
1567 set_bit(CF_CLOSE, &con->flags);
1568 close_connection(con, true, true, true);
1569 clean_one_writequeue(con);
1572 spin_lock(&dlm_node_addrs_spin);
1573 na = find_node_addr(nodeid);
1575 list_del(&na->list);
1576 while (na->addr_count--)
1577 kfree(na->addr[na->addr_count]);
1580 spin_unlock(&dlm_node_addrs_spin);
1585 /* Receive workqueue function */
1586 static void process_recv_sockets(struct work_struct *work)
1588 struct connection *con = container_of(work, struct connection, rwork);
1591 clear_bit(CF_READ_PENDING, &con->flags);
1593 err = con->rx_action(con);
1597 /* Send workqueue function */
1598 static void process_send_sockets(struct work_struct *work)
1600 struct connection *con = container_of(work, struct connection, swork);
1602 clear_bit(CF_WRITE_PENDING, &con->flags);
1603 if (con->sock == NULL) /* not mutex protected so check it inside too */
1604 con->connect_action(con);
1605 if (!list_empty(&con->writequeue))
1610 /* Discard all entries on the write queues */
1611 static void clean_writequeues(void)
1613 foreach_conn(clean_one_writequeue);
1616 static void work_stop(void)
1618 destroy_workqueue(recv_workqueue);
1619 destroy_workqueue(send_workqueue);
1622 static int work_start(void)
1624 recv_workqueue = alloc_workqueue("dlm_recv",
1625 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1626 if (!recv_workqueue) {
1627 log_print("can't start dlm_recv");
1631 send_workqueue = alloc_workqueue("dlm_send",
1632 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1633 if (!send_workqueue) {
1634 log_print("can't start dlm_send");
1635 destroy_workqueue(recv_workqueue);
1642 static void _stop_conn(struct connection *con, bool and_other)
1644 mutex_lock(&con->sock_mutex);
1645 set_bit(CF_CLOSE, &con->flags);
1646 set_bit(CF_READ_PENDING, &con->flags);
1647 set_bit(CF_WRITE_PENDING, &con->flags);
1648 if (con->sock && con->sock->sk) {
1649 write_lock_bh(&con->sock->sk->sk_callback_lock);
1650 con->sock->sk->sk_user_data = NULL;
1651 write_unlock_bh(&con->sock->sk->sk_callback_lock);
1653 if (con->othercon && and_other)
1654 _stop_conn(con->othercon, false);
1655 mutex_unlock(&con->sock_mutex);
1658 static void stop_conn(struct connection *con)
1660 _stop_conn(con, true);
1663 static void free_conn(struct connection *con)
1665 close_connection(con, true, true, true);
1667 kmem_cache_free(con_cache, con->othercon);
1668 hlist_del(&con->list);
1669 kmem_cache_free(con_cache, con);
1672 static void work_flush(void)
1676 struct hlist_node *n;
1677 struct connection *con;
1679 flush_workqueue(recv_workqueue);
1680 flush_workqueue(send_workqueue);
1683 foreach_conn(stop_conn);
1684 flush_workqueue(recv_workqueue);
1685 flush_workqueue(send_workqueue);
1686 for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1687 hlist_for_each_entry_safe(con, n,
1688 &connection_hash[i], list) {
1689 ok &= test_bit(CF_READ_PENDING, &con->flags);
1690 ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1691 if (con->othercon) {
1692 ok &= test_bit(CF_READ_PENDING,
1693 &con->othercon->flags);
1694 ok &= test_bit(CF_WRITE_PENDING,
1695 &con->othercon->flags);
1702 void dlm_lowcomms_stop(void)
1704 /* Set all the flags to prevent any
1705    socket activity. */
1707 mutex_lock(&connections_lock);
1709 mutex_unlock(&connections_lock);
1711 clean_writequeues();
1712 foreach_conn(free_conn);
1715 kmem_cache_destroy(con_cache);
1718 int dlm_lowcomms_start(void)
1720 int error = -EINVAL;
1721 struct connection *con;
1724 for (i = 0; i < CONN_HASH_SIZE; i++)
1725 INIT_HLIST_HEAD(&connection_hash[i]);
1728 if (!dlm_local_count) {
1730 log_print("no local IP address has been set");
1735 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1736 __alignof__(struct connection), 0,
1741 error = work_start();
1747 /* Start listening */
1748 if (dlm_config.ci_protocol == 0)
1749 error = tcp_listen_for_all();
1751 error = sctp_listen_for_all();
1759 con = nodeid2con(0,0);
1761 close_connection(con, false, true, true);
1762 kmem_cache_free(con_cache, con);
1765 kmem_cache_destroy(con_cache);
1770 void dlm_lowcomms_exit(void)
1772 struct dlm_node_addr *na, *safe;
1774 spin_lock(&dlm_node_addrs_spin);
1775 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
1776 list_del(&na->list);
1777 while (na->addr_count--)
1778 kfree(na->addr[na->addr_count]);
1781 spin_unlock(&dlm_node_addrs_spin);