1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
9 *******************************************************************************
10 ******************************************************************************/
15 * This is the "low-level" comms layer.
17 * It is responsible for sending/receiving messages
18 * from other nodes in the cluster.
20 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32-bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's responsibility to resolve
 * these into IP addresses or whatever else it needs for
 * inter-node communication.
27 * The comms level is two kernel threads that deal mainly with
28 * the receiving of messages from other nodes and passing them
29 * up to the mid-level comms layer (which understands the
30 * message format) for execution by the locking core, and
31 * a send thread which does all the setting up of connections
32 * to remote nodes and the sending of data. Threads are not allowed
33 * to send their own data because it may cause them to wait in times
34 * of high load. Also, this way, the sending thread can collect together
35 * messages bound for one node and send them in one block.
37 * lowcomms will choose to use either TCP or SCTP as its transport layer
38 * depending on the configuration variable 'protocol'. This should be set
39 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
40 * cluster-wide mechanism as it must be the same on all nodes of the cluster
41 * for the DLM to function.
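 *
 * As an illustration only (the exact path depends on where configfs is
 * mounted, typically /sys/kernel/config, and this is normally driven by the
 * cluster manager rather than done by hand), switching a cluster to SCTP
 * would look roughly like:
 *
 *   mount -t configfs none /sys/kernel/config
 *   echo 1 > /sys/kernel/config/dlm/cluster/protocol
 *
 * run on every node before any lockspace is created.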
45 #include <asm/ioctls.h>
48 #include <linux/pagemap.h>
49 #include <linux/file.h>
50 #include <linux/mutex.h>
51 #include <linux/sctp.h>
52 #include <linux/slab.h>
53 #include <net/sctp/sctp.h>
56 #include "dlm_internal.h"
61 #define NEEDED_RMEM (4*1024*1024)
62 #define CONN_HASH_SIZE 32
64 /* Number of messages to send before rescheduling */
65 #define MAX_SEND_MSG_COUNT 25
66 #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
69 struct socket *sock; /* NULL if not connected */
70 uint32_t nodeid; /* So we know who we are in the list */
71 struct mutex sock_mutex;
73 #define CF_READ_PENDING 1
74 #define CF_WRITE_PENDING 2
75 #define CF_INIT_PENDING 4
76 #define CF_IS_OTHERCON 5
78 #define CF_APP_LIMITED 7
81 #define CF_CONNECTED 10
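/* Note: the CF_* values above are bit numbers in con->flags, used with
 * set_bit()/test_bit() and friends; they are not bitmasks that could be
 * OR'ed together.
 */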
82 struct list_head writequeue; /* List of outgoing writequeue_entries */
83 spinlock_t writequeue_lock;
84 void (*connect_action) (struct connection *); /* What to do to connect */
85 void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
87 #define MAX_CONNECT_RETRIES 3
88 struct hlist_node list;
89 struct connection *othercon;
90 struct work_struct rwork; /* Receive workqueue */
91 struct work_struct swork; /* Send workqueue */
92 wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
93 unsigned char *rx_buf;
98 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
100 struct listen_connection {
102 struct work_struct rwork;
105 #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
106 #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)
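/* Each writequeue_entry wraps one page: e->end is the offset of the first
 * free byte in the page and e->offset the first byte not yet handed to the
 * socket, so DLM_WQ_REMAIN_BYTES() is the room left for appending new
 * messages and DLM_WQ_LENGTH_BYTES() the amount currently queued between
 * those two marks (see new_wq_entry() and writequeue_entry_complete()).
 */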
108 /* An entry waiting to be sent */
109 struct writequeue_entry {
110 struct list_head list;
116 struct connection *con;
119 struct dlm_node_addr {
120 struct list_head list;
125 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
128 static struct listen_sock_callbacks {
129 void (*sk_error_report)(struct sock *);
130 void (*sk_data_ready)(struct sock *);
131 void (*sk_state_change)(struct sock *);
132 void (*sk_write_space)(struct sock *);
135 static LIST_HEAD(dlm_node_addrs);
136 static DEFINE_SPINLOCK(dlm_node_addrs_spin);
138 static struct listen_connection listen_con;
139 static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
140 static int dlm_local_count;
144 static struct workqueue_struct *recv_workqueue;
145 static struct workqueue_struct *send_workqueue;
147 static struct hlist_head connection_hash[CONN_HASH_SIZE];
148 static DEFINE_SPINLOCK(connections_lock);
149 DEFINE_STATIC_SRCU(connections_srcu);
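/* Lookups of connection_hash (__find_con(), foreach_conn()) run under the
 * connections_srcu read lock only; additions and removals also serialize
 * against each other with connections_lock and use the hlist _rcu helpers
 * plus call_srcu(), so readers never block on connection setup or teardown.
 */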
151 static void process_recv_sockets(struct work_struct *work);
152 static void process_send_sockets(struct work_struct *work);
154 static void sctp_connect_to_sock(struct connection *con);
155 static void tcp_connect_to_sock(struct connection *con);
156 static void dlm_tcp_shutdown(struct connection *con);
158 /* This is deliberately very simple because most clusters have simple
159 sequential nodeids, so we should be able to go straight to a connection
160 struct in the array */
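/* For example, with CONN_HASH_SIZE of 32 a cluster using nodeids 1..4 gets
 * one connection per bucket with no collisions, while nodeid 33 would share
 * bucket 1 with nodeid 1 (33 & 31 == 1).
 */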
161 static inline int nodeid_hash(int nodeid)
163 return nodeid & (CONN_HASH_SIZE-1);
166 static struct connection *__find_con(int nodeid)
169 struct connection *con;
171 r = nodeid_hash(nodeid);
173 idx = srcu_read_lock(&connections_srcu);
174 hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
175 if (con->nodeid == nodeid) {
176 srcu_read_unlock(&connections_srcu, idx);
180 srcu_read_unlock(&connections_srcu, idx);
185 static int dlm_con_init(struct connection *con, int nodeid)
187 con->rx_buflen = dlm_config.ci_buffer_size;
188 con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
192 con->nodeid = nodeid;
193 mutex_init(&con->sock_mutex);
194 INIT_LIST_HEAD(&con->writequeue);
195 spin_lock_init(&con->writequeue_lock);
196 INIT_WORK(&con->swork, process_send_sockets);
197 INIT_WORK(&con->rwork, process_recv_sockets);
198 init_waitqueue_head(&con->shutdown_wait);
200 if (dlm_config.ci_protocol == 0) {
201 con->connect_action = tcp_connect_to_sock;
202 con->shutdown_action = dlm_tcp_shutdown;
204 con->connect_action = sctp_connect_to_sock;
211 * If 'allocation' is zero then we don't attempt to create a new
212 * connection structure for this node.
214 static struct connection *nodeid2con(int nodeid, gfp_t alloc)
216 struct connection *con, *tmp;
219 con = __find_con(nodeid);
223 con = kzalloc(sizeof(*con), alloc);
227 ret = dlm_con_init(con, nodeid);
233 r = nodeid_hash(nodeid);
235 spin_lock(&connections_lock);
/* Because multiple workqueues/threads call this function it can
 * race on multiple CPUs. Instead of locking the hot path
 * __find_con() we simply recheck, under protection of
 * connections_lock, whether the node was added in the meantime.
 * If so we abort our connection creation and return the
 * existing connection.
242 tmp = __find_con(nodeid);
244 spin_unlock(&connections_lock);
250 hlist_add_head_rcu(&con->list, &connection_hash[r]);
251 spin_unlock(&connections_lock);
256 /* Loop round all connections */
257 static void foreach_conn(void (*conn_func)(struct connection *c))
260 struct connection *con;
262 idx = srcu_read_lock(&connections_srcu);
263 for (i = 0; i < CONN_HASH_SIZE; i++) {
264 hlist_for_each_entry_rcu(con, &connection_hash[i], list)
267 srcu_read_unlock(&connections_srcu, idx);
270 static struct dlm_node_addr *find_node_addr(int nodeid)
272 struct dlm_node_addr *na;
274 list_for_each_entry(na, &dlm_node_addrs, list) {
275 if (na->nodeid == nodeid)
281 static int addr_compare(const struct sockaddr_storage *x,
282 const struct sockaddr_storage *y)
284 switch (x->ss_family) {
286 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
287 struct sockaddr_in *siny = (struct sockaddr_in *)y;
288 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
290 if (sinx->sin_port != siny->sin_port)
295 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
296 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
297 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
299 if (sinx->sin6_port != siny->sin6_port)
309 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
310 struct sockaddr *sa_out, bool try_new_addr,
313 struct sockaddr_storage sas;
314 struct dlm_node_addr *na;
316 if (!dlm_local_count)
319 spin_lock(&dlm_node_addrs_spin);
320 na = find_node_addr(nodeid);
321 if (na && na->addr_count) {
322 memcpy(&sas, na->addr[na->curr_addr_index],
323 sizeof(struct sockaddr_storage));
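/* if the caller asked for a new address (try_new_addr, used by the
 * SCTP connect path), advance to the next stored address so that
 * repeated connection attempts rotate through every address known
 * for this node
 */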
326 na->curr_addr_index++;
327 if (na->curr_addr_index == na->addr_count)
328 na->curr_addr_index = 0;
331 spin_unlock(&dlm_node_addrs_spin);
342 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
347 if (dlm_local_addr[0]->ss_family == AF_INET) {
348 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
349 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
350 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
352 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
353 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
354 ret6->sin6_addr = in6->sin6_addr;
360 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
363 struct dlm_node_addr *na;
367 spin_lock(&dlm_node_addrs_spin);
368 list_for_each_entry(na, &dlm_node_addrs, list) {
372 for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
373 if (addr_compare(na->addr[addr_i], addr)) {
374 *nodeid = na->nodeid;
382 spin_unlock(&dlm_node_addrs_spin);
/* caller must hold the dlm_node_addrs_spin lock */
387 static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
388 const struct sockaddr_storage *addr)
392 for (i = 0; i < na->addr_count; i++) {
393 if (addr_compare(na->addr[i], addr))
400 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
402 struct sockaddr_storage *new_addr;
403 struct dlm_node_addr *new_node, *na;
406 new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
410 new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
416 memcpy(new_addr, addr, len);
418 spin_lock(&dlm_node_addrs_spin);
419 na = find_node_addr(nodeid);
421 new_node->nodeid = nodeid;
422 new_node->addr[0] = new_addr;
423 new_node->addr_count = 1;
424 new_node->mark = dlm_config.ci_mark;
425 list_add(&new_node->list, &dlm_node_addrs);
426 spin_unlock(&dlm_node_addrs_spin);
430 ret = dlm_lowcomms_na_has_addr(na, addr);
432 spin_unlock(&dlm_node_addrs_spin);
438 if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
439 spin_unlock(&dlm_node_addrs_spin);
445 na->addr[na->addr_count++] = new_addr;
446 spin_unlock(&dlm_node_addrs_spin);
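/* The registration above is normally performed by the DLM configfs code
 * (fs/dlm/config.c) once for each address userspace writes for a node,
 * before dlm_lowcomms_connect_node() is used for that nodeid.
 */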
451 /* Data available on socket or listen socket received a connect */
452 static void lowcomms_data_ready(struct sock *sk)
454 struct connection *con;
456 read_lock_bh(&sk->sk_callback_lock);
458 if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
459 queue_work(recv_workqueue, &con->rwork);
460 read_unlock_bh(&sk->sk_callback_lock);
463 static void lowcomms_listen_data_ready(struct sock *sk)
465 queue_work(recv_workqueue, &listen_con.rwork);
468 static void lowcomms_write_space(struct sock *sk)
470 struct connection *con;
472 read_lock_bh(&sk->sk_callback_lock);
477 if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
log_print("successfully connected to node %d", con->nodeid);
479 queue_work(send_workqueue, &con->swork);
483 clear_bit(SOCK_NOSPACE, &con->sock->flags);
485 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
486 con->sock->sk->sk_write_pending--;
487 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
490 queue_work(send_workqueue, &con->swork);
492 read_unlock_bh(&sk->sk_callback_lock);
495 static inline void lowcomms_connect_sock(struct connection *con)
497 if (test_bit(CF_CLOSE, &con->flags))
499 queue_work(send_workqueue, &con->swork);
503 static void lowcomms_state_change(struct sock *sk)
505 /* SCTP layer is not calling sk_data_ready when the connection
506 * is done, so we catch the signal through here. Also, it
507 * doesn't switch socket state when entering shutdown, so we
508 * skip the write in that case.
510 if (sk->sk_shutdown) {
511 if (sk->sk_shutdown == RCV_SHUTDOWN)
512 lowcomms_data_ready(sk);
513 } else if (sk->sk_state == TCP_ESTABLISHED) {
514 lowcomms_write_space(sk);
518 int dlm_lowcomms_connect_node(int nodeid)
520 struct connection *con;
522 if (nodeid == dlm_our_nodeid())
525 con = nodeid2con(nodeid, GFP_NOFS);
528 lowcomms_connect_sock(con);
532 int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
534 struct dlm_node_addr *na;
536 spin_lock(&dlm_node_addrs_spin);
537 na = find_node_addr(nodeid);
539 spin_unlock(&dlm_node_addrs_spin);
544 spin_unlock(&dlm_node_addrs_spin);
549 static void lowcomms_error_report(struct sock *sk)
551 struct connection *con;
552 struct sockaddr_storage saddr;
553 void (*orig_report)(struct sock *) = NULL;
555 read_lock_bh(&sk->sk_callback_lock);
560 orig_report = listen_sock.sk_error_report;
561 if (con->sock == NULL ||
562 kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
563 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
564 "sending to node %d, port %d, "
565 "sk_err=%d/%d\n", dlm_our_nodeid(),
566 con->nodeid, dlm_config.ci_tcp_port,
567 sk->sk_err, sk->sk_err_soft);
568 } else if (saddr.ss_family == AF_INET) {
569 struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
571 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
572 "sending to node %d at %pI4, port %d, "
573 "sk_err=%d/%d\n", dlm_our_nodeid(),
574 con->nodeid, &sin4->sin_addr.s_addr,
575 dlm_config.ci_tcp_port, sk->sk_err,
578 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
580 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
581 "sending to node %d at %u.%u.%u.%u, "
582 "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
583 con->nodeid, sin6->sin6_addr.s6_addr32[0],
584 sin6->sin6_addr.s6_addr32[1],
585 sin6->sin6_addr.s6_addr32[2],
586 sin6->sin6_addr.s6_addr32[3],
587 dlm_config.ci_tcp_port, sk->sk_err,
591 read_unlock_bh(&sk->sk_callback_lock);
596 /* Note: sk_callback_lock must be locked before calling this function. */
597 static void save_listen_callbacks(struct socket *sock)
599 struct sock *sk = sock->sk;
601 listen_sock.sk_data_ready = sk->sk_data_ready;
602 listen_sock.sk_state_change = sk->sk_state_change;
603 listen_sock.sk_write_space = sk->sk_write_space;
604 listen_sock.sk_error_report = sk->sk_error_report;
607 static void restore_callbacks(struct socket *sock)
609 struct sock *sk = sock->sk;
611 write_lock_bh(&sk->sk_callback_lock);
612 sk->sk_user_data = NULL;
613 sk->sk_data_ready = listen_sock.sk_data_ready;
614 sk->sk_state_change = listen_sock.sk_state_change;
615 sk->sk_write_space = listen_sock.sk_write_space;
616 sk->sk_error_report = listen_sock.sk_error_report;
617 write_unlock_bh(&sk->sk_callback_lock);
620 static void add_listen_sock(struct socket *sock, struct listen_connection *con)
622 struct sock *sk = sock->sk;
624 write_lock_bh(&sk->sk_callback_lock);
625 save_listen_callbacks(sock);
628 sk->sk_user_data = con;
629 sk->sk_allocation = GFP_NOFS;
630 /* Install a data_ready callback */
631 sk->sk_data_ready = lowcomms_listen_data_ready;
632 write_unlock_bh(&sk->sk_callback_lock);
635 /* Make a socket active */
636 static void add_sock(struct socket *sock, struct connection *con)
638 struct sock *sk = sock->sk;
640 write_lock_bh(&sk->sk_callback_lock);
643 sk->sk_user_data = con;
644 /* Install a data_ready callback */
645 sk->sk_data_ready = lowcomms_data_ready;
646 sk->sk_write_space = lowcomms_write_space;
647 sk->sk_state_change = lowcomms_state_change;
648 sk->sk_allocation = GFP_NOFS;
649 sk->sk_error_report = lowcomms_error_report;
650 write_unlock_bh(&sk->sk_callback_lock);
/* Add the port number to an IPv6 or IPv4 sockaddr and return the address length */
655 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
658 saddr->ss_family = dlm_local_addr[0]->ss_family;
659 if (saddr->ss_family == AF_INET) {
660 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
661 in4_addr->sin_port = cpu_to_be16(port);
662 *addr_len = sizeof(struct sockaddr_in);
663 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
665 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
666 in6_addr->sin6_port = cpu_to_be16(port);
667 *addr_len = sizeof(struct sockaddr_in6);
669 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
672 static void dlm_close_sock(struct socket **sock)
675 restore_callbacks(*sock);
681 /* Close a remote connection and tidy up */
682 static void close_connection(struct connection *con, bool and_other,
685 bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
687 if (tx && !closing && cancel_work_sync(&con->swork)) {
688 log_print("canceled swork for node %d", con->nodeid);
689 clear_bit(CF_WRITE_PENDING, &con->flags);
691 if (rx && !closing && cancel_work_sync(&con->rwork)) {
692 log_print("canceled rwork for node %d", con->nodeid);
693 clear_bit(CF_READ_PENDING, &con->flags);
696 mutex_lock(&con->sock_mutex);
697 dlm_close_sock(&con->sock);
699 if (con->othercon && and_other) {
700 /* Will only re-enter once. */
701 close_connection(con->othercon, false, true, true);
704 con->rx_leftover = 0;
706 clear_bit(CF_CONNECTED, &con->flags);
707 mutex_unlock(&con->sock_mutex);
708 clear_bit(CF_CLOSING, &con->flags);
711 static void shutdown_connection(struct connection *con)
715 flush_work(&con->swork);
717 mutex_lock(&con->sock_mutex);
718 /* nothing to shutdown */
720 mutex_unlock(&con->sock_mutex);
724 set_bit(CF_SHUTDOWN, &con->flags);
725 ret = kernel_sock_shutdown(con->sock, SHUT_WR);
726 mutex_unlock(&con->sock_mutex);
728 log_print("Connection %p failed to shutdown: %d will force close",
732 ret = wait_event_timeout(con->shutdown_wait,
733 !test_bit(CF_SHUTDOWN, &con->flags),
734 DLM_SHUTDOWN_WAIT_TIMEOUT);
736 log_print("Connection %p shutdown timed out, will force close",
745 clear_bit(CF_SHUTDOWN, &con->flags);
746 close_connection(con, false, true, true);
749 static void dlm_tcp_shutdown(struct connection *con)
752 shutdown_connection(con->othercon);
753 shutdown_connection(con);
756 static int con_realloc_receive_buf(struct connection *con, int newlen)
758 unsigned char *newbuf;
760 newbuf = kmalloc(newlen, GFP_NOFS);
764 /* copy any leftover from last receive */
765 if (con->rx_leftover)
766 memmove(newbuf, con->rx_buf, con->rx_leftover);
768 /* swap to new buffer space */
770 con->rx_buflen = newlen;
771 con->rx_buf = newbuf;
776 /* Data received from remote end */
777 static int receive_from_sock(struct connection *con)
779 int call_again_soon = 0;
784 mutex_lock(&con->sock_mutex);
786 if (con->sock == NULL) {
791 /* realloc if we get new buffer size to read out */
792 buflen = dlm_config.ci_buffer_size;
793 if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
794 ret = con_realloc_receive_buf(con, buflen);
/* calculate new buffer parameters regarding the last receive and
 * possible leftover bytes
802 iov.iov_base = con->rx_buf + con->rx_leftover;
803 iov.iov_len = con->rx_buflen - con->rx_leftover;
805 memset(&msg, 0, sizeof(msg));
806 msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
807 ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
811 else if (ret == iov.iov_len)
/* new buflen according to bytes read and leftover from last receive */
815 buflen = ret + con->rx_leftover;
816 ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
/* calculate leftover bytes from processing and move them to the
 * beginning of the receive buffer, so that the next receive finds the
 * full message at the start address of the receive buffer.
824 con->rx_leftover = buflen - ret;
825 if (con->rx_leftover) {
826 memmove(con->rx_buf, con->rx_buf + ret,
828 call_again_soon = true;
834 mutex_unlock(&con->sock_mutex);
838 if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
839 queue_work(recv_workqueue, &con->rwork);
840 mutex_unlock(&con->sock_mutex);
844 mutex_unlock(&con->sock_mutex);
845 if (ret != -EAGAIN) {
846 /* Reconnect when there is something to send */
847 close_connection(con, false, true, false);
849 log_print("connection %p got EOF from %d",
851 /* handling for tcp shutdown */
852 clear_bit(CF_SHUTDOWN, &con->flags);
853 wake_up(&con->shutdown_wait);
/* signal the receive worker to break out of its receive loop */
861 /* Listening socket is busy, accept a connection */
862 static int accept_from_sock(struct listen_connection *con)
865 struct sockaddr_storage peeraddr;
866 struct socket *newsock;
869 struct connection *newcon;
870 struct connection *addcon;
873 if (!dlm_allow_conn) {
880 result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
884 /* Get the connected socket's peer */
885 memset(&peeraddr, 0, sizeof(peeraddr));
886 len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
888 result = -ECONNABORTED;
892 /* Get the new node's NODEID */
893 make_sockaddr(&peeraddr, 0, &len);
894 if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
unsigned char *b = (unsigned char *)&peeraddr;
log_print("connect from non-cluster node");
897 print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
898 b, sizeof(struct sockaddr_storage));
899 sock_release(newsock);
903 log_print("got connection from %d", nodeid);
905 /* Check to see if we already have a connection to this node. This
906 * could happen if the two nodes initiate a connection at roughly
907 * the same time and the connections cross on the wire.
908 * In this case we store the incoming one in "othercon"
910 newcon = nodeid2con(nodeid, GFP_NOFS);
916 sock_set_mark(newsock->sk, mark);
918 mutex_lock(&newcon->sock_mutex);
920 struct connection *othercon = newcon->othercon;
923 othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
925 log_print("failed to allocate incoming socket");
926 mutex_unlock(&newcon->sock_mutex);
931 result = dlm_con_init(othercon, nodeid);
934 mutex_unlock(&newcon->sock_mutex);
938 lockdep_set_subclass(&othercon->sock_mutex, 1);
939 newcon->othercon = othercon;
941 /* close other sock con if we have something new */
942 close_connection(othercon, false, true, false);
945 mutex_lock(&othercon->sock_mutex);
946 add_sock(newsock, othercon);
948 mutex_unlock(&othercon->sock_mutex);
951 /* accept copies the sk after we've saved the callbacks, so we
952 don't want to save them a second time or comm errors will
953 result in calling sk_error_report recursively. */
954 add_sock(newsock, newcon);
958 set_bit(CF_CONNECTED, &addcon->flags);
959 mutex_unlock(&newcon->sock_mutex);
 * Add it to the active queue in case we got data
 * between processing the accept and adding the socket
 * to the read_sockets list
966 if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
967 queue_work(recv_workqueue, &addcon->rwork);
973 sock_release(newsock);
975 if (result != -EAGAIN)
976 log_print("error accepting connection from node: %d", result);
980 static void free_entry(struct writequeue_entry *e)
982 __free_page(e->page);
987 * writequeue_entry_complete - try to delete and free write queue entry
988 * @e: write queue entry to try to delete
989 * @completed: bytes completed
991 * writequeue_lock must be held.
993 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
995 e->offset += completed;
998 if (e->len == 0 && e->users == 0) {
1005 * sctp_bind_addrs - bind a SCTP socket to all our addresses
1007 static int sctp_bind_addrs(struct socket *sock, uint16_t port)
1009 struct sockaddr_storage localaddr;
1010 struct sockaddr *addr = (struct sockaddr *)&localaddr;
1011 int i, addr_len, result = 0;
1013 for (i = 0; i < dlm_local_count; i++) {
1014 memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1015 make_sockaddr(&localaddr, port, &addr_len);
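/* the first local address is attached with a regular bind(); any
 * further addresses are added to the same socket with sock_bind_add()
 * to build up the multi-homed SCTP endpoint
 */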
1018 result = kernel_bind(sock, addr, addr_len);
1020 result = sock_bind_add(sock->sk, addr, addr_len);
1023 log_print("Can't bind to %d addr number %d, %d.\n",
1024 port, i + 1, result);
1031 /* Initiate an SCTP association.
1032 This is a special case of send_to_sock() in that we don't yet have a
1033 peeled-off socket for this association, so we use the listening socket
1034 and add the primary IP address of the remote node.
1036 static void sctp_connect_to_sock(struct connection *con)
1038 struct sockaddr_storage daddr;
1041 struct socket *sock;
1044 mutex_lock(&con->sock_mutex);
1046 /* Some odd races can cause double-connects, ignore them */
1047 if (con->retries++ > MAX_CONNECT_RETRIES)
1051 log_print("node %d already connected.", con->nodeid);
1055 memset(&daddr, 0, sizeof(daddr));
1056 result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
1058 log_print("no address for nodeid %d", con->nodeid);
1062 /* Create a socket to communicate with */
1063 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1064 SOCK_STREAM, IPPROTO_SCTP, &sock);
1068 sock_set_mark(sock->sk, mark);
1070 add_sock(sock, con);
1072 /* Bind to all addresses. */
1073 if (sctp_bind_addrs(con->sock, 0))
1076 make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
1078 log_print("connecting to %d", con->nodeid);
1080 /* Turn off Nagle's algorithm */
1081 sctp_sock_set_nodelay(sock->sk);
 * Make the sock->ops->connect() function return within the specified
 * time, since the O_NONBLOCK argument to connect() does not work here.
 * Afterwards we restore the default value of this attribute.
1088 sock_set_sndtimeo(sock->sk, 5);
1089 result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
1091 sock_set_sndtimeo(sock->sk, 0);
1093 if (result == -EINPROGRESS)
1096 if (!test_and_set_bit(CF_CONNECTED, &con->flags))
log_print("successfully connected to node %d", con->nodeid);
1107 * Some errors are fatal and this list might need adjusting. For other
1108 * errors we try again until the max number of retries is reached.
1110 if (result != -EHOSTUNREACH &&
1111 result != -ENETUNREACH &&
1112 result != -ENETDOWN &&
1113 result != -EINVAL &&
1114 result != -EPROTONOSUPPORT) {
1115 log_print("connect %d try %d error %d", con->nodeid,
1116 con->retries, result);
1117 mutex_unlock(&con->sock_mutex);
1119 lowcomms_connect_sock(con);
1124 mutex_unlock(&con->sock_mutex);
1127 /* Connect a new socket to its peer */
1128 static void tcp_connect_to_sock(struct connection *con)
1130 struct sockaddr_storage saddr, src_addr;
1133 struct socket *sock = NULL;
1136 mutex_lock(&con->sock_mutex);
1137 if (con->retries++ > MAX_CONNECT_RETRIES)
1140 /* Some odd races can cause double-connects, ignore them */
1144 /* Create a socket to communicate with */
1145 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1146 SOCK_STREAM, IPPROTO_TCP, &sock);
1150 memset(&saddr, 0, sizeof(saddr));
1151 result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
1153 log_print("no address for nodeid %d", con->nodeid);
1157 sock_set_mark(sock->sk, mark);
1159 add_sock(sock, con);
/* Bind to our cluster-known address when connecting, to avoid
 * routing problems */
1163 memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
1164 make_sockaddr(&src_addr, 0, &addr_len);
1165 result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
1168 log_print("could not bind for connect: %d", result);
1169 /* This *may* not indicate a critical error */
1172 make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
1174 log_print("connecting to %d", con->nodeid);
1176 /* Turn off Nagle's algorithm */
1177 tcp_sock_set_nodelay(sock->sk);
1179 result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
1181 if (result == -EINPROGRESS)
1188 sock_release(con->sock);
1194 * Some errors are fatal and this list might need adjusting. For other
1195 * errors we try again until the max number of retries is reached.
1197 if (result != -EHOSTUNREACH &&
1198 result != -ENETUNREACH &&
1199 result != -ENETDOWN &&
1200 result != -EINVAL &&
1201 result != -EPROTONOSUPPORT) {
1202 log_print("connect %d try %d error %d", con->nodeid,
1203 con->retries, result);
1204 mutex_unlock(&con->sock_mutex);
1206 lowcomms_connect_sock(con);
1210 mutex_unlock(&con->sock_mutex);
1214 /* On error caller must run dlm_close_sock() for the
1215 * listen connection socket.
1217 static int tcp_create_listen_sock(struct listen_connection *con,
1218 struct sockaddr_storage *saddr)
1220 struct socket *sock = NULL;
1224 if (dlm_local_addr[0]->ss_family == AF_INET)
1225 addr_len = sizeof(struct sockaddr_in);
1227 addr_len = sizeof(struct sockaddr_in6);
1229 /* Create a socket to communicate with */
1230 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1231 SOCK_STREAM, IPPROTO_TCP, &sock);
1233 log_print("Can't create listening comms socket");
1237 sock_set_mark(sock->sk, dlm_config.ci_mark);
1239 /* Turn off Nagle's algorithm */
1240 tcp_sock_set_nodelay(sock->sk);
1242 sock_set_reuseaddr(sock->sk);
1244 add_listen_sock(sock, con);
1246 /* Bind to our port */
1247 make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
1248 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
1250 log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
1253 sock_set_keepalive(sock->sk);
1255 result = sock->ops->listen(sock, 5);
1257 log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
1267 /* Get local addresses */
1268 static void init_local(void)
1270 struct sockaddr_storage sas, *addr;
1273 dlm_local_count = 0;
1274 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1275 if (dlm_our_addr(&sas, i))
1278 addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
1281 dlm_local_addr[dlm_local_count++] = addr;
1285 static void deinit_local(void)
1289 for (i = 0; i < dlm_local_count; i++)
1290 kfree(dlm_local_addr[i]);
1293 /* Initialise SCTP socket and bind to all interfaces
1294 * On error caller must run dlm_close_sock() for the
1295 * listen connection socket.
1297 static int sctp_listen_for_all(struct listen_connection *con)
1299 struct socket *sock = NULL;
1300 int result = -EINVAL;
1302 log_print("Using SCTP for communications");
1304 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1305 SOCK_STREAM, IPPROTO_SCTP, &sock);
1307 log_print("Can't create comms socket, check SCTP is loaded");
1311 sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
1312 sock_set_mark(sock->sk, dlm_config.ci_mark);
1313 sctp_sock_set_nodelay(sock->sk);
1315 add_listen_sock(sock, con);
1317 /* Bind to all addresses. */
1318 result = sctp_bind_addrs(con->sock, dlm_config.ci_tcp_port);
1322 result = sock->ops->listen(sock, 5);
1324 log_print("Can't set socket listening");
1334 static int tcp_listen_for_all(void)
1336 /* We don't support multi-homed hosts */
1337 if (dlm_local_count > 1) {
1338 log_print("TCP protocol can't handle multi-homed hosts, "
1343 log_print("Using TCP for communications");
1345 return tcp_create_listen_sock(&listen_con, dlm_local_addr[0]);
1350 static struct writequeue_entry *new_writequeue_entry(struct connection *con,
1353 struct writequeue_entry *entry;
1355 entry = kzalloc(sizeof(*entry), allocation);
1359 entry->page = alloc_page(allocation | __GFP_ZERO);
1371 static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
1372 gfp_t allocation, char **ppc)
1374 struct writequeue_entry *e;
1376 spin_lock(&con->writequeue_lock);
1377 if (!list_empty(&con->writequeue)) {
1378 e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
1379 if (DLM_WQ_REMAIN_BYTES(e) >= len) {
1380 *ppc = page_address(e->page) + e->end;
1383 spin_unlock(&con->writequeue_lock);
1388 spin_unlock(&con->writequeue_lock);
1390 e = new_writequeue_entry(con, allocation);
1394 *ppc = page_address(e->page);
1397 spin_lock(&con->writequeue_lock);
1398 list_add_tail(&e->list, &con->writequeue);
1399 spin_unlock(&con->writequeue_lock);
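/* The two exported helpers below form the send-side API used by the rest of
 * the DLM code when it builds outgoing messages.  A minimal sketch of the
 * expected calling pattern (illustrative only; "h" and "msg_len" are made-up
 * names, the real callers live outside this file):
 *
 *	char *mb;
 *	void *h = dlm_lowcomms_get_buffer(nodeid, msg_len, GFP_NOFS, &mb);
 *	if (!h)
 *		return -ENOMEM;
 *	... fill the message into mb ...
 *	dlm_lowcomms_commit_buffer(h);
 */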
1404 void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
1406 struct connection *con;
1408 if (len > DEFAULT_BUFFER_SIZE ||
1409 len < sizeof(struct dlm_header)) {
1410 BUILD_BUG_ON(PAGE_SIZE < DEFAULT_BUFFER_SIZE);
1411 log_print("failed to allocate a buffer of size %d", len);
1416 con = nodeid2con(nodeid, allocation);
1420 return new_wq_entry(con, len, allocation, ppc);
1423 void dlm_lowcomms_commit_buffer(void *mh)
1425 struct writequeue_entry *e = (struct writequeue_entry *)mh;
1426 struct connection *con = e->con;
1429 spin_lock(&con->writequeue_lock);
1434 e->len = DLM_WQ_LENGTH_BYTES(e);
1435 spin_unlock(&con->writequeue_lock);
1437 queue_work(send_workqueue, &con->swork);
1441 spin_unlock(&con->writequeue_lock);
1445 /* Send a message */
1446 static void send_to_sock(struct connection *con)
1449 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1450 struct writequeue_entry *e;
1454 mutex_lock(&con->sock_mutex);
1455 if (con->sock == NULL)
1458 spin_lock(&con->writequeue_lock);
1460 if (list_empty(&con->writequeue))
1463 e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
1466 BUG_ON(len == 0 && e->users == 0);
1467 spin_unlock(&con->writequeue_lock);
1471 ret = kernel_sendpage(con->sock, e->page, offset, len,
1473 if (ret == -EAGAIN || ret == 0) {
1474 if (ret == -EAGAIN &&
1475 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1476 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1477 /* Notify TCP that we're limited by the
1478 * application window size.
1480 set_bit(SOCK_NOSPACE, &con->sock->flags);
1481 con->sock->sk->sk_write_pending++;
1489 /* Don't starve people filling buffers */
1490 if (++count >= MAX_SEND_MSG_COUNT) {
1495 spin_lock(&con->writequeue_lock);
1496 writequeue_entry_complete(e, ret);
1498 spin_unlock(&con->writequeue_lock);
1500 mutex_unlock(&con->sock_mutex);
1504 mutex_unlock(&con->sock_mutex);
1505 close_connection(con, false, false, true);
1506 /* Requeue the send work. When the work daemon runs again, it will try
1507 a new connection, then call this function again. */
1508 queue_work(send_workqueue, &con->swork);
1512 mutex_unlock(&con->sock_mutex);
1513 queue_work(send_workqueue, &con->swork);
1517 static void clean_one_writequeue(struct connection *con)
1519 struct writequeue_entry *e, *safe;
1521 spin_lock(&con->writequeue_lock);
1522 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1526 spin_unlock(&con->writequeue_lock);
/* Called from recovery when it knows that a node has left the cluster */
1531 int dlm_lowcomms_close(int nodeid)
1533 struct connection *con;
1534 struct dlm_node_addr *na;
1536 log_print("closing connection to node %d", nodeid);
1537 con = nodeid2con(nodeid, 0);
1539 set_bit(CF_CLOSE, &con->flags);
1540 close_connection(con, true, true, true);
1541 clean_one_writequeue(con);
1543 clean_one_writequeue(con->othercon);
1546 spin_lock(&dlm_node_addrs_spin);
1547 na = find_node_addr(nodeid);
1549 list_del(&na->list);
1550 while (na->addr_count--)
1551 kfree(na->addr[na->addr_count]);
1554 spin_unlock(&dlm_node_addrs_spin);
1559 /* Receive workqueue function */
1560 static void process_recv_sockets(struct work_struct *work)
1562 struct connection *con = container_of(work, struct connection, rwork);
1565 clear_bit(CF_READ_PENDING, &con->flags);
1567 err = receive_from_sock(con);
1571 static void process_listen_recv_socket(struct work_struct *work)
1573 accept_from_sock(&listen_con);
1576 /* Send workqueue function */
1577 static void process_send_sockets(struct work_struct *work)
1579 struct connection *con = container_of(work, struct connection, swork);
1581 clear_bit(CF_WRITE_PENDING, &con->flags);
1582 if (con->sock == NULL) /* not mutex protected so check it inside too */
1583 con->connect_action(con);
1584 if (!list_empty(&con->writequeue))
1588 static void work_stop(void)
1591 destroy_workqueue(recv_workqueue);
1593 destroy_workqueue(send_workqueue);
1596 static int work_start(void)
1598 recv_workqueue = alloc_workqueue("dlm_recv",
1599 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1600 if (!recv_workqueue) {
1601 log_print("can't start dlm_recv");
1605 send_workqueue = alloc_workqueue("dlm_send",
1606 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
1607 if (!send_workqueue) {
1608 log_print("can't start dlm_send");
1609 destroy_workqueue(recv_workqueue);
1616 static void shutdown_conn(struct connection *con)
1618 if (con->shutdown_action)
1619 con->shutdown_action(con);
1622 void dlm_lowcomms_shutdown(void)
/* Set all the flags to prevent any socket activity. */
1630 flush_workqueue(recv_workqueue);
1632 flush_workqueue(send_workqueue);
1634 dlm_close_sock(&listen_con.sock);
1636 foreach_conn(shutdown_conn);
1639 static void _stop_conn(struct connection *con, bool and_other)
1641 mutex_lock(&con->sock_mutex);
1642 set_bit(CF_CLOSE, &con->flags);
1643 set_bit(CF_READ_PENDING, &con->flags);
1644 set_bit(CF_WRITE_PENDING, &con->flags);
1645 if (con->sock && con->sock->sk) {
1646 write_lock_bh(&con->sock->sk->sk_callback_lock);
1647 con->sock->sk->sk_user_data = NULL;
1648 write_unlock_bh(&con->sock->sk->sk_callback_lock);
1650 if (con->othercon && and_other)
1651 _stop_conn(con->othercon, false);
1652 mutex_unlock(&con->sock_mutex);
1655 static void stop_conn(struct connection *con)
1657 _stop_conn(con, true);
1660 static void connection_release(struct rcu_head *rcu)
1662 struct connection *con = container_of(rcu, struct connection, rcu);
1668 static void free_conn(struct connection *con)
1670 close_connection(con, true, true, true);
1671 spin_lock(&connections_lock);
1672 hlist_del_rcu(&con->list);
1673 spin_unlock(&connections_lock);
1674 if (con->othercon) {
1675 clean_one_writequeue(con->othercon);
1676 call_srcu(&connections_srcu, &con->othercon->rcu,
1677 connection_release);
1679 clean_one_writequeue(con);
1680 call_srcu(&connections_srcu, &con->rcu, connection_release);
1683 static void work_flush(void)
1687 struct connection *con;
1691 foreach_conn(stop_conn);
1693 flush_workqueue(recv_workqueue);
1695 flush_workqueue(send_workqueue);
1696 idx = srcu_read_lock(&connections_srcu);
1697 for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1698 hlist_for_each_entry_rcu(con, &connection_hash[i],
1700 ok &= test_bit(CF_READ_PENDING, &con->flags);
1701 ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1702 if (con->othercon) {
1703 ok &= test_bit(CF_READ_PENDING,
1704 &con->othercon->flags);
1705 ok &= test_bit(CF_WRITE_PENDING,
1706 &con->othercon->flags);
1710 srcu_read_unlock(&connections_srcu, idx);
1714 void dlm_lowcomms_stop(void)
1717 foreach_conn(free_conn);
1722 int dlm_lowcomms_start(void)
1724 int error = -EINVAL;
1727 for (i = 0; i < CONN_HASH_SIZE; i++)
1728 INIT_HLIST_HEAD(&connection_hash[i]);
1731 if (!dlm_local_count) {
1733 log_print("no local IP address has been set");
1737 INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1739 error = work_start();
1745 /* Start listening */
1746 if (dlm_config.ci_protocol == 0)
1747 error = tcp_listen_for_all();
1749 error = sctp_listen_for_all(&listen_con);
1757 dlm_close_sock(&listen_con.sock);
1762 void dlm_lowcomms_exit(void)
1764 struct dlm_node_addr *na, *safe;
1766 spin_lock(&dlm_node_addrs_spin);
1767 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
1768 list_del(&na->list);
1769 while (na->addr_count--)
1770 kfree(na->addr[na->addr_count]);
1773 spin_unlock(&dlm_node_addrs_spin);