// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       ||                            \   \
 *       ||         -----------         \   \
 *       ||         | CLOSING |  socket event;  \   \
 *       ||         -----------  await close     \   \
 *       ||                ^                      \   |
 *       ||                |                       \  |
 *       ||                + con_sock_state_closing() \  |
 *       ||               / \                          |  |
 *       ||              /   ---------------           |  |
 *       ||             /                   \          v  v
 *       ||            /                    --------------
 *       ||-----------/                     | CONNECTING |  socket created, TCP
 *       |                                  --------------  connect initiated
 *       |                                       | con_sock_state_connected()
 *       |                                       v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */
/*
 * connection states
 */
#define CON_STATE_CLOSED	1  /* -> PREOPEN */
#define CON_STATE_PREOPEN	2  /* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3  /* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4  /* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5  /* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6  /* -> PREOPEN, CLOSED */
96 * ceph_connection flag bits
98 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
99 * messages on errors */
100 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
101 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
102 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
103 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}
/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*ceph_msg_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);
/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */
const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
{
	int i;
	char *s;
	struct sockaddr_storage ss = addr->in_addr; /* align */
	struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss.ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
			 le32_to_cpu(addr->type), &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
			 le32_to_cpu(addr->type), &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss.ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
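/*
 * Example (hypothetical caller, illustrative only): because each call
 * claims a fresh slot in the addr_str ring, up to ADDR_STR_COUNT
 * results may be referenced at once, so two addresses can safely be
 * formatted within a single printk:
 *
 *	pr_info("peer moved %s -> %s\n", ceph_pr_addr(&old_addr),
 *		ceph_pr_addr(&new_addr));
 */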
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_banner_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;
static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
	if (!ceph_msg_cache)
		return -ENOMEM;

	return 0;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	put_page(zero_page);
	zero_page = NULL;

	ceph_msgr_slab_exit();
}
int __init ceph_msgr_init(void)
{
	if (ceph_msgr_slab_init())
		return -ENOMEM;

	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	get_page(zero_page);

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}
/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}
/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		fallthrough;
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}

/*
 * socket helpers
 */
/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
	struct socket *sock;
	unsigned int noio_flag;
	int ret;

	BUG_ON(con->sock);

	/* sock_create_kern() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	memalloc_noio_restore(noio_flag);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr), ret);
		sock_release(sock);
		return ret;
	}

	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
		tcp_sock_set_nodelay(sock->sk);

	con->sock = sock;
	return 0;
}
/*
 * If @buf is NULL, discard up to @len bytes.
 */
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (!buf)
		msg.msg_flags |= MSG_TRUNC;

	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
	struct bio_vec bvec = {
		.bv_page = page,
		.bv_offset = page_offset,
		.bv_len = length
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	BUG_ON(page_offset + length > PAGE_SIZE);
	iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, bool more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
/*
 * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST
 */
static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	ssize_t (*sendpage)(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags);
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
	int ret;

	/*
	 * sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case.
	 *
	 * Same goes for slab pages: skb_can_coalesce() allows
	 * coalescing neighboring slab objects into a single frag which
	 * triggers one of hardened usercopy checks.
	 */
	if (sendpage_ok(page))
		sendpage = sock->ops->sendpage;
	else
		sendpage = sock_no_sendpage;

	ret = sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}
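/*
 * Note (illustrative only, based on the generic helper declared in
 * include/linux/net.h): sendpage_ok() boils down to the two checks
 * described above, i.e. roughly
 *
 *	!PageSlab(page) && page_count(page) >= 1
 *
 * so zero-refcount pages and slab objects take the sock_no_sendpage()
 * fallback path instead of being attached to the skb as page frags.
 */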
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}
static void ceph_con_reset_protocol(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);

	con_close_socket(con);
	if (con->in_msg) {
		WARN_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}
	if (con->out_msg) {
		WARN_ON(con->out_msg->con != con);
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}

	con->out_skip = 0;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}
static void ceph_con_reset_session(struct ceph_connection *con)
{
	dout("%s con %p\n", __func__, con);

	WARN_ON(con->in_msg);
	WARN_ON(con->out_msg);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);
	con->out_seq = 0;
	con->in_seq = 0;
	con->in_seq_acked = 0;

	con->connect_seq = 0;
	con->peer_global_seq = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	ceph_con_reset_protocol(con);
	ceph_con_reset_session(con);
	cancel_con(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
	const struct ceph_connection_operations *ops,
	struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
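/*
 * Worked example (illustrative only): with msgr->global_seq == 5,
 * get_global_seq(msgr, 10) bumps the counter past @gt and returns 11;
 * a following get_global_seq(msgr, 0) returns 12.
 */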
/*
 * Discard messages that have been acked by the server.
 */
static void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
{
	struct ceph_msg *msg;
	u64 seq;

	dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
	while (!list_empty(&con->out_sent)) {
		msg = list_first_entry(&con->out_sent, struct ceph_msg,
				       list_head);
		WARN_ON(msg->needs_out_seq);
		seq = le64_to_cpu(msg->hdr.seq);
		if (seq > ack_seq)
			break;

		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
		     msg, seq);
		ceph_msg_remove(msg);
	}
}
/*
 * Discard messages that have been requeued in con_fault(), up to
 * reconnect_seq.  This avoids gratuitously resending messages that
 * the server had received and handled prior to reconnect.
 */
static void ceph_con_discard_requeued(struct ceph_connection *con,
				      u64 reconnect_seq)
{
	struct ceph_msg *msg;
	u64 seq;

	dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
	while (!list_empty(&con->out_queue)) {
		msg = list_first_entry(&con->out_queue, struct ceph_msg,
				       list_head);
		if (msg->needs_out_seq)
			break;
		seq = le64_to_cpu(msg->hdr.seq);
		if (seq > reconnect_seq)
			break;

		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
		     msg, seq);
		ceph_msg_remove(msg);
	}
}
static void con_out_kvec_reset(struct ceph_connection *con)
{
	BUG_ON(con->out_skip);

	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index = con->out_kvec_left;

	BUG_ON(con->out_skip);
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}
/*
 * Chop off a kvec from the end.  Return residual number of bytes for
 * that kvec, i.e. how many bytes would have been written if the kvec
 * hadn't been nuked.
 */
static int con_out_kvec_skip(struct ceph_connection *con)
{
	int off = con->out_kvec_cur - con->out_kvec;
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
		BUG_ON(con->out_kvec_bytes < skip);
		BUG_ON(!con->out_kvec_left);
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}

	return skip;
}
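/*
 * Worked example (illustrative only): after con_out_kvec_add() of a
 * 1-byte tag and an 8-byte ack, out_kvec_bytes == 9 and out_kvec_left
 * == 2.  A single con_out_kvec_skip() drops the trailing ack kvec and
 * returns 8, leaving out_kvec_bytes == 1 and out_kvec_left == 1.
 */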
#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					  size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_bio_iter *it = &cursor->bio_iter;

	cursor->resid = min_t(size_t, length, data->bio_length);
	*it = data->bio_pos;
	if (cursor->resid < it->iter.bi_size)
		it->iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
					   size_t *page_offset,
					   size_t *length)
{
	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
					   cursor->bio_iter.iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
				      size_t bytes)
{
	struct ceph_bio_iter *it = &cursor->bio_iter;
	struct page *page = bio_iter_page(it->bio, it->iter);

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
	cursor->resid -= bytes;
	bio_advance_iter(it->bio, &it->iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
		       page == bio_iter_page(it->bio, it->iter)))
		return false;	/* more bytes to process in this segment */

	if (!it->iter.bi_size) {
		it->bio = it->bio->bi_next;
		it->iter = it->bio->bi_iter;
		if (cursor->resid < it->iter.bi_size)
			it->iter.bi_size = cursor->resid;
	}

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
	return true;
}
#endif /* CONFIG_BLOCK */
static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio_vec *bvecs = data->bvec_pos.bvecs;

	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
	cursor->bvec_iter = data->bvec_pos.iter;
	cursor->bvec_iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
					     size_t *page_offset,
					     size_t *length)
{
	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
					   cursor->bvec_iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->resid -= bytes;
	bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
		return false;	/* more bytes to process in this segment */

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
	return true;
}
/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}
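/*
 * Worked example (illustrative only, assuming 4K pages): for
 * data->alignment == 512 and data->length == 6000, calc_pages_for()
 * yields 2 pages (bytes 512..4095 of the first, 0..2415 of the
 * second), the initial page_offset is 512 and last_piece starts out
 * false because 512 + 6000 > PAGE_SIZE.
 */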
static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
			 size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}
static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}
/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
				   size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}
static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
			    size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}
static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
					   size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_next_entry(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}
/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		ceph_msg_data_bvecs_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
				      struct ceph_msg *msg, size_t length)
{
	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(!msg->num_data_items);

	cursor->total_resid = length;
	cursor->data = msg->data;

	__ceph_msg_data_cursor_init(cursor);
}
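/*
 * Illustrative only (hypothetical consumer; the real senders and
 * receivers live further down in this file): a data payload is
 * walked piece by piece, with short I/O resuming mid-piece:
 *
 *	ceph_msg_data_cursor_init(&msg->cursor, msg, msg->data_length);
 *	while (msg->cursor.total_resid) {
 *		page = ceph_msg_data_next(&msg->cursor, &off, &len, NULL);
 *		ret = do_io(page, off, len);	// send or receive (hypothetical)
 *		ceph_msg_data_advance(&msg->cursor, ret);
 *	}
 */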
/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
				       size_t *page_offset, size_t *length,
				       bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}

	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	BUG_ON(*length > cursor->resid);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}
/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				  size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		cursor->data++;
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;
}
static size_t sizeof_footer(struct ceph_connection *con)
{
	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
	    sizeof(struct ceph_msg_footer) :
	    sizeof(struct ceph_msg_footer_old);
}
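/*
 * Illustrative only (sizes follow the wire structs): a peer without
 * CEPH_FEATURE_MSG_AUTH gets struct ceph_msg_footer_old (three CRCs
 * plus a flags byte, 13 bytes); an auth-capable peer gets struct
 * ceph_msg_footer, which additionally carries the 64-bit message
 * signature (21 bytes).
 */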
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	/* Initialize data cursor */

	ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
}
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con_out_kvec_add(con, sizeof_footer(con), &m->footer);
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(m);
		else
			m->footer.sig = 0;
	} else {
		m->old_footer.flags = m->footer.flags;
	}
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with it's original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;

		if (con->ops->reencode_message)
			con->ops->reencode_message(m);
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in hdr crc and finalize hdr */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));

	/* fill in front and middle crc, footer */
	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));
	con->out_msg->footer.flags = 0;

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
		struct timespec64 now;

		ktime_get_real_ts64(&now);
		con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
		ceph_encode_timespec64(&con->out_temp_keepalive2, &now);
		con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
				 &con->out_temp_keepalive2);
	} else {
		con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
	}
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
/*
 * Connection negotiation.
 */

static int get_connect_authorizer(struct ceph_connection *con)
{
	struct ceph_auth_handshake *auth;
	int auth_proto;

	if (!con->ops->get_authorizer) {
		con->auth = NULL;
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return 0;
	}

	auth = con->ops->get_authorizer(con, &auth_proto, con->auth_retry);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->auth = auth;
	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = cpu_to_le32(auth->authorizer_buf_len);
	return 0;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static void __prepare_write_connect(struct ceph_connection *con)
{
	con_out_kvec_add(con, sizeof(con->out_connect), &con->out_connect);
	if (con->auth)
		con_out_kvec_add(con, con->auth->authorizer_buf_len,
				 con->auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int ret;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features =
	    cpu_to_le64(from_msgr(con->msgr)->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	ret = get_connect_authorizer(con);
	if (ret)
		return ret;

	__prepare_write_connect(con);
	return 0;
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
static u32 ceph_crc32c_page(u32 crc, struct page *page,
			    unsigned int page_offset,
			    unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (!msg->num_data_items)
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->total_resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		int ret;

		if (!cursor->resid) {
			ceph_msg_data_advance(cursor, 0);
			continue;
		}

		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
		if (length == cursor->total_resid)
			more = MSG_MORE;
		ret = ceph_tcp_sendpage(con->sock, page, page_offset, length,
					more);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		ceph_msg_data_advance(cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}
/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int more = MSG_MORE | MSG_SENDPAGE_NOTLAST;
	int ret;

	dout("%s %p %d left\n", __func__, con, con->out_skip);
	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_SIZE);

		if (size == con->out_skip)
			more = MSG_MORE;
		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, more);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

static void prepare_read_keepalive_ack(struct ceph_connection *con)
{
	dout("prepare_read_keepalive_ack %p\n", con);
	con->in_base_pos = 0;
}
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
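/*
 * Worked example (illustrative only): callers chain reads by growing
 * @end while @size covers only the newest object, e.g. a 1-byte tag
 * followed by an 8-byte ack uses end=1,size=1 and then end=9,size=8.
 * Since con->in_base_pos carries the running total, a short read
 * resumes exactly where it left off on the next worker pass.
 */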
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ceph_decode_banner_addr(&con->actual_peer_addr);

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;
	ceph_decode_banner_addr(&con->peer_addr_for_me);

out:
	return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	if (con->auth) {
		size = le32_to_cpu(con->in_reply.authorizer_len);
		if (size > con->auth->authorizer_reply_buf_len) {
			pr_err("authorizer reply too big: %d > %zu\n", size,
			       con->auth->authorizer_reply_buf_len);
			ret = -EINVAL;
			goto out;
		}

		end += size;
		ret = read_partial(con, end, size,
				   con->auth->authorizer_reply_buf);
		if (ret <= 0)
			goto out;
	}

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}
static bool addr_is_blank(struct ceph_entity_addr *addr)
{
	struct sockaddr_storage ss = addr->in_addr; /* align */
	struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
	struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;

	switch (ss.ss_family) {
	case AF_INET:
		return addr4->s_addr == htonl(INADDR_ANY);
	case AF_INET6:
		return ipv6_addr_any(addr6);
	default:
		return true;
	}
}
static int addr_port(struct ceph_entity_addr *addr)
{
	switch (get_unaligned(&addr->in_addr.ss_family)) {
	case AF_INET:
		return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
	case AF_INET6:
		return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
	}
	return 0;
}

static void addr_set_port(struct ceph_entity_addr *addr, int p)
{
	switch (get_unaligned(&addr->in_addr.ss_family)) {
	case AF_INET:
		put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
		break;
	case AF_INET6:
		put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
		break;
	}
}
/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
		     char delim, const char **ipend)
{
	memset(&addr->in_addr, 0, sizeof(addr->in_addr));

	if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
		put_unaligned(AF_INET, &addr->in_addr.ss_family);
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
		put_unaligned(AF_INET6, &addr->in_addr.ss_family);
		return 0;
	}

	return -EINVAL;
}
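/*
 * Example (hypothetical call, illustrative only): parsing "10.0.0.1,"
 * with delim ',' succeeds and leaves *ipend pointing at the comma:
 *
 *	ret = ceph_pton("10.0.0.1", 8, &addr, ',', &ipend);
 *	// ret == 0, addr->in_addr.ss_family == AF_INET
 */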
/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct ceph_entity_addr *addr, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(current->nsproxy->net_ns,
			   NULL, name, end - name, NULL, &ip_addr, NULL, false);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
		ret, ret ? "failed" : ceph_pr_addr(addr));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct ceph_entity_addr *addr, char delim, const char **ipend)
{
	return -EIO;
}
#endif
/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
		struct ceph_entity_addr *addr, char delim, const char **ipend)
{
	int ret;

	ret = ceph_pton(name, namelen, addr, delim, ipend);
	if (ret)
		ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);

	return ret;
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i, ret = -EINVAL;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		ret = ceph_parse_server_name(p, end - p, &addr[i], delim, &ipend);
		if (ret)
			goto bad;
		ret = -EINVAL;

		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port == 0)
				port = CEPH_MON_PORT;
			else if (port > 65535)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(&addr[i], port);
		addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;

		dout("parse_ips got %s\n", ceph_pr_addr(&addr[i]));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
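/*
 * Example input (illustrative only): "192.168.0.1:6789,[::1]" parses
 * into two entries: the first with the explicit port, the second an
 * IPv6 address that falls back to CEPH_MON_PORT.
 */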
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warn("wrong peer, want %s/%u, got %s/%u\n",
			ceph_pr_addr(&con->peer_addr),
			le32_to_cpu(con->peer_addr.nonce),
			ceph_pr_addr(&con->actual_peer_addr),
			le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr)) {
		int port = addr_port(&con->msgr->inst.addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr));
	}

	return 0;
}
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = from_msgr(con->msgr)->supported_features;
	u64 req_feat = from_msgr(con->msgr)->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	if (con->auth) {
		int len = le32_to_cpu(con->in_reply.authorizer_len);

		/*
		 * Any connection that defines ->get_authorizer()
		 * should also define ->add_authorizer_challenge() and
		 * ->verify_authorizer_reply().
		 *
		 * See get_connect_authorizer().
		 */
		if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
			ret = con->ops->add_authorizer_challenge(
				    con, con->auth->authorizer_reply_buf, len);
			if (ret < 0)
				return ret;

			con_out_kvec_reset(con);
			__prepare_write_connect(con);
			prepare_read_connect(con);
			return 0;
		}

		if (len) {
			ret = con->ops->verify_authorizer_reply(con);
			if (ret < 0) {
				con->error_msg = "bad authorize reply";
				return ret;
			}
		}
	}

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_info("%s%lld %s session reset\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr));
		ceph_con_reset_session(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_reply.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_reply.global_seq));
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_SEQ:
	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			return -1;
		}

		WARN_ON(con->state != CON_STATE_NEGOTIATING);
		con->state = CON_STATE_OPEN;
		con->auth_retry = 0;    /* we authenticated; clear flag */
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			con_flag_set(con, CON_FLAG_LOSSYTX);

		con->delay = 0;      /* reset backoff memory */

		if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
			prepare_write_seq(con);
			prepare_read_seq(con);
		} else {
			prepare_read_tag(con);
		}
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	u64 ack = le64_to_cpu(con->in_temp_ack);

	if (con->in_tag == CEPH_MSGR_TAG_ACK)
		ceph_con_discard_sent(con, ack);
	else
		ceph_con_discard_requeued(con, ack);

	prepare_read_tag(con);
}
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}
static int read_partial_msg_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	struct page *page;
	size_t page_offset;
	size_t length;
	u32 crc = 0;
	int ret;

	if (!msg->num_data_items)
		return -EIO;

	if (do_datacrc)
		crc = con->in_data_crc;
	while (cursor->total_resid) {
		if (!cursor->resid) {
			ceph_msg_data_advance(cursor, 0);
			continue;
		}

		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
		ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
		if (ret <= 0) {
			if (do_datacrc)
				con->in_data_crc = crc;

			return ret;
		}

		if (do_datacrc)
			crc = ceph_crc32c_page(crc, page, page_offset, ret);
		ceph_msg_data_advance(cursor, (size_t)ret);
	}
	if (do_datacrc)
		con->in_data_crc = crc;

	return 1;	/* must return > 0 to indicate success */
}
2387 * read (part of) a message.
2389 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2391 static int read_partial_message(struct ceph_connection *con)
2393 struct ceph_msg *m = con->in_msg;
2397 unsigned int front_len, middle_len, data_len;
2398 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2399 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
2403 dout("read_partial_message con %p msg %p\n", con, m);
2406 size = sizeof(con->in_hdr);
2407 end = size;
2408 ret = read_partial(con, end, size, &con->in_hdr);
2409 if (ret <= 0)
2410 return ret;
2412 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2413 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2414 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
2415 crc, le32_to_cpu(con->in_hdr.crc));
2416 return -EBADMSG;
2417 }
2419 front_len = le32_to_cpu(con->in_hdr.front_len);
2420 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2421 return -EIO;
2422 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2423 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2424 return -EIO;
2425 data_len = le32_to_cpu(con->in_hdr.data_len);
2426 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2427 return -EIO;
2430 seq = le64_to_cpu(con->in_hdr.seq);
2431 if ((s64)seq - (s64)con->in_seq < 1) {
2432 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2433 ENTITY_NAME(con->peer_name),
2434 ceph_pr_addr(&con->peer_addr),
2435 seq, con->in_seq + 1);
2436 con->in_base_pos = -front_len - middle_len - data_len -
2437 sizeof_footer(con);
2438 con->in_tag = CEPH_MSGR_TAG_READY;
2439 return 1;
2440 } else if ((s64)seq - (s64)con->in_seq > 1) {
2441 pr_err("read_partial_message bad seq %lld expected %lld\n",
2442 seq, con->in_seq + 1);
2443 con->error_msg = "bad message sequence # for incoming message";
2444 return -EBADE;
2445 }
2447 /* allocate message? */
2448 if (!con->in_msg) {
2449 int skip = 0;
2451 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2452 front_len, data_len);
2453 ret = ceph_con_in_msg_alloc(con, &skip);
2454 if (ret < 0)
2455 return ret;
2457 BUG_ON(!con->in_msg ^ skip);
2458 if (skip) {
2459 /* skip this message */
2460 dout("alloc_msg said skip message\n");
2461 con->in_base_pos = -front_len - middle_len - data_len -
2462 sizeof_footer(con);
2463 con->in_tag = CEPH_MSGR_TAG_READY;
2464 con->in_seq++;
2465 return 1;
2466 }
2467 }
2468 BUG_ON(!con->in_msg);
2469 BUG_ON(con->in_msg->con != con);
2471 m->front.iov_len = 0; /* haven't read it yet */
2472 if (m->middle)
2473 m->middle->vec.iov_len = 0;
2475 /* prepare for data payload, if any */
2477 if (data_len)
2478 prepare_message_data(con->in_msg, data_len);
2482 ret = read_partial_message_section(con, &m->front, front_len,
2483 &con->in_front_crc);
2484 if (ret <= 0)
2485 return ret;
2488 if (m->middle) {
2489 ret = read_partial_message_section(con, &m->middle->vec,
2490 middle_len,
2491 &con->in_middle_crc);
2492 if (ret <= 0)
2493 return ret;
2494 }
2497 if (data_len) {
2498 ret = read_partial_msg_data(con);
2499 if (ret <= 0)
2500 return ret;
2501 }
2504 size = sizeof_footer(con);
2505 end += size;
2506 ret = read_partial(con, end, size, &m->footer);
2507 if (ret <= 0)
2508 return ret;
2510 if (!need_sign) {
2511 m->footer.flags = m->old_footer.flags;
2512 m->footer.sig = 0;
2513 }
2515 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2516 m, front_len, m->footer.front_crc, middle_len,
2517 m->footer.middle_crc, data_len, m->footer.data_crc);
2520 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2521 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2522 m, con->in_front_crc, le32_to_cpu(m->footer.front_crc));
2523 return -EBADMSG;
2524 }
2525 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2526 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2527 m, con->in_middle_crc, le32_to_cpu(m->footer.middle_crc));
2528 return -EBADMSG;
2529 }
2530 if (do_datacrc &&
2531 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2532 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2533 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2534 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2535 return -EBADMSG;
2536 }
2538 if (need_sign && con->ops->check_message_signature &&
2539 con->ops->check_message_signature(m)) {
2540 pr_err("read_partial_message %p signature check failed\n", m);
2541 return -EBADMSG;
2542 }
2544 return 1; /* done! */
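/*
 * For reference, the on-wire layout that read_partial_message() walks
 * (reconstructed from the reads above, informational only):
 *
 *	ceph_msg_header | front | middle (optional) | data (optional) | footer
 *
 * The footer carries the front/middle/data crc32c values that are
 * compared against the running CRCs accumulated while reading.
 */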
2548 * Process message. This happens in the worker thread. The callback should
2549 * be careful not to do anything that waits on other incoming messages or it
2550 * may deadlock.
2551 */
2552 static void process_message(struct ceph_connection *con)
2554 struct ceph_msg *msg = con->in_msg;
2556 BUG_ON(con->in_msg->con != con);
2557 con->in_msg = NULL;
2559 /* if first message, set peer_name */
2560 if (con->peer_name.type == 0)
2561 con->peer_name = msg->hdr.src;
2563 con->in_seq++;
2564 mutex_unlock(&con->mutex);
2566 dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
2567 msg, le64_to_cpu(msg->hdr.seq),
2568 ENTITY_NAME(msg->hdr.src),
2569 le16_to_cpu(msg->hdr.type),
2570 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2571 le32_to_cpu(msg->hdr.front_len),
2572 le32_to_cpu(msg->hdr.middle_len),
2573 le32_to_cpu(msg->hdr.data_len),
2574 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2575 con->ops->dispatch(con, msg);
2577 mutex_lock(&con->mutex);
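/*
 * Sketch of a dispatch op that honors the rule above (hypothetical
 * callback, not one registered anywhere in this file): consume or hand
 * off the message, but never wait for further incoming traffic.
 *
 *	static void my_dispatch(struct ceph_connection *con,
 *				struct ceph_msg *msg)
 *	{
 *		handle_reply(con, msg);		// or punt to a workqueue
 *		ceph_msg_put(msg);		// dispatch owns the ref
 *	}
 */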
2580 static int read_keepalive_ack(struct ceph_connection *con)
2582 struct ceph_timespec ceph_ts;
2583 size_t size = sizeof(ceph_ts);
2584 int ret = read_partial(con, size, size, &ceph_ts);
2585 if (ret <= 0)
2586 return ret;
2587 ceph_decode_timespec64(&con->last_keepalive_ack, &ceph_ts);
2588 prepare_read_tag(con);
2589 return 1;
2590 }
2593 * Write something to the socket. Called in a worker thread when the
2594 * socket appears to be writeable and we have something ready to send.
2596 static int try_write(struct ceph_connection *con)
2600 dout("try_write start %p state %lu\n", con, con->state);
2601 if (con->state != CON_STATE_PREOPEN &&
2602 con->state != CON_STATE_CONNECTING &&
2603 con->state != CON_STATE_NEGOTIATING &&
2604 con->state != CON_STATE_OPEN)
2605 return 0;
2607 /* open the socket first? */
2608 if (con->state == CON_STATE_PREOPEN) {
2610 con->state = CON_STATE_CONNECTING;
2612 con_out_kvec_reset(con);
2613 prepare_write_banner(con);
2614 prepare_read_banner(con);
2616 BUG_ON(con->in_msg);
2617 con->in_tag = CEPH_MSGR_TAG_READY;
2618 dout("try_write initiating connect on %p new state %lu\n",
2620 ret = ceph_tcp_connect(con);
2622 con->error_msg = "connect error";
2628 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2631 /* kvec data queued? */
2632 if (con->out_kvec_left) {
2633 ret = write_partial_kvec(con);
2634 if (ret <= 0)
2635 goto out;
2636 }
2637 if (con->out_skip) {
2638 ret = write_partial_skip(con);
2639 if (ret <= 0)
2640 goto out;
2641 }
2643 /* msg pages? */
2644 if (con->out_msg) {
2645 if (con->out_msg_done) {
2646 ceph_msg_put(con->out_msg);
2647 con->out_msg = NULL; /* we're done with this one */
2648 goto do_next;
2649 }
2651 ret = write_partial_message_data(con);
2652 if (ret == 1)
2653 goto more; /* we need to send the footer, too! */
2654 if (ret == 0)
2655 goto out;
2656 if (ret < 0) {
2657 dout("try_write write_partial_message_data err %d\n",
2658 ret);
2659 goto out;
2660 }
2661 }
2663 do_next:
2664 if (con->state == CON_STATE_OPEN) {
2665 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2666 prepare_write_keepalive(con);
2667 goto more;
2668 }
2669 /* is anything else pending? */
2670 if (!list_empty(&con->out_queue)) {
2671 prepare_write_message(con);
2672 goto more;
2673 }
2674 if (con->in_seq > con->in_seq_acked) {
2675 prepare_write_ack(con);
2676 goto more;
2677 }
2678 }
2680 /* Nothing to do! */
2681 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2682 dout("try_write nothing else to write.\n");
2685 dout("try_write done on %p ret %d\n", con, ret);
2690 * Read what we can from the socket.
2692 static int try_read(struct ceph_connection *con)
2697 dout("try_read start on %p state %lu\n", con, con->state);
2698 if (con->state != CON_STATE_CONNECTING &&
2699 con->state != CON_STATE_NEGOTIATING &&
2700 con->state != CON_STATE_OPEN)
2701 return 0;
2703 more:
2705 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2706 con->in_base_pos);
2708 if (con->state == CON_STATE_CONNECTING) {
2709 dout("try_read connecting\n");
2710 ret = read_partial_banner(con);
2711 if (ret <= 0)
2712 goto out;
2713 ret = process_banner(con);
2714 if (ret < 0)
2715 goto out;
2717 con->state = CON_STATE_NEGOTIATING;
2720 * Received banner is good, exchange connection info.
2721 * Do not reset out_kvec, as sending our banner raced
2722 * with receiving peer banner after connect completed.
2724 ret = prepare_write_connect(con);
2725 if (ret < 0)
2726 goto out;
2727 prepare_read_connect(con);
2729 /* Send connection info before awaiting response */
2730 goto out;
2731 }
2733 if (con->state == CON_STATE_NEGOTIATING) {
2734 dout("try_read negotiating\n");
2735 ret = read_partial_connect(con);
2736 if (ret <= 0)
2737 goto out;
2738 ret = process_connect(con);
2739 if (ret < 0)
2740 goto out;
2741 goto more;
2742 }
2744 WARN_ON(con->state != CON_STATE_OPEN);
2746 if (con->in_base_pos < 0) {
2748 * skipping + discarding content.
2750 ret = ceph_tcp_recvmsg(con->sock, NULL, -con->in_base_pos);
2751 if (ret <= 0)
2752 goto out;
2753 dout("skipped %d / %d bytes\n", ret, -con->in_base_pos);
2754 con->in_base_pos += ret;
2755 if (con->in_base_pos)
2756 goto more;
2757 }
2758 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2762 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2763 if (ret <= 0)
2764 goto out;
2765 dout("try_read got tag %d\n", (int)con->in_tag);
2766 switch (con->in_tag) {
2767 case CEPH_MSGR_TAG_MSG:
2768 prepare_read_message(con);
2769 break;
2770 case CEPH_MSGR_TAG_ACK:
2771 prepare_read_ack(con);
2772 break;
2773 case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
2774 prepare_read_keepalive_ack(con);
2775 break;
2776 case CEPH_MSGR_TAG_CLOSE:
2777 con_close_socket(con);
2778 con->state = CON_STATE_CLOSED;
2779 goto out;
2780 default:
2781 goto bad_tag;
2782 }
2783 }
2784 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2785 ret = read_partial_message(con);
2786 if (ret <= 0) {
2787 switch (ret) {
2788 case -EBADMSG:
2789 con->error_msg = "bad crc/signature";
2790 /* fall through */
2791 case -EBADE:
2792 ret = -EIO;
2793 break;
2794 case -EIO:
2795 con->error_msg = "io error";
2796 break;
2797 }
2798 goto out;
2799 }
2800 if (con->in_tag == CEPH_MSGR_TAG_READY)
2801 goto more;
2802 process_message(con);
2803 if (con->state == CON_STATE_OPEN)
2804 prepare_read_tag(con);
2805 goto more;
2806 }
2807 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2808 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2810 * the final handshake seq exchange is semantically
2811 * equivalent to an ACK
2813 ret = read_partial_ack(con);
2814 if (ret <= 0)
2815 goto out;
2816 process_ack(con);
2817 goto more;
2818 }
2819 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2820 ret = read_keepalive_ack(con);
2821 if (ret <= 0)
2822 goto out;
2823 goto more;
2824 }
2827 dout("try_read done on %p ret %d\n", con, ret);
2831 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2832 con->error_msg = "protocol error, garbage tag";
2839 * Atomically queue work on a connection after the specified delay.
2840 * Bump @con reference to avoid races with connection teardown.
2841 * Returns 0 if work was queued, or an error code otherwise.
2843 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2845 if (!con->ops->get(con)) {
2846 dout("%s %p ref count 0\n", __func__, con);
2850 if (delay >= HZ)
2851 delay = round_jiffies_relative(delay);
2853 dout("%s %p %lu\n", __func__, con, delay);
2854 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2855 dout("%s %p - already queued\n", __func__, con);
2863 static void queue_con(struct ceph_connection *con)
2865 (void) queue_con_delay(con, 0);
2868 static void cancel_con(struct ceph_connection *con)
2870 if (cancel_delayed_work(&con->work)) {
2871 dout("%s %p\n", __func__, con);
2876 static bool con_sock_closed(struct ceph_connection *con)
2878 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
2879 return false;
2881 #define CASE(x) \
2882 case CON_STATE_ ## x: \
2883 con->error_msg = "socket closed (con state " #x ")"; \
2884 break;
2886 switch (con->state) {
2887 CASE(CLOSED);
2888 CASE(PREOPEN);
2889 CASE(CONNECTING);
2890 CASE(NEGOTIATING);
2891 CASE(OPEN);
2892 CASE(STANDBY);
2893 default:
2894 pr_warn("%s con %p unrecognized state %lu\n",
2895 __func__, con, con->state);
2896 con->error_msg = "unrecognized con state";
2905 static bool con_backoff(struct ceph_connection *con)
2909 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2910 return false;
2912 ret = queue_con_delay(con, con->delay);
2913 if (ret) {
2914 dout("%s: con %p FAILED to back off %lu\n", __func__,
2915 con, con->delay);
2916 BUG_ON(ret == -ENOENT);
2917 con_flag_set(con, CON_FLAG_BACKOFF);
2918 }
2920 return true;
2921 }
2923 /* Finish fault handling; con->mutex must *not* be held here */
2925 static void con_fault_finish(struct ceph_connection *con)
2927 dout("%s %p\n", __func__, con);
2930 * in case we faulted due to authentication, invalidate our
2931 * current tickets so that we can get new ones.
2933 if (con->auth_retry) {
2934 dout("auth_retry %d, invalidating\n", con->auth_retry);
2935 if (con->ops->invalidate_authorizer)
2936 con->ops->invalidate_authorizer(con);
2937 con->auth_retry = 0;
2940 if (con->ops->fault)
2941 con->ops->fault(con);
2945 * Do some work on a connection. Drop a connection ref when we're done.
2947 static void ceph_con_workfn(struct work_struct *work)
2949 struct ceph_connection *con = container_of(work, struct ceph_connection,
2950 work.work);
2951 bool fault;
2953 mutex_lock(&con->mutex);
2955 while (true) {
2956 int ret;
2957 if ((fault = con_sock_closed(con))) {
2958 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2959 break;
2960 }
2961 if (con_backoff(con)) {
2962 dout("%s: con %p BACKOFF\n", __func__, con);
2963 break;
2964 }
2965 if (con->state == CON_STATE_STANDBY) {
2966 dout("%s: con %p STANDBY\n", __func__, con);
2967 break;
2968 }
2969 if (con->state == CON_STATE_CLOSED) {
2970 dout("%s: con %p CLOSED\n", __func__, con);
2971 BUG_ON(con->sock);
2972 break;
2973 }
2974 if (con->state == CON_STATE_PREOPEN) {
2975 dout("%s: con %p PREOPEN\n", __func__, con);
2976 BUG_ON(con->sock);
2977 }
2979 ret = try_read(con);
2980 if (ret < 0) {
2981 if (ret == -EAGAIN)
2982 continue;
2983 if (!con->error_msg)
2984 con->error_msg = "socket error on read";
2985 fault = true;
2986 break;
2987 }
2989 ret = try_write(con);
2990 if (ret < 0) {
2991 if (ret == -EAGAIN)
2992 continue;
2993 if (!con->error_msg)
2994 con->error_msg = "socket error on write";
2995 fault = true;
2996 }
2998 break; /* If we make it to here, we're done */
2999 }
3000 if (fault)
3001 con_fault(con);
3002 mutex_unlock(&con->mutex);
3004 if (fault)
3005 con_fault_finish(con);
3007 con->ops->put(con);
3008 }
3011 * Generic error/fault handler. A retry mechanism is used with
3012 * exponential backoff.
3013 */
3014 static void con_fault(struct ceph_connection *con)
3016 dout("fault %p state %lu to peer %s\n",
3017 con, con->state, ceph_pr_addr(&con->peer_addr));
3019 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
3020 ceph_pr_addr(&con->peer_addr), con->error_msg);
3021 con->error_msg = NULL;
3023 WARN_ON(con->state != CON_STATE_CONNECTING &&
3024 con->state != CON_STATE_NEGOTIATING &&
3025 con->state != CON_STATE_OPEN);
3027 ceph_con_reset_protocol(con);
3029 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
3030 dout("fault on LOSSYTX channel, marking CLOSED\n");
3031 con->state = CON_STATE_CLOSED;
3032 return;
3033 }
3035 /* Requeue anything that hasn't been acked */
3036 list_splice_init(&con->out_sent, &con->out_queue);
3038 /* If there are no messages queued or keepalive pending, place
3039 * the connection in a STANDBY state */
3040 if (list_empty(&con->out_queue) &&
3041 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
3042 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
3043 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
3044 con->state = CON_STATE_STANDBY;
3045 } else {
3046 /* retry after a delay. */
3047 con->state = CON_STATE_PREOPEN;
3048 if (!con->delay) {
3049 con->delay = BASE_DELAY_INTERVAL;
3050 } else if (con->delay < MAX_DELAY_INTERVAL) {
3051 con->delay *= 2;
3052 if (con->delay > MAX_DELAY_INTERVAL)
3053 con->delay = MAX_DELAY_INTERVAL;
3054 }
3055 con_flag_set(con, CON_FLAG_BACKOFF);
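/*
 * Resulting backoff progression (illustrative): a connection that
 * keeps faulting is requeued after BASE_DELAY_INTERVAL, then 2x, 4x,
 * 8x that, and so on, with the doubling clamped at MAX_DELAY_INTERVAL.
 */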
3061 void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
3063 u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
3064 msgr->inst.addr.nonce = cpu_to_le32(nonce);
3065 encode_my_addr(msgr);
3069 * initialize a new messenger instance
3071 void ceph_messenger_init(struct ceph_messenger *msgr,
3072 struct ceph_entity_addr *myaddr)
3074 spin_lock_init(&msgr->global_seq_lock);
3076 if (myaddr)
3077 msgr->inst.addr = *myaddr;
3079 /* select a random nonce */
3080 msgr->inst.addr.type = 0;
3081 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
3082 encode_my_addr(msgr);
3084 atomic_set(&msgr->stopping, 0);
3085 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
3087 dout("%s %p\n", __func__, msgr);
3089 EXPORT_SYMBOL(ceph_messenger_init);
3091 void ceph_messenger_fini(struct ceph_messenger *msgr)
3093 put_net(read_pnet(&msgr->net));
3095 EXPORT_SYMBOL(ceph_messenger_fini);
3097 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
3099 if (msg->con)
3100 msg->con->ops->put(msg->con);
3102 msg->con = con ? con->ops->get(con) : NULL;
3103 BUG_ON(msg->con != con);
3106 static void clear_standby(struct ceph_connection *con)
3108 /* come back from STANDBY? */
3109 if (con->state == CON_STATE_STANDBY) {
3110 dout("clear_standby %p and ++connect_seq\n", con);
3111 con->state = CON_STATE_PREOPEN;
3112 con->connect_seq++;
3113 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
3114 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
3119 * Queue up an outgoing message on the given connection.
3121 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3124 msg->hdr.src = con->msgr->inst.name;
3125 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
3126 msg->needs_out_seq = true;
3128 mutex_lock(&con->mutex);
3130 if (con->state == CON_STATE_CLOSED) {
3131 dout("con_send %p closed, dropping %p\n", con, msg);
3133 mutex_unlock(&con->mutex);
3137 msg_con_set(msg, con);
3139 BUG_ON(!list_empty(&msg->list_head));
3140 list_add_tail(&msg->list_head, &con->out_queue);
3141 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3142 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3143 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3144 le32_to_cpu(msg->hdr.front_len),
3145 le32_to_cpu(msg->hdr.middle_len),
3146 le32_to_cpu(msg->hdr.data_len));
3148 clear_standby(con);
3149 mutex_unlock(&con->mutex);
3151 /* if there wasn't anything waiting to send before, queue
3152 * new work */
3153 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3154 queue_con(con);
3155 }
3156 EXPORT_SYMBOL(ceph_con_send);
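/*
 * Typical caller pattern (sketch; assumes a connection that has been
 * opened with ceph_con_open() and a "payload" buffer of front_len
 * bytes - both hypothetical here; error handling elided):
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	if (!msg)
 *		return -ENOMEM;
 *	memcpy(msg->front.iov_base, payload, front_len);
 *	ceph_con_send(con, msg);	// consumes the caller's msg ref
 */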
3159 * Revoke a message that was previously queued for send
3161 void ceph_msg_revoke(struct ceph_msg *msg)
3163 struct ceph_connection *con = msg->con;
3166 dout("%s msg %p null con\n", __func__, msg);
3167 return; /* Message not in our possession */
3170 mutex_lock(&con->mutex);
3171 if (!list_empty(&msg->list_head)) {
3172 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3173 list_del_init(&msg->list_head);
3178 if (con->out_msg == msg) {
3179 BUG_ON(con->out_skip);
3181 if (con->out_msg_done) {
3182 con->out_skip += con_out_kvec_skip(con);
3183 } else {
3184 BUG_ON(!msg->data_length);
3185 con->out_skip += sizeof_footer(con);
3186 }
3187 /* data, middle, front */
3188 if (msg->data_length)
3189 con->out_skip += msg->cursor.total_resid;
3190 if (msg->middle)
3191 con->out_skip += con_out_kvec_skip(con);
3192 con->out_skip += con_out_kvec_skip(con);
3194 dout("%s %p msg %p - was sending, will write %d skip %d\n",
3195 __func__, con, msg, con->out_kvec_bytes, con->out_skip);
3197 con->out_msg = NULL;
3201 mutex_unlock(&con->mutex);
3205 * Revoke a message that we may be reading data into
3207 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3209 struct ceph_connection *con = msg->con;
3212 dout("%s msg %p null con\n", __func__, msg);
3213 return; /* Message not in our possession */
3216 mutex_lock(&con->mutex);
3217 if (con->in_msg == msg) {
3218 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3219 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3220 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3222 /* skip rest of message */
3223 dout("%s %p msg %p revoked\n", __func__, con, msg);
3224 con->in_base_pos = con->in_base_pos -
3225 sizeof(struct ceph_msg_header) -
3226 front_len -
3227 middle_len -
3228 data_len -
3229 sizeof(struct ceph_msg_footer);
3230 ceph_msg_put(con->in_msg);
3231 con->in_msg = NULL;
3232 con->in_tag = CEPH_MSGR_TAG_READY;
3233 con->in_seq++;
3234 } else {
3235 dout("%s %p in_msg %p msg %p no-op\n",
3236 __func__, con, con->in_msg, msg);
3238 mutex_unlock(&con->mutex);
3242 * Queue a keepalive byte to ensure the tcp connection is alive.
3244 void ceph_con_keepalive(struct ceph_connection *con)
3246 dout("con_keepalive %p\n", con);
3247 mutex_lock(&con->mutex);
3248 clear_standby(con);
3249 con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
3250 mutex_unlock(&con->mutex);
3252 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3253 queue_con(con);
3254 }
3255 EXPORT_SYMBOL(ceph_con_keepalive);
3257 bool ceph_con_keepalive_expired(struct ceph_connection *con,
3258 unsigned long interval)
3260 if (interval > 0 &&
3261 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
3262 struct timespec64 now;
3263 struct timespec64 ts;
3264 ktime_get_real_ts64(&now);
3265 jiffies_to_timespec64(interval, &ts);
3266 ts = timespec64_add(con->last_keepalive_ack, ts);
3267 return timespec64_compare(&now, &ts) >= 0;
3268 }
3269 return false;
3270 }
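/*
 * Sketch of how the two keepalive helpers combine (hypothetical
 * caller; "timeout" is whatever interval, in jiffies, the client
 * considers fatal):
 *
 *	ceph_con_keepalive(con);
 *	...
 *	if (ceph_con_keepalive_expired(con, timeout))
 *		pr_warn("peer stopped answering keepalives\n");
 */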
3272 static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
3274 BUG_ON(msg->num_data_items >= msg->max_data_items);
3275 return &msg->data[msg->num_data_items++];
3278 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3280 if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
3281 int num_pages = calc_pages_for(data->alignment, data->length);
3282 ceph_release_page_vector(data->pages, num_pages);
3283 } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
3284 ceph_pagelist_release(data->pagelist);
3288 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
3289 size_t length, size_t alignment, bool own_pages)
3291 struct ceph_msg_data *data;
3296 data = ceph_msg_data_add(msg);
3297 data->type = CEPH_MSG_DATA_PAGES;
3298 data->pages = pages;
3299 data->length = length;
3300 data->alignment = alignment & ~PAGE_MASK;
3301 data->own_pages = own_pages;
3303 msg->data_length += length;
3305 EXPORT_SYMBOL(ceph_msg_data_add_pages);
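/*
 * Usage sketch (hypothetical; assumes "len" bytes starting at offset 0
 * of a freshly allocated page vector, which the msg then owns and
 * frees on the final put):
 *
 *	int n = calc_pages_for(0, len);
 *	struct page **pages = ceph_alloc_page_vector(n, GFP_NOFS);
 *
 *	if (!IS_ERR(pages))
 *		ceph_msg_data_add_pages(msg, pages, len, 0, true);
 */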
3307 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
3308 struct ceph_pagelist *pagelist)
3310 struct ceph_msg_data *data;
3313 BUG_ON(!pagelist->length);
3315 data = ceph_msg_data_add(msg);
3316 data->type = CEPH_MSG_DATA_PAGELIST;
3317 refcount_inc(&pagelist->refcnt);
3318 data->pagelist = pagelist;
3320 msg->data_length += pagelist->length;
3322 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
3324 #ifdef CONFIG_BLOCK
3325 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
3326 u32 length)
3328 struct ceph_msg_data *data;
3330 data = ceph_msg_data_add(msg);
3331 data->type = CEPH_MSG_DATA_BIO;
3332 data->bio_pos = *bio_pos;
3333 data->bio_length = length;
3335 msg->data_length += length;
3337 EXPORT_SYMBOL(ceph_msg_data_add_bio);
3338 #endif /* CONFIG_BLOCK */
3340 void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
3341 struct ceph_bvec_iter *bvec_pos)
3343 struct ceph_msg_data *data;
3345 data = ceph_msg_data_add(msg);
3346 data->type = CEPH_MSG_DATA_BVECS;
3347 data->bvec_pos = *bvec_pos;
3349 msg->data_length += bvec_pos->iter.bi_size;
3351 EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
3354 * construct a new message with given type, size
3355 * the new msg has a ref count of 1.
3357 struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
3358 gfp_t flags, bool can_fail)
3362 m = kmem_cache_zalloc(ceph_msg_cache, flags);
3363 if (m == NULL)
3364 goto out;
3366 m->hdr.type = cpu_to_le16(type);
3367 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3368 m->hdr.front_len = cpu_to_le32(front_len);
3370 INIT_LIST_HEAD(&m->list_head);
3371 kref_init(&m->kref);
3374 if (front_len) {
3375 m->front.iov_base = ceph_kvmalloc(front_len, flags);
3376 if (m->front.iov_base == NULL) {
3377 dout("ceph_msg_new can't allocate %d bytes\n",
3378 front_len);
3379 goto out2;
3380 }
3381 } else {
3382 m->front.iov_base = NULL;
3383 }
3384 m->front_alloc_len = m->front.iov_len = front_len;
3386 if (max_data_items) {
3387 m->data = kmalloc_array(max_data_items, sizeof(*m->data),
3388 flags);
3389 if (!m->data)
3390 goto out2;
3392 m->max_data_items = max_data_items;
3395 dout("ceph_msg_new %p front %d\n", m, front_len);
3402 pr_err("msg_new can't create type %d front %d\n", type,
3406 dout("msg_new can't create type %d front %d\n", type,
3411 EXPORT_SYMBOL(ceph_msg_new2);
3413 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3416 return ceph_msg_new2(type, front_len, 0, flags, can_fail);
3418 EXPORT_SYMBOL(ceph_msg_new);
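/*
 * Example (sketch): a caller that knows it will attach at most two
 * data items can preallocate the data array up front:
 *
 *	struct ceph_msg *m = ceph_msg_new2(CEPH_MSG_OSD_OP, front_len, 2,
 *					   GFP_NOFS, false);
 *
 * ceph_msg_new() is simply the max_data_items == 0 special case.
 */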
3421 * Allocate "middle" portion of a message, if it is needed and wasn't
3422 * allocated by alloc_msg. This allows us to read a small fixed-size
3423 * per-type header in the front and then gracefully fail (i.e.,
3424 * propagate the error to the caller based on info in the front) when
3425 * the middle is too large.
3427 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3429 int type = le16_to_cpu(msg->hdr.type);
3430 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3432 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3433 ceph_msg_type_name(type), middle_len);
3434 BUG_ON(!middle_len);
3435 BUG_ON(msg->middle);
3437 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3438 if (!msg->middle)
3439 return -ENOMEM;
3440 return 0;
3441 }
3444 * Allocate a message for receiving an incoming message on a
3445 * connection, and save the result in con->in_msg. Uses the
3446 * connection's private alloc_msg op if available.
3448 * Returns 0 on success, or a negative error code.
3450 * On success, if we set *skip = 1:
3451 * - the next message should be skipped and ignored.
3452 * - con->in_msg == NULL
3453 * or if we set *skip = 0:
3454 * - con->in_msg is non-null.
3455 * On error (ENOMEM, EAGAIN, ...),
3456 * - con->in_msg == NULL
3458 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3460 struct ceph_msg_header *hdr = &con->in_hdr;
3461 int middle_len = le32_to_cpu(hdr->middle_len);
3462 struct ceph_msg *msg;
3463 int ret = 0;
3465 BUG_ON(con->in_msg != NULL);
3466 BUG_ON(!con->ops->alloc_msg);
3468 mutex_unlock(&con->mutex);
3469 msg = con->ops->alloc_msg(con, hdr, skip);
3470 mutex_lock(&con->mutex);
3471 if (con->state != CON_STATE_OPEN) {
3472 if (msg)
3473 ceph_msg_put(msg);
3474 return -EAGAIN;
3475 }
3476 if (msg) {
3477 BUG_ON(*skip);
3478 msg_con_set(msg, con);
3479 con->in_msg = msg;
3480 } else {
3481 /*
3482 * Null message pointer means either we should skip
3483 * this message or we couldn't allocate memory. The
3484 * former is not an error.
3489 con->error_msg = "error allocating memory for incoming message";
3492 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3494 if (middle_len && !con->in_msg->middle) {
3495 ret = ceph_alloc_middle(con, con->in_msg);
3496 if (ret < 0) {
3497 ceph_msg_put(con->in_msg);
3498 con->in_msg = NULL;
3499 }
3500 }
3502 return ret;
3503 }
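/*
 * Sketch of a minimal alloc_msg connection op satisfying the contract
 * documented above (hypothetical; real ops typically match hdr against
 * an expected reply and set *skip for unsolicited messages):
 *
 *	static struct ceph_msg *my_alloc_msg(struct ceph_connection *con,
 *					     struct ceph_msg_header *hdr,
 *					     int *skip)
 *	{
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		*skip = 0;
 *		return ceph_msg_new(le16_to_cpu(hdr->type), front_len,
 *				    GFP_NOFS, false);
 *	}
 */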
3507 * Free a generically kmalloc'd message.
3509 static void ceph_msg_free(struct ceph_msg *m)
3511 dout("%s %p\n", __func__, m);
3512 kvfree(m->front.iov_base);
3513 kfree(m->data);
3514 kmem_cache_free(ceph_msg_cache, m);
3517 static void ceph_msg_release(struct kref *kref)
3519 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
3520 int i;
3522 dout("%s %p\n", __func__, m);
3523 WARN_ON(!list_empty(&m->list_head));
3525 msg_con_set(m, NULL);
3527 /* drop middle, data, if any */
3528 if (m->middle) {
3529 ceph_buffer_put(m->middle);
3530 m->middle = NULL;
3531 }
3533 for (i = 0; i < m->num_data_items; i++)
3534 ceph_msg_data_destroy(&m->data[i]);
3536 if (m->pool)
3537 ceph_msgpool_put(m->pool, m);
3538 else
3539 ceph_msg_free(m);
3540 }
3542 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3544 dout("%s %p (was %d)\n", __func__, msg,
3545 kref_read(&msg->kref));
3546 kref_get(&msg->kref);
3547 return msg;
3548 }
3549 EXPORT_SYMBOL(ceph_msg_get);
3551 void ceph_msg_put(struct ceph_msg *msg)
3553 dout("%s %p (was %d)\n", __func__, msg,
3554 kref_read(&msg->kref));
3555 kref_put(&msg->kref, ceph_msg_release);
3557 EXPORT_SYMBOL(ceph_msg_put);
3559 void ceph_msg_dump(struct ceph_msg *msg)
3561 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3562 msg->front_alloc_len, msg->data_length);
3563 print_hex_dump(KERN_DEBUG, "header: ",
3564 DUMP_PREFIX_OFFSET, 16, 1,
3565 &msg->hdr, sizeof(msg->hdr), true);
3566 print_hex_dump(KERN_DEBUG, " front: ",
3567 DUMP_PREFIX_OFFSET, 16, 1,
3568 msg->front.iov_base, msg->front.iov_len, true);
3570 print_hex_dump(KERN_DEBUG, "middle: ",
3571 DUMP_PREFIX_OFFSET, 16, 1,
3572 msg->middle->vec.iov_base,
3573 msg->middle->vec.iov_len, true);
3574 print_hex_dump(KERN_DEBUG, "footer: ",
3575 DUMP_PREFIX_OFFSET, 16, 1,
3576 &msg->footer, sizeof(msg->footer), true);
3578 EXPORT_SYMBOL(ceph_msg_dump);