/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>
/* RDS Network protocol version */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
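
/* Illustrative sketch (not part of the original header): how the version
 * macros above compose and decompose a protocol version word.  The helper
 * name is hypothetical.
 */
static inline int rds_example_version_is_3_1(u16 version)
{
	/* RDS_PROTOCOL(3, 1) == 0x0301 == RDS_PROTOCOL_3_1 */
	return RDS_PROTOCOL_MAJOR(version) == 3 &&
	       RDS_PROTOCOL_MINOR(version) == 1;
}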
/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP. Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value
 * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept
 * to ensure compatibility with older RDS modules. Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT	18634
#define KERNEL_HAS_ATOMIC64
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif
#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))
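
/* Illustrative sketch (assumption, not from the original header): splitting a
 * payload into RDS_FRAG_SIZE (4 KiB) fragments with ceiling division.  A
 * maximal RDS_MAX_MSG_SIZE (1 MiB) message therefore spans 256 fragments.
 */
static inline unsigned int rds_example_frag_count(unsigned int payload_len)
{
	return DIV_ROUND_UP(payload_len, RDS_FRAG_SIZE);
}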
#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
	struct rb_node m_rb_node;
	struct in6_addr m_addr;
	wait_queue_head_t m_waitq;
	struct list_head m_conn_list;
	unsigned long m_page_addrs[RDS_CONG_MAP_PAGES];
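
/* Illustrative sketch (assumption): how a 16-bit port indexes into the
 * per-page congestion bitmap above.  It mirrors what rds_cong_set_bit() is
 * expected to do: split the port into a page index and a bit offset within
 * RDS_CONG_MAP_PAGE_BITS.  Assumes the full struct rds_cong_map definition
 * (its opening declaration is elided above) is in scope; the helper name is
 * hypothetical.
 */
static inline void rds_example_cong_mark_port(struct rds_cong_map *map,
					      __be16 port)
{
	unsigned long i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	unsigned long off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	set_bit_le(off, (void *)map->m_page_addrs[i]);
}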
/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
	RDS_CONN_DISCONNECTING,
/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_RECV_REFILL		3
#define RDS_DESTROY_PENDING	4
/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
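
/* Illustrative sketch (assumption): choosing a multipath index for a socket.
 * Assumes the full struct rds_sock definition (including the rs_hash_initval
 * field used by RDS_MPATH_HASH()) is visible; RDS_MPATH_WORKERS must remain
 * a power of 2 so the "& (n - 1)" mask behaves as a modulo.
 */
static inline int rds_example_mpath_index(struct rds_sock *rs)
{
	return RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
}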
/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection *cp_conn;
	struct rds_message *cp_xmit_rm;
	unsigned long cp_xmit_sg;
	unsigned int cp_xmit_hdr_off;
	unsigned int cp_xmit_data_off;
	unsigned int cp_xmit_atomic_sent;
	unsigned int cp_xmit_rdma_sent;
	unsigned int cp_xmit_data_sent;

	spinlock_t cp_lock;		/* protect msg queues */
	struct list_head cp_send_queue;
	struct list_head cp_retrans;

	void *cp_transport_data;

	unsigned long cp_send_gen;
	unsigned long cp_flags;
	unsigned long cp_reconnect_jiffies;
	struct delayed_work cp_send_w;
	struct delayed_work cp_recv_w;
	struct delayed_work cp_conn_w;
	struct work_struct cp_down_w;
	struct mutex cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t cp_waitq;

	unsigned int cp_unacked_packets;
	unsigned int cp_unacked_bytes;
	unsigned int cp_index;
};
/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node c_hash_node;
	struct in6_addr c_laddr;
	struct in6_addr c_faddr;
	int c_dev_if;		/* ifindex used for this conn */
	int c_bound_if;		/* ifindex of c_laddr */
	unsigned int c_loopback:1,
		     c_isv6:1,
		     c_ping_triggered:1,
		     c_pad_to_32:29;
	struct rds_connection *c_passive;
	struct rds_transport *c_trans;

	struct rds_cong_map *c_lcong;
	struct rds_cong_map *c_fcong;

	/* Protocol version */
	unsigned int c_version;
	possible_net_t c_net;

	struct list_head c_map_item;
	unsigned long c_map_queued;

	struct rds_conn_path *c_path;
	wait_queue_head_t c_hs_waitq;	/* handshake waitq */
};
static inline struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}
#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255
/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it returns a pong message without the additional control information,
 * which tells the sender that the peer is an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define	RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
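
/* Illustrative sketch (assumption): classifying an incoming datagram as a
 * handshake probe.  A probe uses the reserved port in exactly one direction,
 * while an ordinary ping/pong uses port 0 on both sides.  The helper name is
 * hypothetical.
 */
static inline bool rds_example_is_hs_probe(__be16 sport, __be16 dport)
{
	return RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport));
}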
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

	u8 h_exthdr[RDS_HEADER_EXT_SPACE];
/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32 h_version;
};
/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32 h_rdma_rkey;
};
/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32 h_rdma_rkey;
	__be32 h_rdma_offset;
};
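
/* Illustrative sketch (assumption): advertising an RDMA destination to the
 * peer by attaching an RDS_EXTHDR_RDMA_DEST extension to an outgoing header.
 * rds_message_add_extension() is only declared further down in this file, so
 * treat this purely as an ordering-agnostic example; the helper name is
 * hypothetical.
 */
static inline int rds_example_add_rdma_dest(struct rds_header *hdr,
					    u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext = {
		.h_rdma_rkey = cpu_to_be32(r_key),
		.h_rdma_offset = cpu_to_be32(offset),
	};

	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST,
					 &ext, sizeof(ext));
}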
/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
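
/* Illustrative sketch (assumption): scanning a received header's extension
 * area for an RDS_EXTHDR_NPATHS announcement using the
 * rds_message_next_extension() helper declared later in this file.  The
 * 2-byte value is assumed to be big-endian, and legacy peers that advertise
 * nothing are treated as single-path.
 */
static inline u16 rds_example_peer_npaths(struct rds_header *hdr)
{
	unsigned int pos = 0, type, len;
	__be16 npaths;
	u16 result = 1;

	for (;;) {
		len = sizeof(npaths);
		type = rds_message_next_extension(hdr, &pos, &npaths, &len);
		if (type == RDS_EXTHDR_NONE)
			break;
		if (type == RDS_EXTHDR_NPATHS && len == sizeof(npaths))
			result = min_t(u16, be16_to_cpu(npaths),
				       RDS_MPATH_WORKERS);
	}
	return result;
}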
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define	RDS_MSG_RX_HDR		0
#define	RDS_MSG_RX_START	1
#define	RDS_MSG_RX_END		2
#define	RDS_MSG_RX_CMSG		3
struct rds_incoming {
	refcount_t i_refcount;
	struct list_head i_item;
	struct rds_connection *i_conn;
	struct rds_conn_path *i_conn_path;
	struct rds_header i_hdr;
	unsigned long i_rx_jiffies;
	struct in6_addr i_saddr;

	rds_rdma_cookie_t i_rdma_cookie;

	u64 i_rx_lat_trace[RDS_RX_MAX_TRACES];
};
	struct rb_node r_rb_node;
	refcount_t r_refcount;

	/* A copy of the creation flags */
	unsigned int r_use_once:1;
	unsigned int r_invalidate:1;
	unsigned int r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long r_state;
	struct rds_sock *r_sock;	/* back pointer to the socket that owns us */
	struct rds_transport *r_trans;
	void *r_trans_private;

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0
static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
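
/* Illustrative sketch (assumption): the RDMA cookie packs <r_key, offset>
 * into a single u64 carried in the header; the accessors above recover the
 * two halves.  The helper name is hypothetical.
 */
static inline void rds_example_cookie_roundtrip(void)
{
	rds_rdma_cookie_t cookie = rds_rdma_make_cookie(0x1234, 4096);

	WARN_ON(rds_rdma_cookie_key(cookie) != 0x1234);
	WARN_ON(rds_rdma_cookie_offset(cookie) != 4096);
}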
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP	0
#define RDS_ATOMIC_TYPE_FADD	1
/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8
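
/* Illustrative sketch (assumption): the lock-avoidance pattern described in
 * the comment above.  Assumes the full struct rds_message definition
 * (abridged below) is visible; a queue walker only needs to take the other
 * side's lock while the message is still flagged as being on that list.
 */
static inline bool rds_example_msg_still_on_conn(struct rds_message *rm)
{
	return test_bit(RDS_MSG_ON_CONN, &rm->m_flags);
}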
struct rds_znotifier {

struct rds_msg_zcopy_info {
	struct list_head rs_zcookie_next;

	struct rds_znotifier znotif;
	struct rds_zcopy_cookies zcookies;

struct rds_msg_zcopy_queue {
	struct list_head zcookie_head;
	spinlock_t lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}
struct rds_iov_vector {
	struct rds_iovec *iov;

struct rds_iov_vector_arr {
	struct rds_iov_vector *vec;
	refcount_t m_refcount;
	struct list_head m_sock_item;
	struct list_head m_conn_item;
	struct rds_incoming m_inc;

	struct in6_addr m_daddr;
	unsigned long m_flags;

	/* Never access m_rs without holding m_rs_lock. */
	spinlock_t m_rs_lock;
	wait_queue_head_t m_flush_wait;

	struct rds_sock *m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t m_rdma_cookie;

	unsigned int m_used_sgs;
	unsigned int m_total_sgs;
	struct rm_atomic_op {
		uint64_t compare_mask;
		uint64_t nocarry_mask;

		unsigned int op_notify:1;
		unsigned int op_recverr:1;
		unsigned int op_mapped:1;
		unsigned int op_silent:1;
		unsigned int op_active:1;
		struct scatterlist *op_sg;
		struct rds_notifier *op_notifier;

		struct rds_mr *op_rdma_mr;
	} atomic;

	struct rm_rdma_op {
		unsigned int op_write:1;
		unsigned int op_fence:1;
		unsigned int op_notify:1;
		unsigned int op_recverr:1;
		unsigned int op_mapped:1;
		unsigned int op_silent:1;
		unsigned int op_active:1;
		unsigned int op_bytes;
		unsigned int op_nents;
		unsigned int op_count;
		struct scatterlist *op_sg;
		struct rds_notifier *op_notifier;

		struct rds_mr *op_rdma_mr;
	} rdma;

	struct rm_data_op {
		unsigned int op_active:1;
		unsigned int op_notify:1;
		unsigned int op_nents;
		unsigned int op_count;
		unsigned int op_dmasg;
		unsigned int op_dmaoff;
		struct rds_znotifier *op_mmp_znotifier;
		struct scatterlist *op_sg;
	} data;

	struct rds_conn_path *m_conn_path;
/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head n_list;
	uint64_t n_user_token;
};
/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define	RDS_TRANS_LOOP	3
/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 * part of a message. The caller serializes on the send_sem so this
 * doesn't need to be reentrant for a given conn. The header must be
 * sent before the data payload. .xmit must be prepared to send a
 * message with no data payload. .xmit should return the number of
 * bytes that were sent down the connection, including header bytes.
 * Returning 0 tells the caller that it doesn't need to perform any
 * additional work now. This is usually the case when the transport has
 * filled the sending queue for its connection and will handle
 * triggering the rds thread to continue the send when space becomes
 * available. Returning -EAGAIN tells the caller to retry the send
 * immediately. Returning -ENOMEM tells the caller to retry the send at
 * some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
 * it returns, the connection can not call rds_recv_incoming().
 * This will only be called once after conn_connect returns
 * non-zero success. The caller serializes this with
 * the send and connecting paths (xmit_* and conn_*). The
 * transport is responsible for other serialization, including
 * rds_recv_incoming(). This is called in process context but
 * should try hard not to block.
 */
struct rds_transport {
	char t_name[TRANSNAMSIZ];
	struct list_head t_item;
	struct module *t_owner;
	unsigned int t_prefer_loopback:1,
		     t_mp_capable:1;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
};
/* Bind hash table key length. It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))
	/* bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head rs_bound_node;
	u8 rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6 rs_bound_sin6;
#define rs_bound_addr		rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4	rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port		rs_bound_sin6.sin6_port
#define rs_bound_scope_id	rs_bound_sin6.sin6_scope_id
	struct in6_addr rs_conn_addr;
#define rs_conn_addr_v4		rs_conn_addr.s6_addr32[3]

	struct rds_transport *rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection *rs_conn;

	/* flag indicating we were congested or not */
	/* seen congestion (ENOBUFS) when sending? */
	int rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	struct list_head rs_send_queue;
	struct list_head rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t rs_cong_mask;
	uint64_t rs_cong_notify;
	struct list_head rs_cong_list;
	unsigned long rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t rs_recv_lock;
	struct list_head rs_recv_queue;

	/* just for stats reporting */
	struct list_head rs_item;

	/* these have their own lock */
	spinlock_t rs_rdma_lock;
	struct rb_root rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char rs_recverr,
		      rs_cong_monitor;

	/* Socket receive path trace points */
	u8 rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}

static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}
/* The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}

static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
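
/* Illustrative sketch (assumption): because the helpers above already halve
 * sk_sndbuf/sk_rcvbuf, an SO_SNDBUF request of 1 MiB (stored internally as
 * 2 MiB) comes back out as a 1 MiB payload budget.  A sender might cap a
 * queued payload against that budget like this; the helper name is
 * hypothetical.
 */
static inline size_t rds_example_cap_payload(struct rds_sock *rs, size_t len)
{
	return min_t(size_t, len, rds_sk_sndbuf(rs));
}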
struct rds_statistics {
	uint64_t s_conn_reset;
	uint64_t s_recv_drop_bad_checksum;
	uint64_t s_recv_drop_old_seq;
	uint64_t s_recv_drop_no_sock;
	uint64_t s_recv_drop_dead_sock;
	uint64_t s_recv_deliver_raced;
	uint64_t s_recv_delivered;
	uint64_t s_recv_queued;
	uint64_t s_recv_immediate_retry;
	uint64_t s_recv_delayed_retry;
	uint64_t s_recv_ack_required;
	uint64_t s_recv_rdma_bytes;
	uint64_t s_recv_ping;
	uint64_t s_send_queue_empty;
	uint64_t s_send_queue_full;
	uint64_t s_send_lock_contention;
	uint64_t s_send_lock_queue_raced;
	uint64_t s_send_immediate_retry;
	uint64_t s_send_delayed_retry;
	uint64_t s_send_drop_acked;
	uint64_t s_send_ack_required;
	uint64_t s_send_queued;
	uint64_t s_send_rdma;
	uint64_t s_send_rdma_bytes;
	uint64_t s_send_pong;
	uint64_t s_page_remainder_hit;
	uint64_t s_page_remainder_miss;
	uint64_t s_copy_to_user;
	uint64_t s_copy_from_user;
	uint64_t s_cong_update_queued;
	uint64_t s_cong_update_received;
	uint64_t s_cong_send_error;
	uint64_t s_cong_send_blocked;
	uint64_t s_recv_bytes_added_to_socket;
	uint64_t s_recv_bytes_removed_from_socket;
};
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer, size_t item_len);
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)
static inline int rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}
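
/* Illustrative sketch (assumption): a connect worker claims a path by moving
 * it RDS_CONN_DOWN -> RDS_CONN_CONNECTING; both values come from the
 * connection-state enum abridged near the top of this file.  Losing the race
 * simply means another context claimed the path first.  The helper name is
 * hypothetical.
 */
static inline bool rds_example_claim_path(struct rds_conn_path *cp)
{
	return rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
}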
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);
static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
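
/* Illustrative sketch (assumption): checksum round trip.  The sender stamps
 * the header just before transmit; the receiver accepts it when the checksum
 * verifies (a zero checksum is treated as "not checksummed" and accepted).
 * The helper name is hypothetical.
 */
static inline int rds_example_checksum_roundtrip(struct rds_header *hdr)
{
	rds_message_make_checksum(hdr);
	return rds_message_verify_checksum(hdr);	/* expected: non-zero */
}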
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);
void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (refcount_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}
static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
		(conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
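
/* Illustrative sketch (assumption): typical use of the per-CPU statistics
 * helpers above, e.g. accounting a queued send and the bytes copied in from
 * user space.  Member names come from struct rds_statistics above; the helper
 * name is hypothetical.
 */
static inline void rds_example_account_send(unsigned int copied)
{
	rds_stats_inc(s_send_queued);
	rds_stats_add(s_copy_from_user, copied);
}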
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);