/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>
#include <net/addrconf.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"
/*
 * Set the selected protocol version
 */
static void rds_ib_set_protocol(struct rds_connection *conn, unsigned int version)
{
	conn->c_version = version;
}
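
/*
 * For reference: RDS_PROTOCOL() (see rds.h) packs the version as
 * (major << 8) | minor, so e.g. RDS_PROTOCOL(4, 1) == 0x0401, and
 * RDS_PROTOCOL_MAJOR()/RDS_PROTOCOL_MINOR() recover the two halves.
 */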
/*
 * Set up flow control
 */
static void rds_ib_set_flow_control(struct rds_connection *conn, u32 credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (rds_ib_sysctl_flow_control && credits != 0) {
		/* We're doing flow control */
		ic->i_flowctl = 1;
		rds_ib_send_add_credits(conn, credits);
	} else {
		ic->i_flowctl = 0;
	}
}
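
/*
 * Note on the credit word used here and in rds_ib_cm_fill_conn_param():
 * ic->i_credits packs two 16-bit counters into a single atomic_t (see the
 * IB_*_CREDITS macros in ib.h), with send credits in the low half and
 * newly posted receive buffers in the high half.  Advertising eight
 * freshly posted buffers therefore adds 8 << 16 to i_credits, and
 * IB_GET_POST_CREDITS() is the matching shift back down.
 */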
/*
 * Tune RNR behavior. Without flow control, we use a rather
 * low timeout, but not the absolute minimum - this should
 * be tunable.
 *
 * We already set the RNR retry count to 7 (which is the
 * smallest infinite number :-) above.
 * If flow control is off, we want to change this back to 0
 * so that we learn quickly when our credit accounting is
 * buggy.
 *
 * Caller passes in a qp_attr pointer - don't waste stack space
 * by allocating this twice.
 */
static void
rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr)
{
	int ret;

	attr->min_rnr_timer = IB_RNR_TIMER_000_32;
	ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER);
	if (ret)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_MIN_RNR_TIMER): err=%d\n", -ret);
}
/*
 * Connection established.
 * We get here for both outgoing and incoming connection.
 */
void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_event *event)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	const union rds_ib_conn_priv *dp = NULL;
	struct ib_qp_attr qp_attr;
	__be64 ack_seq = 0;
	__be32 credit = 0;
	u8 major = 0;
	u8 minor = 0;
	int err;

	dp = event->param.conn.private_data;
	if (conn->c_isv6) {
		if (event->param.conn.private_data_len >=
		    sizeof(struct rds6_ib_connect_private)) {
			major = dp->ricp_v6.dp_protocol_major;
			minor = dp->ricp_v6.dp_protocol_minor;
			credit = dp->ricp_v6.dp_credit;
			/* dp structure start is not guaranteed to be 8 bytes
			 * aligned.  Since dp_ack_seq is 64-bit extended load
			 * operations can be used so go through get_unaligned
			 * to avoid unaligned errors.
			 */
			ack_seq = get_unaligned(&dp->ricp_v6.dp_ack_seq);
		}
	} else if (event->param.conn.private_data_len >=
		   sizeof(struct rds_ib_connect_private)) {
		major = dp->ricp_v4.dp_protocol_major;
		minor = dp->ricp_v4.dp_protocol_minor;
		credit = dp->ricp_v4.dp_credit;
		ack_seq = get_unaligned(&dp->ricp_v4.dp_ack_seq);
	}

	/* make sure it isn't empty data */
	if (major) {
		rds_ib_set_protocol(conn, RDS_PROTOCOL(major, minor));
		rds_ib_set_flow_control(conn, be32_to_cpu(credit));
	}

	if (conn->c_version < RDS_PROTOCOL_VERSION) {
		if (conn->c_version != RDS_PROTOCOL_COMPAT_VERSION) {
			pr_notice("RDS/IB: Connection <%pI6c,%pI6c> version %u.%u no longer supported\n",
				  &conn->c_laddr, &conn->c_faddr,
				  RDS_PROTOCOL_MAJOR(conn->c_version),
				  RDS_PROTOCOL_MINOR(conn->c_version));
			rds_conn_destroy(conn);
			return;
		}
	}

	pr_notice("RDS/IB: %s conn connected <%pI6c,%pI6c,%d> version %u.%u%s\n",
		  ic->i_active_side ? "Active" : "Passive",
		  &conn->c_laddr, &conn->c_faddr, conn->c_tos,
		  RDS_PROTOCOL_MAJOR(conn->c_version),
		  RDS_PROTOCOL_MINOR(conn->c_version),
		  ic->i_flowctl ? ", flow control" : "");

	atomic_set(&ic->i_cq_quiesce, 0);

	/* Init rings and fill recv. this needs to wait until protocol
	 * negotiation is complete, since ring layout is different
	 * from 3.1 to 4.1.
	 */
	rds_ib_send_init_ring(ic);
	rds_ib_recv_init_ring(ic);
	/* Post receive buffers - as a side effect, this will update
	 * the posted credit count. */
	rds_ib_recv_refill(conn, 1, GFP_KERNEL);

	/* Tune RNR behavior */
	rds_ib_tune_rnr(ic, &qp_attr);

	qp_attr.qp_state = IB_QPS_RTS;
	err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE);
	if (err)
		printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);

	/* update ib_device with this local ipaddr */
	err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr);
	if (err)
		printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
		       err);

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp) {
		if (ack_seq)
			rds_send_drop_acked(conn, be64_to_cpu(ack_seq),
					    NULL);
	}

	conn->c_proposed_version = conn->c_version;
	rds_connect_complete(conn);
}
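
/*
 * Note: both the active side (after rdma_connect()) and the passive side
 * (after rdma_accept()) arrive in rds_ib_cm_connect_complete() via the
 * RDMA_CM_EVENT_ESTABLISHED event, so everything in it must be safe to
 * run on either end of the handshake.
 */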
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
				      struct rdma_conn_param *conn_param,
				      union rds_ib_conn_priv *dp,
				      u32 protocol_version,
				      u32 max_responder_resources,
				      u32 max_initiator_depth,
				      bool isv6)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;

	memset(conn_param, 0, sizeof(struct rdma_conn_param));

	conn_param->responder_resources =
		min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
	conn_param->initiator_depth =
		min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
	conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
	conn_param->rnr_retry_count = 7;

	if (dp) {
		memset(dp, 0, sizeof(*dp));
		if (isv6) {
			dp->ricp_v6.dp_saddr = conn->c_laddr;
			dp->ricp_v6.dp_daddr = conn->c_faddr;
			dp->ricp_v6.dp_protocol_major =
				RDS_PROTOCOL_MAJOR(protocol_version);
			dp->ricp_v6.dp_protocol_minor =
				RDS_PROTOCOL_MINOR(protocol_version);
			dp->ricp_v6.dp_protocol_minor_mask =
				cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
			dp->ricp_v6.dp_ack_seq =
				cpu_to_be64(rds_ib_piggyb_ack(ic));
			dp->ricp_v6.dp_cmn.ricpc_dp_toss = conn->c_tos;

			conn_param->private_data = &dp->ricp_v6;
			conn_param->private_data_len = sizeof(dp->ricp_v6);
		} else {
			dp->ricp_v4.dp_saddr = conn->c_laddr.s6_addr32[3];
			dp->ricp_v4.dp_daddr = conn->c_faddr.s6_addr32[3];
			dp->ricp_v4.dp_protocol_major =
				RDS_PROTOCOL_MAJOR(protocol_version);
			dp->ricp_v4.dp_protocol_minor =
				RDS_PROTOCOL_MINOR(protocol_version);
			dp->ricp_v4.dp_protocol_minor_mask =
				cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
			dp->ricp_v4.dp_ack_seq =
				cpu_to_be64(rds_ib_piggyb_ack(ic));
			dp->ricp_v4.dp_cmn.ricpc_dp_toss = conn->c_tos;

			conn_param->private_data = &dp->ricp_v4;
			conn_param->private_data_len = sizeof(dp->ricp_v4);
		}

		/* Advertise flow control */
		if (ic->i_flowctl) {
			unsigned int credits;

			credits = IB_GET_POST_CREDITS
				(atomic_read(&ic->i_credits));
			if (isv6)
				dp->ricp_v6.dp_credit = cpu_to_be32(credits);
			else
				dp->ricp_v4.dp_credit = cpu_to_be32(credits);
			atomic_sub(IB_SET_POST_CREDITS(credits),
				   &ic->i_credits);
		}
	}
}
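
/*
 * The private data filled in above is the entire RDS/IB handshake payload:
 * the peer parses it in rds_ib_protocol_compatible() (on connect requests)
 * and rds_ib_cm_connect_complete() (once established) to learn our
 * addresses, proposed protocol version, supported minor-version mask,
 * piggybacked ACK sequence and initial credit grant.
 */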
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
	rdsdebug("event %u (%s) data %p\n",
		 event->event, ib_event_msg(event->event), data);
}
/* Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}
static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			if (wc->wr_id <= ic->i_send_ring.w_nr ||
			    wc->wr_id == RDS_IB_ACK_WR_ID)
				rds_ib_send_cqe_handler(ic, wc);
			else
				rds_ib_mr_cqe_handler(ic, wc);
		}
	}
}
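
/*
 * The wr_id test above works because data sends use their send-ring index
 * (at most i_send_ring.w_nr) as wr_id and the dedicated ACK message uses
 * the reserved RDS_IB_ACK_WR_ID, so anything else completing on the send
 * CQ must be an FRMR registration/invalidation handled by the MR code.
 */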
static void rds_ib_tasklet_fn_send(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	poll_scq(ic, ic->i_send_cq, ic->i_send_wc);

	if (rds_conn_up(conn) &&
	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	     test_bit(0, &conn->c_map_queued)))
		rds_send_xmit(&ic->conn->c_path[0]);
}
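
/*
 * Note the poll/arm/poll sequence above: a completion that slips in
 * between draining the CQ and re-arming it with ib_req_notify_cq() would
 * not raise another event, so the CQ is polled once more after re-arming
 * to close that window.  The recv tasklet below uses the same pattern.
 */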
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
		     struct ib_wc *wcs,
		     struct rds_ib_ack_state *ack_state)
{
	int nr, i;
	struct ib_wc *wc;

	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
		for (i = 0; i < nr; i++) {
			wc = wcs + i;
			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
				 (unsigned long long)wc->wr_id, wc->status,
				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));

			rds_ib_recv_cqe_handler(ic, wc, ack_state);
		}
	}
}
static void rds_ib_tasklet_fn_recv(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
	struct rds_ib_ack_state state;

	if (!rds_ibdev)
		rds_conn_drop(conn);

	rds_ib_stats_inc(s_ib_tasklet_call);

	/* if cq has been already reaped, ignore incoming cq event */
	if (atomic_read(&ic->i_cq_quiesce))
		return;

	memset(&state, 0, sizeof(state));
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}

	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);
}
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
{
	struct rds_connection *conn = data;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
		 ib_event_msg(event->event));

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
		break;
	default:
		rdsdebug("Fatal QP Event %u (%s) - connection %pI6c->%pI6c, reconnecting\n",
			 event->event, ib_event_msg(event->event),
			 &conn->c_laddr, &conn->c_faddr);
		rds_conn_drop(conn);
		break;
	}
}
static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_evt_handler_call);

	tasklet_schedule(&ic->i_send_tasklet);
}
static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
{
	int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
	int index = rds_ibdev->dev->num_comp_vectors - 1;
	int i;

	for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
		if (rds_ibdev->vector_load[i] < min) {
			index = i;
			min = rds_ibdev->vector_load[i];
		}
	}

	rds_ibdev->vector_load[index]++;

	return index;
}
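
/*
 * Example: with vector_load[] = { 2, 0, 1 } the downward scan above picks
 * index 1 and bumps its load, so the next caller picks index 2 - a simple
 * least-loaded spread of CQs over the device's completion vectors (ties
 * go to the highest index, since only a strictly smaller load wins).
 */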
static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
{
	rds_ibdev->vector_load[index]--;
}
/*
 * This needs to be very careful to not leave IS_ERR pointers around for
 * cleanup to trip over.
 */
static int rds_ib_setup_qp(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct ib_qp_init_attr attr;
	struct ib_cq_init_attr cq_attr = {};
	struct rds_ib_device *rds_ibdev;
	int ret, fr_queue_space;

	/*
	 * It's normal to see a null device if an incoming connection races
	 * with device removal, so we don't print a warning.
	 */
	rds_ibdev = rds_ib_get_client_data(dev);
	if (!rds_ibdev)
		return -EOPNOTSUPP;

	/* The fr_queue_space is currently set to 512, to add extra space on
	 * completion queue and send queue. This extra space is used for FRMR
	 * registration and invalidation work requests
	 */
	fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);

	/* add the conn now so that connection establishment has the dev */
	rds_ib_add_conn(rds_ibdev, conn);

	if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
	if (rds_ibdev->max_wrs < ic->i_recv_ring.w_nr + 1)
		rds_ib_ring_resize(&ic->i_recv_ring, rds_ibdev->max_wrs - 1);

	/* Protection domain and memory range */
	ic->i_pd = rds_ibdev->pd;

	ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
	cq_attr.comp_vector = ic->i_scq_vector;
	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_send_cq)) {
		ret = PTR_ERR(ic->i_send_cq);
		ic->i_send_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
		rdsdebug("ib_create_cq send failed: %d\n", ret);
		goto rds_ibdev_out;
	}

	ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
	cq_attr.cqe = ic->i_recv_ring.w_nr;
	cq_attr.comp_vector = ic->i_rcq_vector;
	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
				     rds_ib_cq_event_handler, conn,
				     &cq_attr);
	if (IS_ERR(ic->i_recv_cq)) {
		ret = PTR_ERR(ic->i_recv_cq);
		ic->i_recv_cq = NULL;
		ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
		rdsdebug("ib_create_cq recv failed: %d\n", ret);
		goto send_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
	if (ret) {
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
		goto recv_cq_out;
	}

	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	if (ret) {
		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
		goto recv_cq_out;
	}

	/* XXX negotiate max send/recv with remote? */
	memset(&attr, 0, sizeof(attr));
	attr.event_handler = rds_ib_qp_event_handler;
	attr.qp_context = conn;
	/* + 1 to allow for the single ack message */
	attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1;
	attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1;
	attr.cap.max_send_sge = rds_ibdev->max_sge;
	attr.cap.max_recv_sge = RDS_IB_RECV_SGE;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = ic->i_send_cq;
	attr.recv_cq = ic->i_recv_cq;
	atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);

	/*
	 * XXX this can fail if max_*_wr is too large?  Are we supposed
	 * to back off until we get a value that the hardware can support?
	 */
	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
	if (ret) {
		rdsdebug("rdma_create_qp failed: %d\n", ret);
		goto recv_cq_out;
	}

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_send_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent send failed\n");
		goto qp_out;
	}

	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
						ic->i_recv_ring.w_nr *
							sizeof(struct rds_header),
						&ic->i_recv_hdrs_dma, GFP_KERNEL);
	if (!ic->i_recv_hdrs) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent recv failed\n");
		goto send_hdrs_dma_out;
	}

	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
					  &ic->i_ack_dma, GFP_KERNEL);
	if (!ic->i_ack) {
		ret = -ENOMEM;
		rdsdebug("ib_dma_alloc_coherent ack failed\n");
		goto recv_hdrs_dma_out;
	}

	ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work),
					      ic->i_send_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_sends) {
		ret = -ENOMEM;
		rdsdebug("send allocation failed\n");
		goto ack_dma_out;
	}

	ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work),
					      ic->i_recv_ring.w_nr),
				   ibdev_to_node(dev));
	if (!ic->i_recvs) {
		ret = -ENOMEM;
		rdsdebug("recv allocation failed\n");
		goto sends_out;
	}

	rds_ib_recv_init_ack(ic);

	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
		 ic->i_send_cq, ic->i_recv_cq);

	return ret;

sends_out:
	vfree(ic->i_sends);
ack_dma_out:
	ib_dma_free_coherent(dev, sizeof(struct rds_header),
			     ic->i_ack, ic->i_ack_dma);
recv_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
				  sizeof(struct rds_header),
			     ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
send_hdrs_dma_out:
	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
				  sizeof(struct rds_header),
			     ic->i_send_hdrs, ic->i_send_hdrs_dma);
qp_out:
	rdma_destroy_qp(ic->i_cm_id);
recv_cq_out:
	if (!ib_destroy_cq(ic->i_recv_cq))
		ic->i_recv_cq = NULL;
send_cq_out:
	if (!ib_destroy_cq(ic->i_send_cq))
		ic->i_send_cq = NULL;
rds_ibdev_out:
	rds_ib_remove_conn(rds_ibdev, conn);

	rds_ib_dev_put(rds_ibdev);

	return ret;
}
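
/*
 * Sizing recap for the resources built above: the send CQ and the QP send
 * queue are both w_nr + fr_queue_space + 1 entries deep - one entry per
 * data send, headroom for FRMR work requests when fastreg is in use, and
 * one extra entry for the dedicated ACK message - while the receive side
 * needs only w_nr CQ entries (w_nr + 1 on the QP).
 */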
static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
{
	const union rds_ib_conn_priv *dp = event->param.conn.private_data;
	u8 data_len, major, minor;
	u32 version = 0;
	__be16 mask;
	u16 common;

	/*
	 * rdma_cm private data is odd - when there is any private data in the
	 * request, we will be given a pretty large buffer without telling us the
	 * original size. The only way to tell the difference is by looking at
	 * the contents, which are initialized to zero.
	 * If the protocol version fields aren't set, this is a connection attempt
	 * from an older version. This could be 3.0 or 2.0 - we can't tell.
	 * We really should have changed this for OFED 1.3 :-(
	 */

	/* Be paranoid. RDS always has privdata */
	if (!event->param.conn.private_data_len) {
		printk(KERN_NOTICE "RDS incoming connection has no private data, "
		       "rejecting\n");
		return 0;
	}

	if (isv6) {
		data_len = sizeof(struct rds6_ib_connect_private);
		major = dp->ricp_v6.dp_protocol_major;
		minor = dp->ricp_v6.dp_protocol_minor;
		mask = dp->ricp_v6.dp_protocol_minor_mask;
	} else {
		data_len = sizeof(struct rds_ib_connect_private);
		major = dp->ricp_v4.dp_protocol_major;
		minor = dp->ricp_v4.dp_protocol_minor;
		mask = dp->ricp_v4.dp_protocol_minor_mask;
	}

	/* Even if len is crap *now* I still want to check it. -ASG */
	if (event->param.conn.private_data_len < data_len || major == 0)
		return RDS_PROTOCOL_4_0;

	common = be16_to_cpu(mask) & RDS_IB_SUPPORTED_PROTOCOLS;
	if (major == 4 && common) {
		version = RDS_PROTOCOL_4_0;
		while ((common >>= 1) != 0)
			version++;
	} else if (RDS_PROTOCOL_COMPAT_VERSION ==
		   RDS_PROTOCOL(major, minor)) {
		version = RDS_PROTOCOL_COMPAT_VERSION;
	} else {
		if (isv6)
			printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI6c using incompatible protocol version %u.%u\n",
					   &dp->ricp_v6.dp_saddr, major, minor);
		else
			printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using incompatible protocol version %u.%u\n",
					   &dp->ricp_v4.dp_saddr, major, minor);
	}

	return version;
}
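
/*
 * Worked example of the mask arithmetic above: if the peer sends major 4
 * with a minor mask of 0x3 and we also support minors {0,1}, then
 * common == 0x3 and the shift loop raises version from RDS_PROTOCOL_4_0
 * by one step per extra mask bit, yielding 4.1 - i.e. the highest minor
 * bit both sides advertise wins.
 */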
#if IS_ENABLED(CONFIG_IPV6)
/* Given an IPv6 address, find the net_device which hosts that address and
 * return its index.  This is used by the rds_ib_cm_handle_connect() code to
 * find the interface index of where an incoming request comes from when
 * the request is using a link local address.
 *
 * Note one problem in this search.  It is possible that two interfaces have
 * the same link local address.  Unfortunately, this cannot be solved unless
 * the underlying layer gives us the interface which an incoming RDMA connect
 * request comes from.
 */
static u32 __rds_find_ifindex(struct net *net, const struct in6_addr *addr)
{
	struct net_device *dev;
	int idx = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (ipv6_chk_addr(net, addr, dev, 1)) {
			idx = dev->ifindex;
			break;
		}
	}
	rcu_read_unlock();

	return idx;
}
#endif
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6)
{
	__be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
	__be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
	const struct rds_ib_conn_priv_cmn *dp_cmn;
	struct rds_connection *conn = NULL;
	struct rds_ib_connection *ic = NULL;
	struct rdma_conn_param conn_param;
	const union rds_ib_conn_priv *dp;
	union rds_ib_conn_priv dp_rep;
	struct in6_addr s_mapped_addr;
	struct in6_addr d_mapped_addr;
	const struct in6_addr *saddr6;
	const struct in6_addr *daddr6;
	int destroy = 1;
	u32 ifindex = 0;
	u32 version;
	int err = 1;

	/* Check whether the remote protocol version matches ours. */
	version = rds_ib_protocol_compatible(event, isv6);
	if (!version) {
		err = RDS_RDMA_REJ_INCOMPAT;
		goto out;
	}

	dp = event->param.conn.private_data;
	if (isv6) {
#if IS_ENABLED(CONFIG_IPV6)
		dp_cmn = &dp->ricp_v6.dp_cmn;
		saddr6 = &dp->ricp_v6.dp_saddr;
		daddr6 = &dp->ricp_v6.dp_daddr;
		/* If either address is link local, need to find the
		 * interface index in order to create a proper RDS
		 * connection.
		 */
		if (ipv6_addr_type(daddr6) & IPV6_ADDR_LINKLOCAL) {
			/* Using init_net for now ..  */
			ifindex = __rds_find_ifindex(&init_net, daddr6);
			/* No index found...  Need to bail out. */
			if (ifindex == 0) {
				err = -EOPNOTSUPP;
				goto out;
			}
		} else if (ipv6_addr_type(saddr6) & IPV6_ADDR_LINKLOCAL) {
			/* Use our address to find the correct index. */
			ifindex = __rds_find_ifindex(&init_net, daddr6);
			/* No index found...  Need to bail out. */
			if (ifindex == 0) {
				err = -EOPNOTSUPP;
				goto out;
			}
		}
#else
		err = -EOPNOTSUPP;
		goto out;
#endif
	} else {
		dp_cmn = &dp->ricp_v4.dp_cmn;
		ipv6_addr_set_v4mapped(dp->ricp_v4.dp_saddr, &s_mapped_addr);
		ipv6_addr_set_v4mapped(dp->ricp_v4.dp_daddr, &d_mapped_addr);
		saddr6 = &s_mapped_addr;
		daddr6 = &d_mapped_addr;
	}

	rdsdebug("saddr %pI6c daddr %pI6c RDSv%u.%u lguid 0x%llx fguid 0x%llx, tos:%d\n",
		 saddr6, daddr6, RDS_PROTOCOL_MAJOR(version),
		 RDS_PROTOCOL_MINOR(version),
		 (unsigned long long)be64_to_cpu(lguid),
		 (unsigned long long)be64_to_cpu(fguid), dp_cmn->ricpc_dp_toss);

	/* RDS/IB is not currently netns aware, thus init_net */
	conn = rds_conn_create(&init_net, daddr6, saddr6,
			       &rds_ib_transport, dp_cmn->ricpc_dp_toss,
			       GFP_KERNEL, ifindex);
	if (IS_ERR(conn)) {
		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
		conn = NULL;
		goto out;
	}

	/*
	 * The connection request may occur while the
	 * previous connection exist, e.g. in case of failover.
	 * But as connections may be initiated simultaneously
	 * by both hosts, we have a random backoff mechanism -
	 * see the comment above rds_queue_reconnect()
	 */
	mutex_lock(&conn->c_cm_lock);
	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
		if (rds_conn_state(conn) == RDS_CONN_UP) {
			rdsdebug("incoming connect while connecting\n");
			rds_conn_drop(conn);
			rds_ib_stats_inc(s_ib_listen_closed_stale);
		} else if (rds_conn_state(conn) == RDS_CONN_CONNECTING) {
			/* Wait and see - our connect may still be succeeding */
			rds_ib_stats_inc(s_ib_connect_raced);
		}
		goto out;
	}

	ic = conn->c_transport_data;

	rds_ib_set_protocol(conn, version);
	rds_ib_set_flow_control(conn, be32_to_cpu(dp_cmn->ricpc_credit));

	/* If the peer gave us the last packet it saw, process this as if
	 * we had received a regular ACK. */
	if (dp_cmn->ricpc_ack_seq)
		rds_send_drop_acked(conn, be64_to_cpu(dp_cmn->ricpc_ack_seq),
				    NULL);

	BUG_ON(cm_id->context);
	BUG_ON(ic->i_cm_id);

	ic->i_cm_id = cm_id;
	cm_id->context = conn;

	/* We got halfway through setting up the ib_connection, if we
	 * fail now, we have to take the long route out of this mess. */
	destroy = 0;

	err = rds_ib_setup_qp(conn);
	if (err) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
				  event->param.conn.responder_resources,
				  event->param.conn.initiator_depth, isv6);

	/* rdma_accept() calls rdma_reject() internally if it fails */
	if (rdma_accept(cm_id, &conn_param))
		rds_ib_conn_error(conn, "rdma_accept failed\n");

out:
	if (conn)
		mutex_unlock(&conn->c_cm_lock);
	if (err)
		rdma_reject(cm_id, &err, sizeof(int));
	return destroy;
}
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6)
{
	struct rds_connection *conn = cm_id->context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rdma_conn_param conn_param;
	union rds_ib_conn_priv dp;
	int ret;

	/* If the peer doesn't do protocol negotiation, we must
	 * default to RDSv3.0 */
	rds_ib_set_protocol(conn, RDS_PROTOCOL_4_1);
	ic->i_flowctl = rds_ib_sysctl_flow_control;	/* advertise flow control */

	ret = rds_ib_setup_qp(conn);
	if (ret) {
		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", ret);
		goto out;
	}

	rds_ib_cm_fill_conn_param(conn, &conn_param, &dp,
				  conn->c_proposed_version,
				  UINT_MAX, UINT_MAX, isv6);
	ret = rdma_connect(cm_id, &conn_param);
	if (ret)
		rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);

out:
	/* Beware - returning non-zero tells the rdma_cm to destroy
	 * the cm_id. We should certainly not do it as long as we still
	 * "own" the cm_id. */
	if (ret) {
		if (ic->i_cm_id == cm_id)
			ret = 0;
	}
	ic->i_active_side = true;
	return ret;
}
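
/*
 * On the ownership rule above: while ic->i_cm_id still points at this id,
 * the error is squashed to 0 so that rdma_cm does not destroy a cm_id we
 * still reference; teardown is left to rds_ib_conn_path_shutdown(), which
 * calls rdma_destroy_id() once the connection is quiesced.
 */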
int rds_ib_conn_path_connect(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct sockaddr_storage src, dest;
	rdma_cm_event_handler handler;
	struct rds_ib_connection *ic;
	int ret;

	ic = conn->c_transport_data;

	/* XXX I wonder what affect the port space has */
	/* delegate cm event handler to rdma_transport */
#if IS_ENABLED(CONFIG_IPV6)
	if (conn->c_isv6)
		handler = rds6_rdma_cm_event_handler;
	else
#endif
		handler = rds_rdma_cm_event_handler;
	ic->i_cm_id = rdma_create_id(&init_net, handler, conn,
				     RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ic->i_cm_id)) {
		ret = PTR_ERR(ic->i_cm_id);
		ic->i_cm_id = NULL;
		rdsdebug("rdma_create_id() failed: %d\n", ret);
		goto out;
	}

	rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn);

	if (ipv6_addr_v4mapped(&conn->c_faddr)) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)&src;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = conn->c_laddr.s6_addr32[3];
		sin->sin_port = 0;

		sin = (struct sockaddr_in *)&dest;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = conn->c_faddr.s6_addr32[3];
		sin->sin_port = htons(RDS_PORT);
	} else {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&src;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = conn->c_laddr;
		sin6->sin6_port = 0;
		sin6->sin6_scope_id = conn->c_dev_if;

		sin6 = (struct sockaddr_in6 *)&dest;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = conn->c_faddr;
		sin6->sin6_port = htons(RDS_CM_PORT);
		sin6->sin6_scope_id = conn->c_dev_if;
	}

	ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src,
				(struct sockaddr *)&dest,
				RDS_RDMA_RESOLVE_TIMEOUT_MS);
	if (ret) {
		rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id,
			 ret);
		rdma_destroy_id(ic->i_cm_id);
		ic->i_cm_id = NULL;
	}

out:
	return ret;
}
/*
 * This is so careful about only cleaning up resources that were built up
 * so that it can be called at any point during startup.  In fact it
 * can be called multiple times for a given connection.
 */
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;
	int err = 0;

	rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id,
		 ic->i_pd, ic->i_send_cq, ic->i_recv_cq,
		 ic->i_cm_id ? ic->i_cm_id->qp : NULL);

	if (ic->i_cm_id) {
		struct ib_device *dev = ic->i_cm_id->device;

		rdsdebug("disconnecting cm %p\n", ic->i_cm_id);
		err = rdma_disconnect(ic->i_cm_id);
		if (err) {
			/* Actually this may happen quite frequently, when
			 * an outgoing connect raced with an incoming connect.
			 */
			rdsdebug("failed to disconnect, cm: %p err %d\n",
				 ic->i_cm_id, err);
		}

		/* kick off "flush_worker" for all pools in order to reap
		 * all FRMR registrations that are still marked "FRMR_IS_INUSE"
		 */
		rds_ib_flush_mrs();

		/*
		 * We want to wait for tx and rx completion to finish
		 * before we tear down the connection, but we have to be
		 * careful not to get stuck waiting on a send ring that
		 * only has unsignaled sends in it.  We've shutdown new
		 * sends before getting here so by waiting for signaled
		 * sends to complete we're ensured that there will be no
		 * more tx processing.
		 */
		wait_event(rds_ib_ring_empty_wait,
			   rds_ib_ring_empty(&ic->i_recv_ring) &&
			   (atomic_read(&ic->i_signaled_sends) == 0) &&
			   (atomic_read(&ic->i_fastreg_inuse_count) == 0) &&
			   (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
		tasklet_kill(&ic->i_send_tasklet);
		tasklet_kill(&ic->i_recv_tasklet);

		atomic_set(&ic->i_cq_quiesce, 1);

		/* first destroy the ib state that generates callbacks */
		if (ic->i_cm_id->qp)
			rdma_destroy_qp(ic->i_cm_id);
		if (ic->i_send_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
			ib_destroy_cq(ic->i_send_cq);
		}

		if (ic->i_recv_cq) {
			if (ic->rds_ibdev)
				ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
			ib_destroy_cq(ic->i_recv_cq);
		}

		/* then free the resources that ib callbacks use */
		if (ic->i_send_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_send_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_send_hdrs,
					     ic->i_send_hdrs_dma);

		if (ic->i_recv_hdrs)
			ib_dma_free_coherent(dev,
					     ic->i_recv_ring.w_nr *
						sizeof(struct rds_header),
					     ic->i_recv_hdrs,
					     ic->i_recv_hdrs_dma);

		if (ic->i_ack)
			ib_dma_free_coherent(dev, sizeof(struct rds_header),
					     ic->i_ack, ic->i_ack_dma);

		if (ic->i_sends)
			rds_ib_send_clear_ring(ic);
		if (ic->i_recvs)
			rds_ib_recv_clear_ring(ic);

		rdma_destroy_id(ic->i_cm_id);

		/*
		 * Move connection back to the nodev list.
		 */
		if (ic->rds_ibdev)
			rds_ib_remove_conn(ic->rds_ibdev, conn);

		ic->i_cm_id = NULL;
		ic->i_pd = NULL;
		ic->i_send_cq = NULL;
		ic->i_recv_cq = NULL;
		ic->i_send_hdrs = NULL;
		ic->i_recv_hdrs = NULL;
		ic->i_ack = NULL;
	}
	BUG_ON(ic->rds_ibdev);

	/* Clear pending transmit */
	if (ic->i_data_op) {
		struct rds_message *rm;

		rm = container_of(ic->i_data_op, struct rds_message, data);
		rds_message_put(rm);
		ic->i_data_op = NULL;
	}

	/* Clear the ACK state */
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_set(&ic->i_ack_next, 0);
#else
	ic->i_ack_next = 0;
#endif
	ic->i_ack_recv = 0;

	/* Clear flow control state */
	ic->i_flowctl = 0;
	atomic_set(&ic->i_credits, 0);

	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	if (ic->i_ibinc) {
		rds_inc_put(&ic->i_ibinc->ii_inc);
		ic->i_ibinc = NULL;
	}

	vfree(ic->i_sends);
	ic->i_sends = NULL;
	vfree(ic->i_recvs);
	ic->i_recvs = NULL;
	ic->i_active_side = false;
}
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_ib_connection *ic;
	unsigned long flags;
	int ret;

	/* XXX too lazy? */
	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
	if (!ic)
		return -ENOMEM;

	ret = rds_ib_recv_alloc_caches(ic, gfp);
	if (ret) {
		kfree(ic);
		return ret;
	}

	INIT_LIST_HEAD(&ic->ib_node);
	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
		     (unsigned long)ic);
	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
		     (unsigned long)ic);
	mutex_init(&ic->i_recv_mutex);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&ic->i_ack_lock);
#endif
	atomic_set(&ic->i_signaled_sends, 0);

	/*
	 * rds_ib_conn_shutdown() waits for these to be emptied so they
	 * must be initialized before it can be called.
	 */
	rds_ib_ring_init(&ic->i_send_ring, rds_ib_sysctl_max_send_wr);
	rds_ib_ring_init(&ic->i_recv_ring, rds_ib_sysctl_max_recv_wr);

	ic->conn = conn;
	conn->c_transport_data = ic;

	spin_lock_irqsave(&ib_nodev_conns_lock, flags);
	list_add_tail(&ic->ib_node, &ib_nodev_conns);
	spin_unlock_irqrestore(&ib_nodev_conns_lock, flags);

	rdsdebug("conn %p conn ic %p\n", conn, conn->c_transport_data);
	return 0;
}
/*
 * Free a connection. Connection must be shut down and not set for reconnect.
 */
void rds_ib_conn_free(void *arg)
{
	struct rds_ib_connection *ic = arg;
	spinlock_t *lock_ptr;

	rdsdebug("ic %p\n", ic);

	/*
	 * Conn is either on a dev's list or on the nodev list.
	 * A race with shutdown() or connect() would cause problems
	 * (since rds_ibdev would change) but that should never happen.
	 */
	lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock;

	spin_lock_irq(lock_ptr);
	list_del(&ic->ib_node);
	spin_unlock_irq(lock_ptr);

	rds_ib_recv_free_caches(ic);

	kfree(ic);
}
/*
 * An error occurred on the connection
 */
void
__rds_ib_conn_error(struct rds_connection *conn, const char *fmt, ...)
{
	va_list ap;

	rds_conn_drop(conn);

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}