// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2015-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/export.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static const struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	trace_svcrdma_qp_error(event, (struct sockaddr *)&xprt->xpt_remote);
	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		break;

	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		break;
	}
}

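/* Allocate a new svcxprt_rdma and initialize its list heads, wait
 * queue, and locks. Returns NULL if the allocation fails.
 */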
static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
						 struct net *net)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return NULL;
	}
	svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_recv_ctxts);
	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_send_lock);
	spin_lock_init(&cma_xprt->sc_recv_lock);
	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

	/*
	 * Note that this implies that the underlying transport supports
	 * some form of congestion control (see RFC 7530 section 3.1
	 * paragraph 2). For now, we assume that all supported RDMA
	 * transports are suitable here.
	 */
	set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

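/* Examine the RDMA-CM private data the client sent with its connect
 * request. If it carries a recognizable rpcrdma v1 message, record
 * whether the peer supports Send With Invalidate; otherwise the
 * transport defaults are left in place.
 */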
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
			       struct rdma_conn_param *param)
{
	const struct rpcrdma_connect_private *pmsg = param->private_data;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		newxprt->sc_snd_w_inv = pmsg->cp_flags &
					RPCRDMA_CMP_F_SND_W_INV_OK;

		dprintk("svcrdma: client send_size %u, recv_size %u "
			"remote inv %ssupported\n",
			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
			newxprt->sc_snd_w_inv ? "" : "un");
	}
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
			       struct rdma_conn_param *param)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server,
				       listen_xprt->sc_xprt.xpt_net);
	if (!newxprt)
		return;
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	svc_rdma_parse_connect_private(newxprt, param);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = param->initiator_depth;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;

	trace_svcrdma_cm_event(event, sap);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id, &event->param.conn);
		break;
	default:
		/* NB: No device removal upcall for INADDR_ANY listeners */
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return 0;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
	struct svcxprt_rdma *rdma = cma_id->context;
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_cm_event(event, sap);

	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
		svc_xprt_put(xprt);
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}
	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA listener\n");
	if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = svc_rdma_create_xprt(serv, net);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);
	set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
	strcpy(cma_xprt->sc_xprt.xpt_remotebuf, "listener");

	listen_id = rdma_create_id(net, rdma_listen_handler, cma_xprt,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	/* Allow both IPv4 and IPv6 sockets to bind a single port
	 * at the same time.
	 */
#if IS_ENABLED(CONFIG_IPV6)
	ret = rdma_set_afonly(listen_id, 1);
	if (ret) {
		dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
		goto err1;
	}
#endif
	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct rpcrdma_connect_private pmsg;
	struct ib_qp_init_attr qp_attr;
	unsigned int ctxts, rq_depth;
	struct ib_device *dev;
	struct sockaddr *sap;
	int ret = 0;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	dev = newxprt->sc_cm_id->device;
	newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	/* Transport header, head iovec, tail iovec */
	newxprt->sc_max_send_sges = 3;
	/* Add one SGE per page list entry */
	newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE;
	if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) {
		pr_err("svcrdma: too few Send SGEs available (%d needed)\n",
		       newxprt->sc_max_send_sges);
		goto errout;
	}
	newxprt->sc_max_req_size = svcrdma_max_req_size;
	newxprt->sc_max_requests = svcrdma_max_requests;
	newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
	rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
	if (rq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing receive depth to %d\n",
			dev->attrs.max_qp_wr);
		rq_depth = dev->attrs.max_qp_wr;
		newxprt->sc_max_requests = rq_depth - 2;
		newxprt->sc_max_bc_requests = 2;
	}
	newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
	ctxts = rdma_rw_mr_factor(dev, newxprt->sc_port_num, RPCSVC_MAXPAGES);
	ctxts *= newxprt->sc_max_requests;
	newxprt->sc_sq_depth = rq_depth + ctxts;
	if (newxprt->sc_sq_depth > dev->attrs.max_qp_wr) {
		pr_warn("svcrdma: reducing send depth to %d\n",
			dev->attrs.max_qp_wr);
		newxprt->sc_sq_depth = dev->attrs.max_qp_wr;
	}
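	/* sc_sq_avail tracks the number of free Send Queue entries: the
	 * send path decrements it for each WR it posts and sleeps on
	 * sc_send_wait when the SQ is full.
	 */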
	atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

	newxprt->sc_pd = ib_alloc_pd(dev, 0);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
					0, IB_POLL_WORKQUEUE);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.port_num = newxprt->sc_port_num;
	qp_attr.cap.max_rdma_ctxs = ctxts;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth - ctxts;
	qp_attr.cap.max_recv_wr = rq_depth;
	qp_attr.cap.max_send_sge = newxprt->sc_max_send_sges;
	qp_attr.cap.max_recv_sge = 1;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
		newxprt->sc_cm_id, newxprt->sc_pd);
	dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
		qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
	dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
		qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	if (!(dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		newxprt->sc_snd_w_inv = false;
	if (!rdma_protocol_iwarp(dev, newxprt->sc_port_num) &&
	    !rdma_ib_or_roce(dev, newxprt->sc_port_num))
		goto errout;

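	/* Post the initial set of Receive buffers before accepting the
	 * connection, so that the client's first Sends cannot arrive
	 * before a Receive WR is ready for them.
	 */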
	if (!svc_rdma_post_recvs(newxprt))
		goto errout;

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/* Construct RDMA-CM private message */
	pmsg.cp_magic = rpcrdma_cmp_magic;
	pmsg.cp_version = RPCRDMA_CMP_VERSION;
	pmsg.cp_flags = 0;
	pmsg.cp_send_size = pmsg.cp_recv_size =
		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = min_t(int, newxprt->sc_ord,
					   dev->attrs.max_qp_init_rd_atom);
	if (!conn_param.initiator_depth) {
		dprintk("svcrdma: invalid ORD setting\n");
		ret = -EINVAL;
		goto errout;
	}
	conn_param.private_data = &pmsg;
	conn_param.private_data_len = sizeof(pmsg);
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret)
		goto errout;

	dprintk("svcrdma: new connection %p accepted:\n", newxprt);
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
	sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
	dprintk("    max_sge         : %d\n", newxprt->sc_max_send_sges);
	dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
	dprintk("    rdma_rw_ctxs    : %d\n", ctxts);
	dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
	dprintk("    ord             : %d\n", conn_param.initiator_depth);

	trace_svcrdma_xprt_accept(&newxprt->sc_xprt);
	return &newxprt->sc_xprt;

 errout:
	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
	trace_svcrdma_xprt_fail(&newxprt->sc_xprt);
	/* Take a reference in case the DTO handler runs */
	svc_xprt_get(&newxprt->sc_xprt);
	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
		ib_destroy_qp(newxprt->sc_qp);
	rdma_destroy_id(newxprt->sc_cm_id);
	/* This call to put will destroy the transport */
	svc_xprt_put(&newxprt->sc_xprt);
	return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

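/* Deferred destructor for an svcxprt_rdma. By the time this work item
 * runs, the final kref on the svc_xprt has been put, so the QP can be
 * quiesced and all verbs resources released.
 */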
static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	struct svc_xprt *xprt = &rdma->sc_xprt;

	trace_svcrdma_xprt_free(xprt);

	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_drain_qp(rdma->sc_qp);

	/* We should only be called from kref_put */
	if (kref_read(&xprt->xpt_ref) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       kref_read(&xprt->xpt_ref));

	svc_rdma_flush_recv_queues(rdma);

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {
		xprt_put(xprt->xpt_bc_xprt);
		xprt->xpt_bc_xprt = NULL;
	}

	svc_rdma_destroy_rw_ctxts(rdma);
	svc_rdma_send_ctxts_destroy(rdma);
	svc_rdma_recv_ctxts_destroy(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_free_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_free_cq(rdma->sc_rq_cq);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

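/* Freeing the transport can sleep (draining the QP, releasing verbs
 * resources), while the final svc_xprt_put may come from a context
 * that cannot; hand the actual destruction off to a workqueue.
 */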
static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return false.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

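/* RDMA connections have no notion of a privileged source port. Mark
 * each request as arriving on a secure port so that exports which
 * require one do not reject RPCs over RDMA.
 */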
static void svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	set_bit(RQ_SECURE, &rqstp->rq_flags);
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}