net/sunrpc/xprtrdma/svc_rdma_transport.c
1 /*
2  * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
3  * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the BSD-type
9  * license below:
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  *      Redistributions of source code must retain the above copyright
16  *      notice, this list of conditions and the following disclaimer.
17  *
18  *      Redistributions in binary form must reproduce the above
19  *      copyright notice, this list of conditions and the following
20  *      disclaimer in the documentation and/or other materials provided
21  *      with the distribution.
22  *
23  *      Neither the name of the Network Appliance, Inc. nor the names of
24  *      its contributors may be used to endorse or promote products
25  *      derived from this software without specific prior written
26  *      permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39  *
40  * Author: Tom Tucker <tom@opengridcomputing.com>
41  */
42
43 #include <linux/sunrpc/svc_xprt.h>
44 #include <linux/sunrpc/debug.h>
45 #include <linux/sunrpc/rpc_rdma.h>
46 #include <linux/interrupt.h>
47 #include <linux/sched.h>
48 #include <linux/slab.h>
49 #include <linux/spinlock.h>
50 #include <linux/workqueue.h>
51 #include <rdma/ib_verbs.h>
52 #include <rdma/rdma_cm.h>
53 #include <linux/sunrpc/svc_rdma.h>
54 #include <linux/export.h>
55 #include "xprt_rdma.h"
56
57 #define RPCDBG_FACILITY RPCDBG_SVCXPRT
58
59 static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
60                                         struct net *net,
61                                         struct sockaddr *sa, int salen,
62                                         int flags);
63 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
64 static void svc_rdma_release_rqst(struct svc_rqst *);
65 static void dto_tasklet_func(unsigned long data);
66 static void svc_rdma_detach(struct svc_xprt *xprt);
67 static void svc_rdma_free(struct svc_xprt *xprt);
68 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
69 static int svc_rdma_secure_port(struct svc_rqst *);
70 static void rq_cq_reap(struct svcxprt_rdma *xprt);
71 static void sq_cq_reap(struct svcxprt_rdma *xprt);
72
73 static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
74 static DEFINE_SPINLOCK(dto_lock);
75 static LIST_HEAD(dto_xprt_q);
76
77 static struct svc_xprt_ops svc_rdma_ops = {
78         .xpo_create = svc_rdma_create,
79         .xpo_recvfrom = svc_rdma_recvfrom,
80         .xpo_sendto = svc_rdma_sendto,
81         .xpo_release_rqst = svc_rdma_release_rqst,
82         .xpo_detach = svc_rdma_detach,
83         .xpo_free = svc_rdma_free,
84         .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
85         .xpo_has_wspace = svc_rdma_has_wspace,
86         .xpo_accept = svc_rdma_accept,
87         .xpo_secure_port = svc_rdma_secure_port,
88 };
89
90 struct svc_xprt_class svc_rdma_class = {
91         .xcl_name = "rdma",
92         .xcl_owner = THIS_MODULE,
93         .xcl_ops = &svc_rdma_ops,
94         .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
95         .xcl_ident = XPRT_TRANSPORT_RDMA,
96 };
97
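/*
 * Allocate a per-WR completion context from the ctxt cache. The
 * __GFP_NOFAIL allocation cannot return NULL; each context allocated
 * is accounted in the transport's sc_ctxt_used counter.
 */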
98 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
99 {
100         struct svc_rdma_op_ctxt *ctxt;
101
102         ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
103                                 GFP_KERNEL | __GFP_NOFAIL);
104         ctxt->xprt = xprt;
105         INIT_LIST_HEAD(&ctxt->dto_q);
106         ctxt->count = 0;
107         ctxt->frmr = NULL;
108         atomic_inc(&xprt->sc_ctxt_used);
109         return ctxt;
110 }
111
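/*
 * DMA-unmap the SGEs attached to a context. Only SGEs carrying the
 * transport's local DMA lkey are unmapped here; SGEs registered via an
 * FRMR are unmapped later, when the last WR using that FRMR completes.
 */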
112 void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
113 {
114         struct svcxprt_rdma *xprt = ctxt->xprt;
115         int i;
116         for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
117                 /*
118                  * Unmap the DMA addr in the SGE if the lkey matches
119                  * the sc_dma_lkey, otherwise, ignore it since it is
120                  * an FRMR lkey and will be unmapped later when the
121                  * last WR that uses it completes.
122                  */
123                 if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
124                         atomic_dec(&xprt->sc_dma_used);
125                         ib_dma_unmap_page(xprt->sc_cm_id->device,
126                                             ctxt->sge[i].addr,
127                                             ctxt->sge[i].length,
128                                             ctxt->direction);
129                 }
130         }
131 }
132
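/*
 * Return a context to the ctxt cache, optionally releasing the pages
 * attached to it, and drop it from the sc_ctxt_used count.
 */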
133 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
134 {
135         struct svcxprt_rdma *xprt;
136         int i;
137
138         xprt = ctxt->xprt;
139         if (free_pages)
140                 for (i = 0; i < ctxt->count; i++)
141                         put_page(ctxt->pages[i]);
142
143         kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
144         atomic_dec(&xprt->sc_ctxt_used);
145 }
146
147 /*
148  * Temporary NFS req mappings are shared across all transport
149  * instances. These are short lived and should be bounded by the number
150  * of concurrent server threads * depth of the SQ.
151  */
152 struct svc_rdma_req_map *svc_rdma_get_req_map(void)
153 {
154         struct svc_rdma_req_map *map;
155         map = kmem_cache_alloc(svc_rdma_map_cachep,
156                                GFP_KERNEL | __GFP_NOFAIL);
157         map->count = 0;
158         return map;
159 }
160
161 void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
162 {
163         kmem_cache_free(svc_rdma_map_cachep, map);
164 }
165
166 /* ib_cq event handler */
167 static void cq_event_handler(struct ib_event *event, void *context)
168 {
169         struct svc_xprt *xprt = context;
170         dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
171                 ib_event_msg(event->event), event->event, context);
172         set_bit(XPT_CLOSE, &xprt->xpt_flags);
173 }
174
175 /* QP event handler */
176 static void qp_event_handler(struct ib_event *event, void *context)
177 {
178         struct svc_xprt *xprt = context;
179
180         switch (event->event) {
181         /* These are considered benign events */
182         case IB_EVENT_PATH_MIG:
183         case IB_EVENT_COMM_EST:
184         case IB_EVENT_SQ_DRAINED:
185         case IB_EVENT_QP_LAST_WQE_REACHED:
186                 dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
187                         ib_event_msg(event->event), event->event,
188                         event->element.qp);
189                 break;
190         /* These are considered fatal events */
191         case IB_EVENT_PATH_MIG_ERR:
192         case IB_EVENT_QP_FATAL:
193         case IB_EVENT_QP_REQ_ERR:
194         case IB_EVENT_QP_ACCESS_ERR:
195         case IB_EVENT_DEVICE_FATAL:
196         default:
197                 dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
198                         "closing transport\n",
199                         ib_event_msg(event->event), event->event,
200                         event->element.qp);
201                 set_bit(XPT_CLOSE, &xprt->xpt_flags);
202                 break;
203         }
204 }
205
206 /*
207  * Data Transfer Operation Tasklet
208  *
209  * Walks a list of transports with I/O pending, removing entries as
210  * they are added to the server's I/O pending list. Two bits indicate
211  * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
212  * spinlock that serializes access to the transport list with the RQ
213  * and SQ interrupt handlers.
214  */
215 static void dto_tasklet_func(unsigned long data)
216 {
217         struct svcxprt_rdma *xprt;
218         unsigned long flags;
219
220         spin_lock_irqsave(&dto_lock, flags);
221         while (!list_empty(&dto_xprt_q)) {
222                 xprt = list_entry(dto_xprt_q.next,
223                                   struct svcxprt_rdma, sc_dto_q);
224                 list_del_init(&xprt->sc_dto_q);
225                 spin_unlock_irqrestore(&dto_lock, flags);
226
227                 rq_cq_reap(xprt);
228                 sq_cq_reap(xprt);
229
230                 svc_xprt_put(&xprt->sc_xprt);
231                 spin_lock_irqsave(&dto_lock, flags);
232         }
233         spin_unlock_irqrestore(&dto_lock, flags);
234 }
235
236 /*
237  * Receive Queue Completion Handler
238  *
239  * Since an RQ completion handler is called on interrupt context, we
240  * need to defer the handling of the I/O to a tasklet
241  */
242 static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
243 {
244         struct svcxprt_rdma *xprt = cq_context;
245         unsigned long flags;
246
247         /* Guard against unconditional flush call for destroyed QP */
248         if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
249                 return;
250
251         /*
252          * Set the bit regardless of whether or not it's on the list
253          * because it may be on the list already due to an SQ
254          * completion.
255          */
256         set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);
257
258         /*
259          * If this transport is not already on the DTO transport queue,
260          * add it
261          */
262         spin_lock_irqsave(&dto_lock, flags);
263         if (list_empty(&xprt->sc_dto_q)) {
264                 svc_xprt_get(&xprt->sc_xprt);
265                 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
266         }
267         spin_unlock_irqrestore(&dto_lock, flags);
268
269         /* Tasklet does all the work to avoid irqsave locks. */
270         tasklet_schedule(&dto_tasklet);
271 }
272
273 /*
274  * rq_cq_reap - Process the RQ CQ.
275  *
276  * Take all completing WC off the CQE and enqueue the associated DTO
277  * context on the dto_q for the transport.
278  *
279  * Note that caller must hold a transport reference.
280  */
281 static void rq_cq_reap(struct svcxprt_rdma *xprt)
282 {
283         int ret;
284         struct ib_wc wc;
285         struct svc_rdma_op_ctxt *ctxt = NULL;
286
287         if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
288                 return;
289
290         ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
291         atomic_inc(&rdma_stat_rq_poll);
292
293         while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
294                 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
295                 ctxt->wc_status = wc.status;
296                 ctxt->byte_len = wc.byte_len;
297                 svc_rdma_unmap_dma(ctxt);
298                 if (wc.status != IB_WC_SUCCESS) {
299                         /* Close the transport */
300                         dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
301                         set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
302                         svc_rdma_put_context(ctxt, 1);
303                         svc_xprt_put(&xprt->sc_xprt);
304                         continue;
305                 }
306                 spin_lock_bh(&xprt->sc_rq_dto_lock);
307                 list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
308                 spin_unlock_bh(&xprt->sc_rq_dto_lock);
309                 svc_xprt_put(&xprt->sc_xprt);
310         }
311
312         if (ctxt)
313                 atomic_inc(&rdma_stat_rq_prod);
314
315         set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
316         /*
317          * If data arrived before established event,
318          * don't enqueue. This defers RPC I/O until the
319          * RDMA connection is complete.
320          */
321         if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
322                 svc_xprt_enqueue(&xprt->sc_xprt);
323 }
324
325 /*
326  * Process a completion context
327  */
328 static void process_context(struct svcxprt_rdma *xprt,
329                             struct svc_rdma_op_ctxt *ctxt)
330 {
331         svc_rdma_unmap_dma(ctxt);
332
333         switch (ctxt->wr_op) {
334         case IB_WR_SEND:
335                 if (ctxt->frmr)
336                         pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
337                 svc_rdma_put_context(ctxt, 1);
338                 break;
339
340         case IB_WR_RDMA_WRITE:
341                 if (ctxt->frmr)
342                         pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
343                 svc_rdma_put_context(ctxt, 0);
344                 break;
345
346         case IB_WR_RDMA_READ:
347         case IB_WR_RDMA_READ_WITH_INV:
348                 svc_rdma_put_frmr(xprt, ctxt->frmr);
349                 if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
350                         struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
351                         if (read_hdr) {
352                                 spin_lock_bh(&xprt->sc_rq_dto_lock);
353                                 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
354                                 list_add_tail(&read_hdr->dto_q,
355                                               &xprt->sc_read_complete_q);
356                                 spin_unlock_bh(&xprt->sc_rq_dto_lock);
357                         } else {
358                                 pr_err("svcrdma: ctxt->read_hdr == NULL\n");
359                         }
360                         svc_xprt_enqueue(&xprt->sc_xprt);
361                 }
362                 svc_rdma_put_context(ctxt, 0);
363                 break;
364
365         default:
366                 printk(KERN_ERR "svcrdma: unexpected completion type, "
367                        "opcode=%d\n",
368                        ctxt->wr_op);
369                 break;
370         }
371 }
372
373 /*
374  * Send Queue Completion Handler - potentially called on interrupt context.
375  *
376  * Note that caller must hold a transport reference.
377  */
378 static void sq_cq_reap(struct svcxprt_rdma *xprt)
379 {
380         struct svc_rdma_op_ctxt *ctxt = NULL;
381         struct ib_wc wc_a[6];
382         struct ib_wc *wc;
383         struct ib_cq *cq = xprt->sc_sq_cq;
384         int ret;
385
386         memset(wc_a, 0, sizeof(wc_a));
387
388         if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
389                 return;
390
391         ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
392         atomic_inc(&rdma_stat_sq_poll);
393         while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
394                 int i;
395
396                 for (i = 0; i < ret; i++) {
397                         wc = &wc_a[i];
398                         if (wc->status != IB_WC_SUCCESS) {
399                                 dprintk("svcrdma: sq wc err status %s (%d)\n",
400                                         ib_wc_status_msg(wc->status),
401                                         wc->status);
402
403                                 /* Close the transport */
404                                 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
405                         }
406
407                         /* Decrement used SQ WR count */
408                         atomic_dec(&xprt->sc_sq_count);
409                         wake_up(&xprt->sc_send_wait);
410
411                         ctxt = (struct svc_rdma_op_ctxt *)
412                                 (unsigned long)wc->wr_id;
413                         if (ctxt)
414                                 process_context(xprt, ctxt);
415
416                         svc_xprt_put(&xprt->sc_xprt);
417                 }
418         }
419
420         if (ctxt)
421                 atomic_inc(&rdma_stat_sq_prod);
422 }
423
424 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
425 {
426         struct svcxprt_rdma *xprt = cq_context;
427         unsigned long flags;
428
429         /* Guard against unconditional flush call for destroyed QP */
430         if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
431                 return;
432
433         /*
434          * Set the bit regardless of whether or not it's on the list
435          * because it may be on the list already due to an RQ
436          * completion.
437          */
438         set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);
439
440         /*
441          * If this transport is not already on the DTO transport queue,
442          * add it
443          */
444         spin_lock_irqsave(&dto_lock, flags);
445         if (list_empty(&xprt->sc_dto_q)) {
446                 svc_xprt_get(&xprt->sc_xprt);
447                 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
448         }
449         spin_unlock_irqrestore(&dto_lock, flags);
450
451         /* Tasklet does all the work to avoid irqsave locks. */
452         tasklet_schedule(&dto_tasklet);
453 }
454
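/*
 * Allocate and initialize an svcxprt_rdma: set up its lists, locks,
 * send wait queue, and tunable defaults (ORD, request size, SQ depth).
 * A non-zero @listener marks the transport as a listening endpoint.
 */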
455 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
456                                              int listener)
457 {
458         struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);
459
460         if (!cma_xprt)
461                 return NULL;
462         svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
463         INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
464         INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
465         INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
466         INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
467         INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
468         init_waitqueue_head(&cma_xprt->sc_send_wait);
469
470         spin_lock_init(&cma_xprt->sc_lock);
471         spin_lock_init(&cma_xprt->sc_rq_dto_lock);
472         spin_lock_init(&cma_xprt->sc_frmr_q_lock);
473
474         cma_xprt->sc_ord = svcrdma_ord;
475
476         cma_xprt->sc_max_req_size = svcrdma_max_req_size;
477         cma_xprt->sc_max_requests = svcrdma_max_requests;
478         cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
479         atomic_set(&cma_xprt->sc_sq_count, 0);
480         atomic_set(&cma_xprt->sc_ctxt_used, 0);
481
482         if (listener)
483                 set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
484
485         return cma_xprt;
486 }
487
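/*
 * Post a single receive WR. The context is loaded with page-sized,
 * DMA-mapped SGEs until sc_max_req_size bytes of receive space are
 * covered. A transport reference is taken for the posted WR and
 * dropped again if ib_post_recv() fails.
 */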
488 int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
489 {
490         struct ib_recv_wr recv_wr, *bad_recv_wr;
491         struct svc_rdma_op_ctxt *ctxt;
492         struct page *page;
493         dma_addr_t pa;
494         int sge_no;
495         int buflen;
496         int ret;
497
498         ctxt = svc_rdma_get_context(xprt);
499         buflen = 0;
500         ctxt->direction = DMA_FROM_DEVICE;
501         for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
502                 if (sge_no >= xprt->sc_max_sge) {
503                         pr_err("svcrdma: Too many sges (%d)\n", sge_no);
504                         goto err_put_ctxt;
505                 }
506                 page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
507                 ctxt->pages[sge_no] = page;
508                 pa = ib_dma_map_page(xprt->sc_cm_id->device,
509                                      page, 0, PAGE_SIZE,
510                                      DMA_FROM_DEVICE);
511                 if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
512                         goto err_put_ctxt;
513                 atomic_inc(&xprt->sc_dma_used);
514                 ctxt->sge[sge_no].addr = pa;
515                 ctxt->sge[sge_no].length = PAGE_SIZE;
516                 ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
517                 ctxt->count = sge_no + 1;
518                 buflen += PAGE_SIZE;
519         }
520         recv_wr.next = NULL;
521         recv_wr.sg_list = &ctxt->sge[0];
522         recv_wr.num_sge = ctxt->count;
523         recv_wr.wr_id = (u64)(unsigned long)ctxt;
524
525         svc_xprt_get(&xprt->sc_xprt);
526         ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
527         if (ret) {
528                 svc_rdma_unmap_dma(ctxt);
529                 svc_rdma_put_context(ctxt, 1);
530                 svc_xprt_put(&xprt->sc_xprt);
531         }
532         return ret;
533
534  err_put_ctxt:
535         svc_rdma_unmap_dma(ctxt);
536         svc_rdma_put_context(ctxt, 1);
537         return -ENOMEM;
538 }
539
540 /*
541  * This function handles the CONNECT_REQUEST event on a listening
542  * endpoint. It is passed the cma_id for the _new_ connection. The context in
543  * this cma_id is inherited from the listening cma_id and is the svc_xprt
544  * structure for the listening endpoint.
545  *
546  * This function creates a new xprt for the new connection and enqueues it on
547  * the accept queue for the listening xprt. When the listen thread is kicked, it
548  * will call the recvfrom method on the listen xprt which will accept the new
549  * connection.
550  */
551 static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
552 {
553         struct svcxprt_rdma *listen_xprt = new_cma_id->context;
554         struct svcxprt_rdma *newxprt;
555         struct sockaddr *sa;
556
557         /* Create a new transport */
558         newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
559         if (!newxprt) {
560                 dprintk("svcrdma: failed to create new transport\n");
561                 return;
562         }
563         newxprt->sc_cm_id = new_cma_id;
564         new_cma_id->context = newxprt;
565         dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
566                 newxprt, newxprt->sc_cm_id, listen_xprt);
567
568         /* Save client advertised inbound read limit for use later in accept. */
569         newxprt->sc_ord = client_ird;
570
571         /* Set the local and remote addresses in the transport */
572         sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
573         svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
574         sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
575         svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
576
577         /*
578          * Enqueue the new transport on the accept queue of the listening
579          * transport
580          */
581         spin_lock_bh(&listen_xprt->sc_lock);
582         list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
583         spin_unlock_bh(&listen_xprt->sc_lock);
584
585         set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
586         svc_xprt_enqueue(&listen_xprt->sc_xprt);
587 }
588
589 /*
590  * Handles events generated on the listening endpoint. These events will be
591  * either incoming connect requests or adapter removal events.
592  */
593 static int rdma_listen_handler(struct rdma_cm_id *cma_id,
594                                struct rdma_cm_event *event)
595 {
596         struct svcxprt_rdma *xprt = cma_id->context;
597         int ret = 0;
598
599         switch (event->event) {
600         case RDMA_CM_EVENT_CONNECT_REQUEST:
601                 dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
602                         "event = %s (%d)\n", cma_id, cma_id->context,
603                         rdma_event_msg(event->event), event->event);
604                 handle_connect_req(cma_id,
605                                    event->param.conn.initiator_depth);
606                 break;
607
608         case RDMA_CM_EVENT_ESTABLISHED:
609                 /* Accept complete */
610                 dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
611                         "cm_id=%p\n", xprt, cma_id);
612                 break;
613
614         case RDMA_CM_EVENT_DEVICE_REMOVAL:
615                 dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
616                         xprt, cma_id);
617                 if (xprt)
618                         set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
619                 break;
620
621         default:
622                 dprintk("svcrdma: Unexpected event on listening endpoint %p, "
623                         "event = %s (%d)\n", cma_id,
624                         rdma_event_msg(event->event), event->event);
625                 break;
626         }
627
628         return ret;
629 }
630
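/*
 * CM event handler for connected (non-listening) endpoints. The
 * ESTABLISHED event takes the reference held on behalf of the cm_id
 * and clears the connection-pending flag; DISCONNECTED and
 * DEVICE_REMOVAL mark the transport for close and drop that reference.
 */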
631 static int rdma_cma_handler(struct rdma_cm_id *cma_id,
632                             struct rdma_cm_event *event)
633 {
634         struct svc_xprt *xprt = cma_id->context;
635         struct svcxprt_rdma *rdma =
636                 container_of(xprt, struct svcxprt_rdma, sc_xprt);
637         switch (event->event) {
638         case RDMA_CM_EVENT_ESTABLISHED:
639                 /* Accept complete */
640                 svc_xprt_get(xprt);
641                 dprintk("svcrdma: Connection completed on DTO xprt=%p, "
642                         "cm_id=%p\n", xprt, cma_id);
643                 clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
644                 svc_xprt_enqueue(xprt);
645                 break;
646         case RDMA_CM_EVENT_DISCONNECTED:
647                 dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
648                         xprt, cma_id);
649                 if (xprt) {
650                         set_bit(XPT_CLOSE, &xprt->xpt_flags);
651                         svc_xprt_enqueue(xprt);
652                         svc_xprt_put(xprt);
653                 }
654                 break;
655         case RDMA_CM_EVENT_DEVICE_REMOVAL:
656                 dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
657                         "event = %s (%d)\n", cma_id, xprt,
658                         rdma_event_msg(event->event), event->event);
659                 if (xprt) {
660                         set_bit(XPT_CLOSE, &xprt->xpt_flags);
661                         svc_xprt_enqueue(xprt);
662                         svc_xprt_put(xprt);
663                 }
664                 break;
665         default:
666                 dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
667                         "event = %s (%d)\n", cma_id,
668                         rdma_event_msg(event->event), event->event);
669                 break;
670         }
671         return 0;
672 }
673
674 /*
675  * Create a listening RDMA service endpoint.
676  */
677 static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
678                                         struct net *net,
679                                         struct sockaddr *sa, int salen,
680                                         int flags)
681 {
682         struct rdma_cm_id *listen_id;
683         struct svcxprt_rdma *cma_xprt;
684         int ret;
685
686         dprintk("svcrdma: Creating RDMA socket\n");
687         if (sa->sa_family != AF_INET) {
688                 dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
689                 return ERR_PTR(-EAFNOSUPPORT);
690         }
691         cma_xprt = rdma_create_xprt(serv, 1);
692         if (!cma_xprt)
693                 return ERR_PTR(-ENOMEM);
694
695         listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
696                                    IB_QPT_RC);
697         if (IS_ERR(listen_id)) {
698                 ret = PTR_ERR(listen_id);
699                 dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
700                 goto err0;
701         }
702
703         ret = rdma_bind_addr(listen_id, sa);
704         if (ret) {
705                 dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
706                 goto err1;
707         }
708         cma_xprt->sc_cm_id = listen_id;
709
710         ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
711         if (ret) {
712                 dprintk("svcrdma: rdma_listen failed = %d\n", ret);
713                 goto err1;
714         }
715
716         /*
717          * We need to use the address from the cm_id in case the
718          * caller specified 0 for the port number.
719          */
720         sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
721         svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
722
723         return &cma_xprt->sc_xprt;
724
725  err1:
726         rdma_destroy_id(listen_id);
727  err0:
728         kfree(cma_xprt);
729         return ERR_PTR(ret);
730 }
731
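/*
 * Allocate a fast-register MR and page list sized for RPCSVC_MAXPAGES
 * pages and wrap them in a new svc_rdma_fastreg_mr.
 */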
732 static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
733 {
734         struct ib_mr *mr;
735         struct ib_fast_reg_page_list *pl;
736         struct svc_rdma_fastreg_mr *frmr;
737
738         frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
739         if (!frmr)
740                 goto err;
741
742         mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
743         if (IS_ERR(mr))
744                 goto err_free_frmr;
745
746         pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
747                                          RPCSVC_MAXPAGES);
748         if (IS_ERR(pl))
749                 goto err_free_mr;
750
751         frmr->mr = mr;
752         frmr->page_list = pl;
753         INIT_LIST_HEAD(&frmr->frmr_list);
754         return frmr;
755
756  err_free_mr:
757         ib_dereg_mr(mr);
758  err_free_frmr:
759         kfree(frmr);
760  err:
761         return ERR_PTR(-ENOMEM);
762 }
763
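/*
 * Drain the transport's FRMR free queue, deregistering each MR and
 * freeing its page list and wrapper.
 */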
764 static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
765 {
766         struct svc_rdma_fastreg_mr *frmr;
767
768         while (!list_empty(&xprt->sc_frmr_q)) {
769                 frmr = list_entry(xprt->sc_frmr_q.next,
770                                   struct svc_rdma_fastreg_mr, frmr_list);
771                 list_del_init(&frmr->frmr_list);
772                 ib_dereg_mr(frmr->mr);
773                 ib_free_fast_reg_page_list(frmr->page_list);
774                 kfree(frmr);
775         }
776 }
777
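/*
 * Take an FRMR off the transport's free queue, or allocate a fresh one
 * if the queue is empty.
 */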
778 struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
779 {
780         struct svc_rdma_fastreg_mr *frmr = NULL;
781
782         spin_lock_bh(&rdma->sc_frmr_q_lock);
783         if (!list_empty(&rdma->sc_frmr_q)) {
784                 frmr = list_entry(rdma->sc_frmr_q.next,
785                                   struct svc_rdma_fastreg_mr, frmr_list);
786                 list_del_init(&frmr->frmr_list);
787                 frmr->map_len = 0;
788                 frmr->page_list_len = 0;
789         }
790         spin_unlock_bh(&rdma->sc_frmr_q_lock);
791         if (frmr)
792                 return frmr;
793
794         return rdma_alloc_frmr(rdma);
795 }
796
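/*
 * DMA-unmap the pages currently covered by an FRMR, skipping entries
 * whose DMA address is already invalid.
 */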
797 static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
798                            struct svc_rdma_fastreg_mr *frmr)
799 {
800         int page_no;
801         for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
802                 dma_addr_t addr = frmr->page_list->page_list[page_no];
803                 if (ib_dma_mapping_error(frmr->mr->device, addr))
804                         continue;
805                 atomic_dec(&xprt->sc_dma_used);
806                 ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
807                                   frmr->direction);
808         }
809 }
810
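/*
 * Unmap an FRMR's pages and return it to the transport's free queue.
 */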
811 void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
812                        struct svc_rdma_fastreg_mr *frmr)
813 {
814         if (frmr) {
815                 frmr_unmap_dma(rdma, frmr);
816                 spin_lock_bh(&rdma->sc_frmr_q_lock);
817                 WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
818                 list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
819                 spin_unlock_bh(&rdma->sc_frmr_q_lock);
820         }
821 }
822
823 /*
824  * This is the xpo_recvfrom function for listening endpoints. Its
825  * purpose is to accept incoming connections. The CMA callback handler
826  * has already created a new transport and attached it to the new CMA
827  * ID.
828  *
829  * There is a queue of pending connections hung on the listening
830  * transport. This queue contains the new svc_xprt structure. This
831  * function takes svc_xprt structures off the accept_q and completes
832  * the connection.
833  */
834 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
835 {
836         struct svcxprt_rdma *listen_rdma;
837         struct svcxprt_rdma *newxprt = NULL;
838         struct rdma_conn_param conn_param;
839         struct ib_cq_init_attr cq_attr = {};
840         struct ib_qp_init_attr qp_attr;
841         struct ib_device_attr devattr;
842         int uninitialized_var(dma_mr_acc);
843         int need_dma_mr = 0;
844         int ret;
845         int i;
846
847         listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
848         clear_bit(XPT_CONN, &xprt->xpt_flags);
849         /* Get the next entry off the accept list */
850         spin_lock_bh(&listen_rdma->sc_lock);
851         if (!list_empty(&listen_rdma->sc_accept_q)) {
852                 newxprt = list_entry(listen_rdma->sc_accept_q.next,
853                                      struct svcxprt_rdma, sc_accept_q);
854                 list_del_init(&newxprt->sc_accept_q);
855         }
856         if (!list_empty(&listen_rdma->sc_accept_q))
857                 set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
858         spin_unlock_bh(&listen_rdma->sc_lock);
859         if (!newxprt)
860                 return NULL;
861
862         dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
863                 newxprt, newxprt->sc_cm_id);
864
865         ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
866         if (ret) {
867                 dprintk("svcrdma: could not query device attributes on "
868                         "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
869                 goto errout;
870         }
871
872         /* Qualify the transport resource defaults with the
873          * capabilities of this particular device */
874         newxprt->sc_max_sge = min((size_t)devattr.max_sge,
875                                   (size_t)RPCSVC_MAXPAGES);
876         newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
877                                    (size_t)svcrdma_max_requests);
878         newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
879
880         /*
881          * Limit ORD based on client limit, local device limit, and
882          * configured svcrdma limit.
883          */
884         newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
885         newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);
886
887         newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
888         if (IS_ERR(newxprt->sc_pd)) {
889                 dprintk("svcrdma: error creating PD for connect request\n");
890                 goto errout;
891         }
892         cq_attr.cqe = newxprt->sc_sq_depth;
893         newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
894                                          sq_comp_handler,
895                                          cq_event_handler,
896                                          newxprt,
897                                          &cq_attr);
898         if (IS_ERR(newxprt->sc_sq_cq)) {
899                 dprintk("svcrdma: error creating SQ CQ for connect request\n");
900                 goto errout;
901         }
902         cq_attr.cqe = newxprt->sc_max_requests;
903         newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
904                                          rq_comp_handler,
905                                          cq_event_handler,
906                                          newxprt,
907                                          &cq_attr);
908         if (IS_ERR(newxprt->sc_rq_cq)) {
909                 dprintk("svcrdma: error creating RQ CQ for connect request\n");
910                 goto errout;
911         }
912
913         memset(&qp_attr, 0, sizeof qp_attr);
914         qp_attr.event_handler = qp_event_handler;
915         qp_attr.qp_context = &newxprt->sc_xprt;
916         qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
917         qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
918         qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
919         qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
920         qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
921         qp_attr.qp_type = IB_QPT_RC;
922         qp_attr.send_cq = newxprt->sc_sq_cq;
923         qp_attr.recv_cq = newxprt->sc_rq_cq;
924         dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
925                 "    cm_id->device=%p, sc_pd->device=%p\n"
926                 "    cap.max_send_wr = %d\n"
927                 "    cap.max_recv_wr = %d\n"
928                 "    cap.max_send_sge = %d\n"
929                 "    cap.max_recv_sge = %d\n",
930                 newxprt->sc_cm_id, newxprt->sc_pd,
931                 newxprt->sc_cm_id->device, newxprt->sc_pd->device,
932                 qp_attr.cap.max_send_wr,
933                 qp_attr.cap.max_recv_wr,
934                 qp_attr.cap.max_send_sge,
935                 qp_attr.cap.max_recv_sge);
936
937         ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
938         if (ret) {
939                 dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
940                 goto errout;
941         }
942         newxprt->sc_qp = newxprt->sc_cm_id->qp;
943
944         /*
945          * Use the most secure set of MR resources based on the
946          * transport type and available memory management features in
947          * the device. Here's the table implemented below:
948          *
949          *              Fast    Global  DMA     Remote WR
950          *              Reg     LKEY    MR      Access
951          *              Sup'd   Sup'd   Needed  Needed
952          *
953          * IWARP        N       N       Y       Y
954          *              N       Y       Y       Y
955          *              Y       N       Y       N
956          *              Y       Y       N       -
957          *
958          * IB           N       N       Y       N
959          *              N       Y       N       -
960          *              Y       N       Y       N
961          *              Y       Y       N       -
962          *
963          * NB:  iWARP requires remote write access for the data sink
964          *      of an RDMA_READ. IB does not.
965          */
966         newxprt->sc_reader = rdma_read_chunk_lcl;
967         if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
968                 newxprt->sc_frmr_pg_list_len =
969                         devattr.max_fast_reg_page_list_len;
970                 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
971                 newxprt->sc_reader = rdma_read_chunk_frmr;
972         }
973
974         /*
975          * Determine if a DMA MR is required and if so, what privs are required
976          */
977         if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
978                                  newxprt->sc_cm_id->port_num) &&
979             !rdma_ib_or_roce(newxprt->sc_cm_id->device,
980                              newxprt->sc_cm_id->port_num))
981                 goto errout;
982
983         if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
984             !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
985                 need_dma_mr = 1;
986                 dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
987                 if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
988                                         newxprt->sc_cm_id->port_num) &&
989                     !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
990                         dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
991         }
992
993         if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
994                                 newxprt->sc_cm_id->port_num))
995                 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;
996
997         /* Create the DMA MR if needed, otherwise, use the DMA LKEY */
998         if (need_dma_mr) {
999                 /* Register all of physical memory */
1000                 newxprt->sc_phys_mr =
1001                         ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
1002                 if (IS_ERR(newxprt->sc_phys_mr)) {
1003                         dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
1004                                 ret);
1005                         goto errout;
1006                 }
1007                 newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
1008         } else
1009                 newxprt->sc_dma_lkey =
1010                         newxprt->sc_cm_id->device->local_dma_lkey;
1011
1012         /* Post receive buffers */
1013         for (i = 0; i < newxprt->sc_max_requests; i++) {
1014                 ret = svc_rdma_post_recv(newxprt);
1015                 if (ret) {
1016                         dprintk("svcrdma: failure posting receive buffers\n");
1017                         goto errout;
1018                 }
1019         }
1020
1021         /* Swap out the handler */
1022         newxprt->sc_cm_id->event_handler = rdma_cma_handler;
1023
1024         /*
1025          * Arm the CQs for the SQ and RQ before accepting so we can't
1026          * miss the first message
1027          */
1028         ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
1029         ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
1030
1031         /* Accept Connection */
1032         set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
1033         memset(&conn_param, 0, sizeof conn_param);
1034         conn_param.responder_resources = 0;
1035         conn_param.initiator_depth = newxprt->sc_ord;
1036         ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
1037         if (ret) {
1038                 dprintk("svcrdma: failed to accept new connection, ret=%d\n",
1039                        ret);
1040                 goto errout;
1041         }
1042
1043         dprintk("svcrdma: new connection %p accepted with the following "
1044                 "attributes:\n"
1045                 "    local_ip        : %pI4\n"
1046                 "    local_port      : %d\n"
1047                 "    remote_ip       : %pI4\n"
1048                 "    remote_port     : %d\n"
1049                 "    max_sge         : %d\n"
1050                 "    sq_depth        : %d\n"
1051                 "    max_requests    : %d\n"
1052                 "    ord             : %d\n",
1053                 newxprt,
1054                 &((struct sockaddr_in *)&newxprt->sc_cm_id->
1055                          route.addr.src_addr)->sin_addr.s_addr,
1056                 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1057                        route.addr.src_addr)->sin_port),
1058                 &((struct sockaddr_in *)&newxprt->sc_cm_id->
1059                          route.addr.dst_addr)->sin_addr.s_addr,
1060                 ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
1061                        route.addr.dst_addr)->sin_port),
1062                 newxprt->sc_max_sge,
1063                 newxprt->sc_sq_depth,
1064                 newxprt->sc_max_requests,
1065                 newxprt->sc_ord);
1066
1067         return &newxprt->sc_xprt;
1068
1069  errout:
1070         dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
1071         /* Take a reference in case the DTO handler runs */
1072         svc_xprt_get(&newxprt->sc_xprt);
1073         if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
1074                 ib_destroy_qp(newxprt->sc_qp);
1075         rdma_destroy_id(newxprt->sc_cm_id);
1076         /* This call to put will destroy the transport */
1077         svc_xprt_put(&newxprt->sc_xprt);
1078         return NULL;
1079 }
1080
1081 static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
1082 {
1083 }
1084
1085 /*
1086  * When connected, an svc_xprt has at least two references:
1087  *
1088  * - A reference held by the cm_id between the ESTABLISHED and
1089  *   DISCONNECTED events. If the remote peer disconnected first, this
1090  *   reference could be gone.
1091  *
1092  * - A reference held by the svc_recv code that called this function
1093  *   as part of close processing.
1094  *
1095  * At a minimum one reference should still be held.
1096  */
1097 static void svc_rdma_detach(struct svc_xprt *xprt)
1098 {
1099         struct svcxprt_rdma *rdma =
1100                 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1101         dprintk("svc: svc_rdma_detach(%p)\n", xprt);
1102
1103         /* Disconnect and flush posted WQE */
1104         rdma_disconnect(rdma->sc_cm_id);
1105 }
1106
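/*
 * Deferred transport destruction, run from svc_rdma_wq. Queued but
 * unprocessed read and receive completions are released first (they
 * need the cm_id's device to unmap DMA), then the FRMR queue, QP, CQs,
 * DMA MR, PD, and cm_id are torn down before the structure is freed.
 */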
1107 static void __svc_rdma_free(struct work_struct *work)
1108 {
1109         struct svcxprt_rdma *rdma =
1110                 container_of(work, struct svcxprt_rdma, sc_work);
1111         dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
1112
1113         /* We should only be called from kref_put */
1114         if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
1115                 pr_err("svcrdma: sc_xprt still in use? (%d)\n",
1116                        atomic_read(&rdma->sc_xprt.xpt_ref.refcount));
1117
1118         /*
1119          * Destroy queued, but not processed read completions. Note
1120          * that this cleanup has to be done before destroying the
1121          * cm_id because the device ptr is needed to unmap the dma in
1122          * svc_rdma_put_context.
1123          */
1124         while (!list_empty(&rdma->sc_read_complete_q)) {
1125                 struct svc_rdma_op_ctxt *ctxt;
1126                 ctxt = list_entry(rdma->sc_read_complete_q.next,
1127                                   struct svc_rdma_op_ctxt,
1128                                   dto_q);
1129                 list_del_init(&ctxt->dto_q);
1130                 svc_rdma_put_context(ctxt, 1);
1131         }
1132
1133         /* Destroy queued, but not processed recv completions */
1134         while (!list_empty(&rdma->sc_rq_dto_q)) {
1135                 struct svc_rdma_op_ctxt *ctxt;
1136                 ctxt = list_entry(rdma->sc_rq_dto_q.next,
1137                                   struct svc_rdma_op_ctxt,
1138                                   dto_q);
1139                 list_del_init(&ctxt->dto_q);
1140                 svc_rdma_put_context(ctxt, 1);
1141         }
1142
1143         /* Warn if we leaked a resource or under-referenced */
1144         if (atomic_read(&rdma->sc_ctxt_used) != 0)
1145                 pr_err("svcrdma: ctxt still in use? (%d)\n",
1146                        atomic_read(&rdma->sc_ctxt_used));
1147         if (atomic_read(&rdma->sc_dma_used) != 0)
1148                 pr_err("svcrdma: dma still in use? (%d)\n",
1149                        atomic_read(&rdma->sc_dma_used));
1150
1151         /* De-allocate fastreg mr */
1152         rdma_dealloc_frmr_q(rdma);
1153
1154         /* Destroy the QP if present (not a listener) */
1155         if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
1156                 ib_destroy_qp(rdma->sc_qp);
1157
1158         if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
1159                 ib_destroy_cq(rdma->sc_sq_cq);
1160
1161         if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
1162                 ib_destroy_cq(rdma->sc_rq_cq);
1163
1164         if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
1165                 ib_dereg_mr(rdma->sc_phys_mr);
1166
1167         if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
1168                 ib_dealloc_pd(rdma->sc_pd);
1169
1170         /* Destroy the CM ID */
1171         rdma_destroy_id(rdma->sc_cm_id);
1172
1173         kfree(rdma);
1174 }
1175
1176 static void svc_rdma_free(struct svc_xprt *xprt)
1177 {
1178         struct svcxprt_rdma *rdma =
1179                 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1180         INIT_WORK(&rdma->sc_work, __svc_rdma_free);
1181         queue_work(svc_rdma_wq, &rdma->sc_work);
1182 }
1183
1184 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
1185 {
1186         struct svcxprt_rdma *rdma =
1187                 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1188
1189         /*
1190          * If there are already waiters on the SQ,
1191          * return false.
1192          */
1193         if (waitqueue_active(&rdma->sc_send_wait))
1194                 return 0;
1195
1196         /* Otherwise return true. */
1197         return 1;
1198 }
1199
1200 static int svc_rdma_secure_port(struct svc_rqst *rqstp)
1201 {
1202         return 1;
1203 }
1204
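/*
 * Post a chain of send WRs on the transport's QP. If the SQ lacks room
 * for the entire chain, reap completed SQ entries and wait on
 * sc_send_wait until space frees up. One transport reference is taken
 * per WR posted, and all of them are dropped again if the post fails.
 */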
1205 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1206 {
1207         struct ib_send_wr *bad_wr, *n_wr;
1208         int wr_count;
1209         int i;
1210         int ret;
1211
1212         if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1213                 return -ENOTCONN;
1214
1215         wr_count = 1;
1216         for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
1217                 wr_count++;
1218
1219         /* If the SQ is full, wait until an SQ entry is available */
1220         while (1) {
1221                 spin_lock_bh(&xprt->sc_lock);
1222                 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
1223                         spin_unlock_bh(&xprt->sc_lock);
1224                         atomic_inc(&rdma_stat_sq_starve);
1225
1226                         /* See if we can opportunistically reap SQ WR to make room */
1227                         sq_cq_reap(xprt);
1228
1229                         /* Wait until SQ WR available if SQ still full */
1230                         wait_event(xprt->sc_send_wait,
1231                                    atomic_read(&xprt->sc_sq_count) <
1232                                    xprt->sc_sq_depth);
1233                         if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1234                                 return -ENOTCONN;
1235                         continue;
1236                 }
1237                 /* Take a transport ref for each WR posted */
1238                 for (i = 0; i < wr_count; i++)
1239                         svc_xprt_get(&xprt->sc_xprt);
1240
1241                 /* Bump used SQ WR count and post */
1242                 atomic_add(wr_count, &xprt->sc_sq_count);
1243                 ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
1244                 if (ret) {
1245                         set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
1246                         atomic_sub(wr_count, &xprt->sc_sq_count);
1247                         for (i = 0; i < wr_count; i++)
1248                                 svc_xprt_put(&xprt->sc_xprt);
1249                         dprintk("svcrdma: failed to post SQ WR rc=%d, "
1250                                "sc_sq_count=%d, sc_sq_depth=%d\n",
1251                                ret, atomic_read(&xprt->sc_sq_count),
1252                                xprt->sc_sq_depth);
1253                 }
1254                 spin_unlock_bh(&xprt->sc_lock);
1255                 if (ret)
1256                         wake_up(&xprt->sc_send_wait);
1257                 break;
1258         }
1259         return ret;
1260 }
1261
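/*
 * Send an RPC/RDMA error reply: XDR-encode the error into a freshly
 * allocated page, DMA-map it, and post it as a single-SGE SEND WR.
 */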
1262 void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1263                          enum rpcrdma_errcode err)
1264 {
1265         struct ib_send_wr err_wr;
1266         struct page *p;
1267         struct svc_rdma_op_ctxt *ctxt;
1268         __be32 *va;
1269         int length;
1270         int ret;
1271
1272         p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
1273         va = page_address(p);
1274
1275         /* XDR encode error */
1276         length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
1277
1278         ctxt = svc_rdma_get_context(xprt);
1279         ctxt->direction = DMA_TO_DEVICE;
1280         ctxt->count = 1;
1281         ctxt->pages[0] = p;
1282
1283         /* Prepare SGE for local address */
1284         ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
1285                                             p, 0, length, DMA_TO_DEVICE);
1286         if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1287                 put_page(p);
1288                 svc_rdma_put_context(ctxt, 1);
1289                 return;
1290         }
1291         atomic_inc(&xprt->sc_dma_used);
1292         ctxt->sge[0].lkey = xprt->sc_dma_lkey;
1293         ctxt->sge[0].length = length;
1294
1295         /* Prepare SEND WR */
1296         memset(&err_wr, 0, sizeof err_wr);
1297         ctxt->wr_op = IB_WR_SEND;
1298         err_wr.wr_id = (unsigned long)ctxt;
1299         err_wr.sg_list = ctxt->sge;
1300         err_wr.num_sge = 1;
1301         err_wr.opcode = IB_WR_SEND;
1302         err_wr.send_flags = IB_SEND_SIGNALED;
1303
1304         /* Post It */
1305         ret = svc_rdma_send(xprt, &err_wr);
1306         if (ret) {
1307                 dprintk("svcrdma: Error %d posting send for protocol error\n",
1308                         ret);
1309                 svc_rdma_unmap_dma(ctxt);
1310                 svc_rdma_put_context(ctxt, 1);
1311         }
1312 }