// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc);
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rdma_cm_id *id = ep->re_id;

        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
        ib_drain_rq(id->qp);

        /* Deferred Reply processing might have scheduled
         * local invalidations.
         */
        ib_drain_sq(id->qp);

        rpcrdma_ep_put(ep);
}

/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context. The QP is always destroyed before the ID, so the ID will be
 * reliably available when this handler is invoked.
 */
static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        trace_xprtrdma_qp_event(ep, event);
}

/* Ensure xprt_force_disconnect() is invoked exactly once when a
 * connection is closed or lost. (The important thing is it needs
 * to be invoked "at least" once).
 */
static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
{
        if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
                xprt_force_disconnect(ep->re_xprt);
}
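
/* A sketch of the atomic_add_unless() idiom used above:
 * atomic_add_unless(&v, 1, 1) increments @v only when it is not
 * already 1, and returns non-zero iff the increment happened.
 * With re_force_disconnect starting at 0, the first caller bumps
 * 0 -> 1 and invokes xprt_force_disconnect(); any concurrent or
 * later caller sees 1 and does nothing.
 */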

/**
 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @r_xprt: transport to disconnect
 * @wc: work completion entry
 *
 * Must be called in process context.
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
        if (wc->status != IB_WC_SUCCESS)
                rpcrdma_force_disconnect(r_xprt->rx_ep);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: WCE for a completed Send WR
 *
 */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
        struct rpcrdma_xprt *r_xprt = cq->cq_context;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(wc, &sc->sc_cid);
        rpcrdma_sendctx_put_locked(r_xprt, sc);
        rpcrdma_flush_disconnect(r_xprt, wc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: WCE for a completed Receive WR
 *
 */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);
        struct rpcrdma_xprt *r_xprt = cq->cq_context;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_receive(wc, &rep->rr_cid);
        --r_xprt->rx_ep->re_receive_count;
        if (wc->status != IB_WC_SUCCESS)
                goto out_flushed;

        /* status == SUCCESS means all fields in wc are trustworthy */
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

        rpcrdma_reply_handler(rep);
        return;

out_flushed:
        rpcrdma_flush_disconnect(r_xprt, wc);
        rpcrdma_rep_destroy(rep);
}

static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
                                      struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        ep->re_implicit_roundup = xprt_rdma_pad_optimize;
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                ep->re_implicit_roundup = true;
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < ep->re_inline_recv)
                ep->re_inline_recv = rsize;
        if (wsize < ep->re_inline_send)
                ep->re_inline_send = wsize;

        rpcrdma_set_max_header_sizes(ep);
}
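
/* Illustration (values made up): if the peer's private message
 * advertises a 1024-byte receive buffer while our re_inline_send is
 * 4096, re_inline_send is lowered to 1024. The negotiated thresholds
 * can only shrink here, never grow, so a peer cannot talk us into
 * sending more inline data than we provisioned.
 */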

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
        struct rpcrdma_ep *ep = id->context;

        might_sleep();

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ep->re_async_rc = 0;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ep->re_async_rc = -EPROTO;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ep->re_async_rc = -ENETUNREACH;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                pr_info("rpcrdma: removing device %s for %pISpc\n",
                        ep->re_id->device->name, sap);
                fallthrough;
        case RDMA_CM_EVENT_ADDR_CHANGE:
                ep->re_connect_status = -ENODEV;
                goto disconnected;
        case RDMA_CM_EVENT_ESTABLISHED:
                rpcrdma_ep_get(ep);
                ep->re_connect_status = 1;
                rpcrdma_update_cm_private(ep, &event->param.conn);
                trace_xprtrdma_inline_thresh(ep);
                wake_up_all(&ep->re_connect_wait);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->re_connect_status = -ENOTCONN;
                goto wake_connect_worker;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->re_connect_status = -ENETUNREACH;
                goto wake_connect_worker;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
                        sap, rdma_reject_msg(id, event->status));
                ep->re_connect_status = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        ep->re_connect_status = -ENOTCONN;
wake_connect_worker:
                wake_up_all(&ep->re_connect_wait);
                return 0;
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
disconnected:
                rpcrdma_force_disconnect(ep);
                return rpcrdma_ep_put(ep);
        default:
                break;
        }

        dprintk("RPC:       %s: %pISpc on %s/frwr: %s\n", __func__, sap,
                ep->re_id->device->name, rdma_event_msg(event->event));
        return 0;
}
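
/* Summary of the re_connect_status values set above:
 *   ESTABLISHED                  ->  1 (connected)
 *   DEVICE_REMOVAL / ADDR_CHANGE -> -ENODEV
 *   CONNECT_ERROR                -> -ENOTCONN
 *   UNREACHABLE                  -> -ENETUNREACH
 *   REJECTED                     -> -ECONNREFUSED, or -ENOTCONN for
 *                                   a stale connection
 *   DISCONNECTED                 -> -ECONNABORTED
 * rpcrdma_xprt_connect() sleeps until this field becomes non-zero.
 */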

static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
                                            struct rpcrdma_ep *ep)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ep->re_done);

        id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
                            RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id))
                return id;

        ep->re_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
        if (rc < 0)
                goto out;

        rc = ep->re_async_rc;
        if (rc)
                goto out;

        ep->re_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
        if (rc < 0)
                goto out;
        rc = ep->re_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}
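
/* Note on the timeouts above: @wtimeout is one jiffy longer than the
 * RDMA_RESOLVE_TIMEOUT handed to the CM, so under normal conditions
 * the CM's own timer fires first and the outcome is delivered through
 * rpcrdma_cm_event_handler(), which overwrites the pre-set -ETIMEDOUT
 * in re_async_rc. The local interruptible wait acts as a backstop and
 * lets a signal abort the connect attempt.
 */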

static void rpcrdma_ep_destroy(struct kref *kref)
{
        struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);

        if (ep->re_id->qp) {
                rdma_destroy_qp(ep->re_id);
                ep->re_id->qp = NULL;
        }

        if (ep->re_attr.recv_cq)
                ib_free_cq(ep->re_attr.recv_cq);
        ep->re_attr.recv_cq = NULL;
        if (ep->re_attr.send_cq)
                ib_free_cq(ep->re_attr.send_cq);
        ep->re_attr.send_cq = NULL;

        if (ep->re_pd)
                ib_dealloc_pd(ep->re_pd);
        ep->re_pd = NULL;

        kfree(ep);
        module_put(THIS_MODULE);
}

static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
{
        kref_get(&ep->re_kref);
}

/* Returns:
 *     %0 if @ep still has a positive kref count, or
 *     %1 if @ep was destroyed successfully.
 */
static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
        return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}
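
/* A sketch of who holds ep references in this file:
 *   - kref_init() in rpcrdma_ep_create(): the transport's reference,
 *     dropped in rpcrdma_xprt_disconnect()
 *   - rpcrdma_ep_get() in rpcrdma_xprt_connect(): held while Receives
 *     are outstanding, dropped by rpcrdma_xprt_drain()
 *   - rpcrdma_ep_get() on RDMA_CM_EVENT_ESTABLISHED: dropped on the
 *     "disconnected" path in rpcrdma_cm_event_handler()
 * Whichever put drives the count to zero frees the QP, CQs, and PD.
 */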

static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_connect_private *pmsg;
        struct ib_device *device;
        struct rdma_cm_id *id;
        struct rpcrdma_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_NOFS);
        if (!ep)
                return -ENOTCONN;
        ep->re_xprt = &r_xprt->rx_xprt;
        kref_init(&ep->re_kref);

        id = rpcrdma_create_id(r_xprt, ep);
        if (IS_ERR(id)) {
                kfree(ep);
                return PTR_ERR(id);
        }
        __module_get(THIS_MODULE);
        device = id->device;
        ep->re_id = id;

        ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
        ep->re_inline_send = xprt_rdma_max_inline_write;
        ep->re_inline_recv = xprt_rdma_max_inline_read;
        rc = frwr_query_device(ep, device);
        if (rc)
                goto out_destroy;

        r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);

        ep->re_attr.event_handler = rpcrdma_qp_event_handler;
        ep->re_attr.qp_context = ep;
        ep->re_attr.srq = NULL;
        ep->re_attr.cap.max_inline_data = 0;
        ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->re_attr.qp_type = IB_QPT_RC;
        ep->re_attr.port_num = ~0;

        dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->re_attr.cap.max_send_wr,
                ep->re_attr.cap.max_recv_wr,
                ep->re_attr.cap.max_send_sge,
                ep->re_attr.cap.max_recv_sge);

        ep->re_send_batch = ep->re_max_requests >> 3;
        ep->re_send_count = ep->re_send_batch;
        init_waitqueue_head(&ep->re_connect_wait);

        ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
                                              ep->re_attr.cap.max_send_wr,
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.send_cq)) {
                rc = PTR_ERR(ep->re_attr.send_cq);
                goto out_destroy;
        }

        ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
                                              ep->re_attr.cap.max_recv_wr,
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.recv_cq)) {
                rc = PTR_ERR(ep->re_attr.recv_cq);
                goto out_destroy;
        }
        ep->re_receive_count = 0;

        /* Initialize cma parameters */
        memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg = &ep->re_cm_private;
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
        ep->re_remote_cma.private_data = pmsg;
        ep->re_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->re_remote_cma.initiator_depth = 0;
        ep->re_remote_cma.responder_resources =
                min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);
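
        /* In rdma_conn_param terms: initiator_depth is the number of
         * concurrent RDMA Read/atomic operations this side will issue
         * (zero: the client never initiates RDMA Read), while
         * responder_resources is how many incoming RDMA Reads the
         * client can service at once, bounded by the device limit and
         * by U8_MAX because the CM wire field is eight bits wide.
         */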

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->re_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->re_remote_cma.flow_control = 0;
        ep->re_remote_cma.rnr_retry_count = 0;

        ep->re_pd = ib_alloc_pd(device, 0);
        if (IS_ERR(ep->re_pd)) {
                rc = PTR_ERR(ep->re_pd);
                goto out_destroy;
        }

        rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
        if (rc)
                goto out_destroy;

        r_xprt->rx_ep = ep;
        return 0;

out_destroy:
        rpcrdma_ep_put(ep);
        rdma_destroy_id(id);
        return rc;
}

/**
 * rpcrdma_xprt_connect - Connect an unconnected transport
 * @r_xprt: controlling transport instance
 *
 * Returns 0 on success or a negative errno.
 */
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_ep *ep;
        int rc;

        rc = rpcrdma_ep_create(r_xprt);
        if (rc)
                return rc;
        ep = r_xprt->rx_ep;

        xprt_clear_connected(xprt);
        rpcrdma_reset_cwnd(r_xprt);

        /* Bump the ep's reference count while there are
         * outstanding Receives.
         */
        rpcrdma_ep_get(ep);
        rpcrdma_post_recvs(r_xprt, true);

        rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
        if (rc)
                goto out;

        if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
                xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        wait_event_interruptible(ep->re_connect_wait,
                                 ep->re_connect_status != 0);
        if (ep->re_connect_status <= 0) {
                rc = ep->re_connect_status;
                goto out;
        }

        rc = rpcrdma_sendctxs_create(r_xprt);
        if (rc) {
                rc = -ENOTCONN;
                goto out;
        }

        rc = rpcrdma_reqs_setup(r_xprt);
        if (rc) {
                rc = -ENOTCONN;
                goto out;
        }
        rpcrdma_mrs_create(r_xprt);

out:
        trace_xprtrdma_connect(r_xprt, rc);
        return rc;
}
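
/* The connect handshake in brief: re_connect_status is zero while the
 * CM exchange is in flight; rpcrdma_cm_event_handler() sets it to 1 on
 * RDMA_CM_EVENT_ESTABLISHED or to a negative errno on failure, then
 * wakes re_connect_wait. Receives are posted before rdma_connect() so
 * reply buffers are already in place when the server's first Sends
 * arrive (RNR NAKs are never expected; see rnr_retry_count above).
 */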

/**
 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * @r_xprt: controlling transport instance
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 *
 * On return, @r_xprt is completely divested of all hardware
 * resources and prepared for the next ->connect operation.
 */
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rdma_cm_id *id;
        int rc;

        if (!ep)
                return;

        id = ep->re_id;
        rc = rdma_disconnect(id);
        trace_xprtrdma_disconnect(r_xprt, rc);

        rpcrdma_xprt_drain(r_xprt);
        rpcrdma_reps_unmap(r_xprt);
        rpcrdma_reqs_reset(r_xprt);
        rpcrdma_mrs_destroy(r_xprt);
        rpcrdma_sendctxs_destroy(r_xprt);

        if (rpcrdma_ep_put(ep))
                rdma_destroy_id(id);

        r_xprt->rx_ep = NULL;
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
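
/* A worked example, assuming a queue of four slots with head and tail
 * both starting at 0: senders may acquire the sendctxs in slots 1, 2,
 * and 3 in turn (each acquire advances head). A fourth acquire finds
 * next(head) == tail and fails, pausing the sender until a Send
 * completion advances tail. As in a classic circular buffer, one slot
 * is always sacrificed to distinguish "full" from "empty".
 */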

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned long i;

        if (!buf->rb_sc_ctxs)
                return;
        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
        buf->rb_sc_ctxs = NULL;
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_cqe.done = rpcrdma_wc_send;
        sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;
        sc->sc_cid.ci_completion_id =
                atomic_inc_return(&ep->re_completion_ids);
        return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
        i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
        buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
        if (!buf->rb_sc_ctxs)
                return -ENOMEM;

        buf->rb_sc_last = i - 1;
        for (i = 0; i <= buf->rb_sc_last; i++) {
                sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
                if (!sc)
                        return -ENOMEM;

                buf->rb_sc_ctxs[i] = sc;
        }

        buf->rb_sc_head = 0;
        buf->rb_sc_tail = 0;
        return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
                                          unsigned long item)
{
        return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
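
/* For instance, with rb_sc_last == 3 this yields 0 -> 1 -> 2 -> 3 -> 0:
 * a wraparound increment implemented with one well-predicted compare
 * and branch instead of a division.
 */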

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long next_head;

        next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

        if (next_head == READ_ONCE(buf->rb_sc_tail))
                goto out_emptyq;

        /* ORDER: item must be accessed _before_ head is updated */
        sc = buf->rb_sc_ctxs[next_head];

        /* Releasing the lock in the caller acts as a memory
         * barrier that flushes rb_sc_head.
         */
        buf->rb_sc_head = next_head;

        return sc;

out_emptyq:
        /* The queue is "empty" if there have not been enough Send
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
        xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned long next_tail;

        /* Unmap SGEs of previously completed but unsignaled
         * Sends by walking up the queue until @sc is found.
         */
        next_tail = buf->rb_sc_tail;
        do {
                next_tail = rpcrdma_sendctx_next(buf, next_tail);

                /* ORDER: item must be accessed _before_ tail is updated */
                rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

        } while (buf->rb_sc_ctxs[next_tail] != sc);

        /* Paired with READ_ONCE */
        smp_store_release(&buf->rb_sc_tail, next_tail);

        xprt_write_space(&r_xprt->rx_xprt);
}
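
/* Why walking up to @sc is safe: on an RC QP, Send WRs complete in
 * the order they were posted. When the completion for a signaled Send
 * arrives, every unsignaled Send posted before it has also finished,
 * so their SGEs can be unmapped as the tail sweeps forward to @sc.
 */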

static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned int count;

        for (count = 0; count < ep->re_max_rdma_segs; count++) {
                struct rpcrdma_mr *mr;
                int rc;

                mr = kzalloc(sizeof(*mr), GFP_NOFS);
                if (!mr)
                        break;

                rc = frwr_mr_init(r_xprt, mr);
                if (rc) {
                        kfree(mr);
                        break;
                }

                spin_lock(&buf->rb_lock);
                rpcrdma_mr_push(mr, &buf->rb_mrs);
                list_add(&mr->mr_all, &buf->rb_all_mrs);
                spin_unlock(&buf->rb_lock);
        }

        r_xprt->rx_stats.mrs_allocated += count;
        trace_xprtrdma_createmrs(r_xprt, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_refresh_worker);
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);

        rpcrdma_mrs_create(r_xprt);
        xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;

        /* If there is no underlying connection, it's no use
         * to wake the refresh worker.
         */
        if (ep->re_connect_status == 1) {
                /* The work is scheduled on a WQ_MEM_RECLAIM
                 * workqueue in order to prevent MR allocation
                 * from recursing into NFS during direct reclaim.
                 */
                queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
        }
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
                                       gfp_t flags)
{
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), flags);
        if (req == NULL)
                goto out1;

        req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
        if (!req->rl_sendbuf)
                goto out2;

        req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
        if (!req->rl_recvbuf)
                goto out3;

        INIT_LIST_HEAD(&req->rl_free_mrs);
        INIT_LIST_HEAD(&req->rl_registered);
        spin_lock(&buffer->rb_lock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
        spin_unlock(&buffer->rb_lock);
        return req;

out3:
        rpcrdma_regbuf_free(req->rl_sendbuf);
out2:
        kfree(req);
out1:
        return NULL;
}

/**
 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req object to set up
 *
 * Returns zero on success, and a negative errno on failure.
 */
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct rpcrdma_regbuf *rb;
        size_t maxhdrsize;

        /* Compute maximum header buffer size in bytes */
        maxhdrsize = rpcrdma_fixed_maxsz + 3 +
                     r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
        maxhdrsize *= sizeof(__be32);
        rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
                                  DMA_TO_DEVICE, GFP_KERNEL);
        if (!rb)
                goto out;

        if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
                goto out_free;

        req->rl_rdmabuf = rb;
        xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
        return 0;

out_free:
        rpcrdma_regbuf_free(rb);
out:
        return -ENOMEM;
}
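
/* Reading the size computation above: the worst-case transport header
 * is the fixed header words, plus (presumably) one word for each of
 * the three chunk-list discriminators, plus a full read chunk for
 * every RDMA segment the device supports, all counted in 32-bit XDR
 * words and then converted to bytes. Rounding up to a power of two
 * keeps the allocation slab-friendly.
 */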

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. E.g. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        int rc;

        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rc = rpcrdma_req_setup(r_xprt, req);
                if (rc)
                        return rc;
        }
        return 0;
}

static void rpcrdma_req_reset(struct rpcrdma_req *req)
{
        /* Credits are valid for only one connection */
        req->rl_slot.rq_cong = 0;

        rpcrdma_regbuf_free(req->rl_rdmabuf);
        req->rl_rdmabuf = NULL;

        rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
        rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);

        frwr_reset(req);
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. E.g. the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        list_for_each_entry(req, &buf->rb_allreqs, rl_all)
                rpcrdma_req_reset(req);
}

/* No locking needed here. This function is called only by the
 * Receive completion handler.
 */
static noinline
struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
                                       bool temp)
{
        struct rpcrdma_rep *rep;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (!rep->rr_rdmabuf)
                goto out_free;

        if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
                goto out_free_regbuf;

        rep->rr_cid.ci_completion_id =
                atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);

        xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
                     rdmab_length(rep->rr_rdmabuf));
        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
        rep->rr_recv_wr.next = NULL;
        rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;
        list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
        return rep;

out_free_regbuf:
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
out_free:
        kfree(rep);
out:
        return NULL;
}

/* No locking needed here. This function is invoked only by the
 * Receive completion handler, or during transport shutdown.
 */
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
        list_del(&rep->rr_all);
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
        kfree(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
        struct llist_node *node;

        /* Calls to llist_del_first are required to be serialized */
        node = llist_del_first(&buf->rb_free_reps);
        if (!node)
                return NULL;
        return llist_entry(node, struct rpcrdma_rep, rr_node);
}

static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
                            struct rpcrdma_rep *rep)
{
        llist_add(&rep->rr_node, &buf->rb_free_reps);
}

static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_rep *rep;

        list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
                rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
                rep->rr_temp = true;
        }
}

static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_rep *rep;

        while ((rep = rpcrdma_rep_get_locked(buf)) != NULL)
                rpcrdma_rep_destroy(rep);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        int i, rc;

        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_lock);
        INIT_LIST_HEAD(&buf->rb_mrs);
        INIT_LIST_HEAD(&buf->rb_all_mrs);
        INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
        INIT_LIST_HEAD(&buf->rb_all_reps);

        rc = -ENOMEM;
        for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
                struct rpcrdma_req *req;

                req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
                                         GFP_KERNEL);
                if (!req)
                        goto out;
                list_add(&req->rl_list, &buf->rb_send_bufs);
        }

        init_llist_head(&buf->rb_free_reps);

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_all_reqs safely.
 */
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
        struct rpcrdma_mr *mr;

        list_del(&req->rl_all);

        while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
                struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;

                spin_lock(&buf->rb_lock);
                list_del(&mr->mr_all);
                spin_unlock(&buf->rb_lock);

                frwr_release_mr(mr);
        }

        rpcrdma_regbuf_free(req->rl_recvbuf);
        rpcrdma_regbuf_free(req->rl_sendbuf);
        rpcrdma_regbuf_free(req->rl_rdmabuf);
        kfree(req);
}

/**
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * @r_xprt: controlling transport instance
 *
 * Relies on caller holding the transport send lock to protect
 * removing mr->mr_list from req->rl_free_mrs safely.
 */
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr;

        cancel_work_sync(&buf->rb_refresh_worker);

        spin_lock(&buf->rb_lock);
        while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
                                              struct rpcrdma_mr,
                                              mr_all)) != NULL) {
                list_del(&mr->mr_list);
                list_del(&mr->mr_all);
                spin_unlock(&buf->rb_lock);

                frwr_release_mr(mr);

                spin_lock(&buf->rb_lock);
        }
        spin_unlock(&buf->rb_lock);
}
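
/* The drop-and-reacquire dance around frwr_release_mr() above exists
 * because deregistering an MR (ultimately ib_dereg_mr()) can sleep,
 * which is not allowed under a spinlock; rb_lock is held only for the
 * brief list manipulation on either side of the release.
 */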

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        rpcrdma_reps_destroy(buf);

        while (!list_empty(&buf->rb_send_bufs)) {
                struct rpcrdma_req *req;

                req = list_first_entry(&buf->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
                list_del(&req->rl_list);
                rpcrdma_req_destroy(req);
        }
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr;

        spin_lock(&buf->rb_lock);
        mr = rpcrdma_mr_pop(&buf->rb_mrs);
        spin_unlock(&buf->rb_lock);
        return mr;
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;

        spin_lock(&buffers->rb_lock);
        req = list_first_entry_or_null(&buffers->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
        if (req)
                list_del_init(&req->rl_list);
        spin_unlock(&buffers->rb_lock);
        return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
        if (req->rl_reply)
                rpcrdma_rep_put(buffers, req->rl_reply);
        req->rl_reply = NULL;

        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);
        spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_recv_buffer_put - Release rpcrdma_rep back to free list
 * @rep: rep to release
 *
 * Used after error conditions.
 */
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
        rpcrdma_rep_put(&rep->rr_rxprt->rx_buf, rep);
}

/* Returns a pointer to an rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags)
{
        struct rpcrdma_regbuf *rb;

        rb = kmalloc(sizeof(*rb), flags);
        if (!rb)
                return NULL;
        rb->rg_data = kmalloc(size, flags);
        if (!rb->rg_data) {
                kfree(rb);
                return NULL;
        }

        rb->rg_device = NULL;
        rb->rg_direction = direction;
        rb->rg_iov.length = size;
        return rb;
}
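
/* Typical regbuf lifecycle in this file: allocate with
 * rpcrdma_regbuf_alloc(), DMA-map lazily on first use via
 * __rpcrdma_regbuf_dma_map() (which also fills in the lkey), and
 * release with rpcrdma_regbuf_free(), which unmaps before freeing.
 * A regbuf created with DMA_NONE is never mapped; see
 * __rpcrdma_regbuf_dma_map() below.
 */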

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched. Note that on success the
 * old contents of @rb are discarded, not copied over.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
        void *buf;

        buf = kmalloc(size, flags);
        if (!buf)
                return false;

        rpcrdma_regbuf_dma_unmap(rb);
        kfree(rb->rg_data);

        rb->rg_data = buf;
        rb->rg_iov.length = size;
        return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
                              struct rpcrdma_regbuf *rb)
{
        struct ib_device *device = r_xprt->rx_ep->re_id->device;

        if (rb->rg_direction == DMA_NONE)
                return false;

        rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
                                            rdmab_length(rb), rb->rg_direction);
        if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
                trace_xprtrdma_dma_maperr(rdmab_addr(rb));
                return false;
        }

        rb->rg_device = device;
        rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
        return true;
}

static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
        if (!rb)
                return;

        if (!rpcrdma_regbuf_is_mapped(rb))
                return;

        ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
                            rb->rg_direction);
        rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
        rpcrdma_regbuf_dma_unmap(rb);
        if (rb)
                kfree(rb->rg_data);
        kfree(rb);
}

/**
 * rpcrdma_post_sends - Post WRs to a transport's Send Queue
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req containing the Send WR to post
 *
 * Returns 0 if the post was successful, otherwise -ENOTCONN
 * is returned.
 */
int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *send_wr = &req->rl_wr;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        int rc;

        if (!ep->re_send_count || kref_read(&req->rl_kref) > 1) {
                send_wr->send_flags |= IB_SEND_SIGNALED;
                ep->re_send_count = ep->re_send_batch;
        } else {
                send_wr->send_flags &= ~IB_SEND_SIGNALED;
                --ep->re_send_count;
        }

        trace_xprtrdma_post_send(req);
        rc = frwr_send(r_xprt, req);
        if (rc)
                return -ENOTCONN;
        return 0;
}
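
/* Signaling policy illustrated: with re_send_batch set to one eighth
 * of re_max_requests in rpcrdma_ep_create(), roughly one Send in each
 * batch carries IB_SEND_SIGNALED. Its completion drives
 * rpcrdma_wc_send() -> rpcrdma_sendctx_put_locked(), which retires
 * the whole run of preceding unsignaled Sends at once. A Send whose
 * req is still referenced elsewhere (rl_kref > 1) is always signaled,
 * presumably so its completion is not deferred behind later traffic.
 */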

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @temp: mark Receive buffers to be deleted after use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct ib_recv_wr *wr, *bad_wr;
        struct rpcrdma_rep *rep;
        int needed, count, rc;

        rc = 0;
        count = 0;

        needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
        if (likely(ep->re_receive_count > needed))
                goto out;
        needed -= ep->re_receive_count;
        if (!temp)
                needed += RPCRDMA_MAX_RECV_BATCH;

        /* fast path: all needed reps can be found on the free list */
        wr = NULL;
        while (needed) {
                rep = rpcrdma_rep_get_locked(buf);
                if (rep && rep->rr_temp) {
                        rpcrdma_rep_destroy(rep);
                        continue;
                }
                if (!rep)
                        rep = rpcrdma_rep_create(r_xprt, temp);
                if (!rep)
                        break;

                rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
                trace_xprtrdma_post_recv(rep);
                rep->rr_recv_wr.next = wr;
                wr = &rep->rr_recv_wr;
                --needed;
                ++count;
        }
        if (!wr)
                goto out;

        rc = ib_post_recv(ep->re_id->qp, wr,
                          (const struct ib_recv_wr **)&bad_wr);
out:
        trace_xprtrdma_post_recvs(r_xprt, count, rc);
        if (rc) {
                for (wr = bad_wr; wr;) {
                        struct rpcrdma_rep *rep;

                        rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
                        wr = wr->next;
                        rpcrdma_recv_buffer_put(rep);
                        --count;
                }
        }
        ep->re_receive_count += count;
}