net/sunrpc/xprtrdma/verbs.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/log2.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc);
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);

/* Wait for outstanding transport work to finish. ib_drain_qp
 * handles the drains in the wrong order for us, so open code
 * them here.
 */
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rdma_cm_id *id = ep->re_id;

        /* Wait for rpcrdma_post_recvs() to leave its critical
         * section.
         */
        if (atomic_inc_return(&ep->re_receiving) > 1)
                wait_for_completion(&ep->re_done);

        /* Flush Receives, then wait for deferred Reply work
         * to complete.
         */
        ib_drain_rq(id->qp);

        /* Deferred Reply processing might have scheduled
         * local invalidations.
         */
        ib_drain_sq(id->qp);

        rpcrdma_ep_put(ep);
}

/* Ensure xprt_force_disconnect() is invoked exactly once when a
 * connection is closed or lost. (The important thing is that it
 * is invoked at least once.)
 */
void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
{
        if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
                xprt_force_disconnect(ep->re_xprt);
}

/**
 * rpcrdma_flush_disconnect - Disconnect on flushed completion
 * @r_xprt: transport to disconnect
 * @wc: work completion entry
 *
 * Must be called in process context.
 */
void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
        if (wc->status != IB_WC_SUCCESS)
                rpcrdma_force_disconnect(r_xprt->rx_ep);
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue
 * @wc: WCE for a completed Send WR
 *
 */
static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
        struct rpcrdma_xprt *r_xprt = cq->cq_context;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(wc, &sc->sc_cid);
        rpcrdma_sendctx_put_locked(r_xprt, sc);
        rpcrdma_flush_disconnect(r_xprt, wc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue
 * @wc: WCE for a completed Receive WR
 *
 */
static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);
        struct rpcrdma_xprt *r_xprt = cq->cq_context;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_receive(wc, &rep->rr_cid);
        --r_xprt->rx_ep->re_receive_count;
        if (wc->status != IB_WC_SUCCESS)
                goto out_flushed;

        /* status == SUCCESS means all fields in wc are trustworthy */
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

        rpcrdma_reply_handler(rep);
        return;

out_flushed:
        rpcrdma_flush_disconnect(r_xprt, wc);
        rpcrdma_rep_put(&r_xprt->rx_buf, rep);
}

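/* Reduce this connection's inline thresholds to match the
 * buffer sizes the peer advertised in its connection private
 * message, then recompute the maximum header sizes.
 */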
static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
                                      struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < ep->re_inline_recv)
                ep->re_inline_recv = rsize;
        if (wsize < ep->re_inline_send)
                ep->re_inline_send = wsize;

        rpcrdma_set_max_header_sizes(ep);
}

/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
        struct rpcrdma_ep *ep = id->context;

        might_sleep();

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ep->re_async_rc = 0;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ep->re_async_rc = -EPROTO;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ep->re_async_rc = -ENETUNREACH;
                complete(&ep->re_done);
                return 0;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                pr_info("rpcrdma: removing device %s for %pISpc\n",
                        ep->re_id->device->name, sap);
                fallthrough;
        case RDMA_CM_EVENT_ADDR_CHANGE:
                ep->re_connect_status = -ENODEV;
                goto disconnected;
        case RDMA_CM_EVENT_ESTABLISHED:
                rpcrdma_ep_get(ep);
                ep->re_connect_status = 1;
                rpcrdma_update_cm_private(ep, &event->param.conn);
                trace_xprtrdma_inline_thresh(ep);
                wake_up_all(&ep->re_connect_wait);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                ep->re_connect_status = -ENOTCONN;
                goto wake_connect_worker;
        case RDMA_CM_EVENT_UNREACHABLE:
                ep->re_connect_status = -ENETUNREACH;
                goto wake_connect_worker;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
                        sap, rdma_reject_msg(id, event->status));
                ep->re_connect_status = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        ep->re_connect_status = -ENOTCONN;
wake_connect_worker:
                wake_up_all(&ep->re_connect_wait);
                return 0;
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
disconnected:
                rpcrdma_force_disconnect(ep);
                return rpcrdma_ep_put(ep);
        default:
                break;
        }

        dprintk("RPC:       %s: %pISpc on %s/frwr: %s\n", __func__, sap,
                ep->re_id->device->name, rdma_event_msg(event->event));
        return 0;
}

static struct rdma_cm_id *rpcrdma_create_id(struct rpcrdma_xprt *r_xprt,
                                            struct rpcrdma_ep *ep)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ep->re_done);

        id = rdma_create_id(xprt->xprt_net, rpcrdma_cm_event_handler, ep,
                            RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id))
                return id;

        ep->re_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)&xprt->addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
        if (rc < 0)
                goto out;

        rc = ep->re_async_rc;
        if (rc)
                goto out;

        ep->re_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc)
                goto out;
        rc = wait_for_completion_interruptible_timeout(&ep->re_done, wtimeout);
        if (rc < 0)
                goto out;
        rc = ep->re_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}

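/* kref release callback: tear down the endpoint's QP, completion
 * queues, and protection domain, then drop the module reference
 * taken in rpcrdma_ep_create().
 */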
static void rpcrdma_ep_destroy(struct kref *kref)
{
        struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);

        if (ep->re_id->qp) {
                rdma_destroy_qp(ep->re_id);
                ep->re_id->qp = NULL;
        }

        if (ep->re_attr.recv_cq)
                ib_free_cq(ep->re_attr.recv_cq);
        ep->re_attr.recv_cq = NULL;
        if (ep->re_attr.send_cq)
                ib_free_cq(ep->re_attr.send_cq);
        ep->re_attr.send_cq = NULL;

        if (ep->re_pd)
                ib_dealloc_pd(ep->re_pd);
        ep->re_pd = NULL;

        kfree(ep);
        module_put(THIS_MODULE);
}

static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
{
        kref_get(&ep->re_kref);
}

/* Returns:
 *     %0 if @ep still has a positive kref count, or
 *     %1 if @ep was destroyed successfully.
 */
static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
        return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}

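/* Allocate and wire up a new endpoint: resolve the server's
 * address, size the QP and completion queues to the device's
 * capabilities, and prepare the RDMA-CM connection parameters.
 */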
static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_connect_private *pmsg;
        struct ib_device *device;
        struct rdma_cm_id *id;
        struct rpcrdma_ep *ep;
        int rc;

        ep = kzalloc(sizeof(*ep), GFP_NOFS);
        if (!ep)
                return -ENOTCONN;
        ep->re_xprt = &r_xprt->rx_xprt;
        kref_init(&ep->re_kref);

        id = rpcrdma_create_id(r_xprt, ep);
        if (IS_ERR(id)) {
                kfree(ep);
                return PTR_ERR(id);
        }
        __module_get(THIS_MODULE);
        device = id->device;
        ep->re_id = id;
        reinit_completion(&ep->re_done);

        ep->re_max_requests = r_xprt->rx_xprt.max_reqs;
        ep->re_inline_send = xprt_rdma_max_inline_write;
        ep->re_inline_recv = xprt_rdma_max_inline_read;
        rc = frwr_query_device(ep, device);
        if (rc)
                goto out_destroy;

        r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->re_max_requests);

        ep->re_attr.srq = NULL;
        ep->re_attr.cap.max_inline_data = 0;
        ep->re_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->re_attr.qp_type = IB_QPT_RC;
        ep->re_attr.port_num = ~0;

        dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->re_attr.cap.max_send_wr,
                ep->re_attr.cap.max_recv_wr,
                ep->re_attr.cap.max_send_sge,
                ep->re_attr.cap.max_recv_sge);

        ep->re_send_batch = ep->re_max_requests >> 3;
        ep->re_send_count = ep->re_send_batch;
        init_waitqueue_head(&ep->re_connect_wait);

        ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,
                                              ep->re_attr.cap.max_send_wr,
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.send_cq)) {
                rc = PTR_ERR(ep->re_attr.send_cq);
                goto out_destroy;
        }

        ep->re_attr.recv_cq = ib_alloc_cq_any(device, r_xprt,
                                              ep->re_attr.cap.max_recv_wr,
                                              IB_POLL_WORKQUEUE);
        if (IS_ERR(ep->re_attr.recv_cq)) {
                rc = PTR_ERR(ep->re_attr.recv_cq);
                goto out_destroy;
        }
        ep->re_receive_count = 0;

        /* Initialize cma parameters */
        memset(&ep->re_remote_cma, 0, sizeof(ep->re_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg = &ep->re_cm_private;
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= RPCRDMA_CMP_F_SND_W_INV_OK;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(ep->re_inline_send);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(ep->re_inline_recv);
        ep->re_remote_cma.private_data = pmsg;
        ep->re_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->re_remote_cma.initiator_depth = 0;
        ep->re_remote_cma.responder_resources =
                min_t(int, U8_MAX, device->attrs.max_qp_rd_atom);

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->re_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->re_remote_cma.flow_control = 0;
        ep->re_remote_cma.rnr_retry_count = 0;

        ep->re_pd = ib_alloc_pd(device, 0);
        if (IS_ERR(ep->re_pd)) {
                rc = PTR_ERR(ep->re_pd);
                goto out_destroy;
        }

        rc = rdma_create_qp(id, ep->re_pd, &ep->re_attr);
        if (rc)
                goto out_destroy;

        r_xprt->rx_ep = ep;
        return 0;

out_destroy:
        rpcrdma_ep_put(ep);
        rdma_destroy_id(id);
        return rc;
}

/**
 * rpcrdma_xprt_connect - Connect an unconnected transport
 * @r_xprt: controlling transport instance
 *
 * Returns 0 on success or a negative errno.
 */
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_ep *ep;
        int rc;

        rc = rpcrdma_ep_create(r_xprt);
        if (rc)
                return rc;
        ep = r_xprt->rx_ep;

        xprt_clear_connected(xprt);
        rpcrdma_reset_cwnd(r_xprt);

        /* Bump the ep's reference count while there are
         * outstanding Receives.
         */
        rpcrdma_ep_get(ep);
        rpcrdma_post_recvs(r_xprt, 1, true);

        rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
        if (rc)
                goto out;

        if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
                xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
        wait_event_interruptible(ep->re_connect_wait,
                                 ep->re_connect_status != 0);
        if (ep->re_connect_status <= 0) {
                rc = ep->re_connect_status;
                goto out;
        }

        rc = rpcrdma_sendctxs_create(r_xprt);
        if (rc) {
                rc = -ENOTCONN;
                goto out;
        }

        rc = rpcrdma_reqs_setup(r_xprt);
        if (rc) {
                rc = -ENOTCONN;
                goto out;
        }
        rpcrdma_mrs_create(r_xprt);
        frwr_wp_create(r_xprt);

out:
        trace_xprtrdma_connect(r_xprt, rc);
        return rc;
}

/**
 * rpcrdma_xprt_disconnect - Disconnect underlying transport
 * @r_xprt: controlling transport instance
 *
 * Caller serializes. Either the transport send lock is held,
 * or we're being called to destroy the transport.
 *
 * On return, @r_xprt is completely divested of all hardware
 * resources and prepared for the next ->connect operation.
 */
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct rdma_cm_id *id;
        int rc;

        if (!ep)
                return;

        id = ep->re_id;
        rc = rdma_disconnect(id);
        trace_xprtrdma_disconnect(r_xprt, rc);

        rpcrdma_xprt_drain(r_xprt);
        rpcrdma_reps_unmap(r_xprt);
        rpcrdma_reqs_reset(r_xprt);
        rpcrdma_mrs_destroy(r_xprt);
        rpcrdma_sendctxs_destroy(r_xprt);

        if (rpcrdma_ep_put(ep))
                rdma_destroy_id(id);

        r_xprt->rx_ep = NULL;
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and rpcrdma_xprt_drain has flushed all remaining
 * Send requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned long i;

        if (!buf->rb_sc_ctxs)
                return;
        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
        buf->rb_sc_ctxs = NULL;
}

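/* Allocate one send context, with room for the device's maximum
 * number of Send SGEs, and assign it a completion ID for use by
 * tracepoints.
 */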
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_cqe.done = rpcrdma_wc_send;
        sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;
        sc->sc_cid.ci_completion_id =
                atomic_inc_return(&ep->re_completion_ids);
        return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
        i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
        buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
        if (!buf->rb_sc_ctxs)
                return -ENOMEM;

        buf->rb_sc_last = i - 1;
        for (i = 0; i <= buf->rb_sc_last; i++) {
                sc = rpcrdma_sendctx_create(r_xprt->rx_ep);
                if (!sc)
                        return -ENOMEM;

                buf->rb_sc_ctxs[i] = sc;
        }

        buf->rb_sc_head = 0;
        buf->rb_sc_tail = 0;
        return 0;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
                                          unsigned long item)
{
        return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @r_xprt: controlling transport instance
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per transport), and
 * provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long next_head;

        next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

        if (next_head == READ_ONCE(buf->rb_sc_tail))
                goto out_emptyq;

        /* ORDER: item must be accessed _before_ head is updated */
        sc = buf->rb_sc_ctxs[next_head];

        /* Releasing the lock in the caller acts as a memory
         * barrier that flushes rb_sc_head.
         */
        buf->rb_sc_head = next_head;

        return sc;

out_emptyq:
        /* The queue is "empty" if there have not been enough Send
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
        xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @r_xprt: controlling transport instance
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per transport).
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_xprt *r_xprt,
                                       struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        unsigned long next_tail;

        /* Unmap SGEs of previously completed but unsignaled
         * Sends by walking up the queue until @sc is found.
         */
        next_tail = buf->rb_sc_tail;
        do {
                next_tail = rpcrdma_sendctx_next(buf, next_tail);

                /* ORDER: item must be accessed _before_ tail is updated */
                rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]);

        } while (buf->rb_sc_ctxs[next_tail] != sc);

        /* Paired with READ_ONCE */
        smp_store_release(&buf->rb_sc_tail, next_tail);

        xprt_write_space(&r_xprt->rx_xprt);
}

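/* Allocate up to re_max_rdma_segs MRs and add them to the
 * transport's free list. The loop stops at the first allocation
 * or registration failure; the refresh worker replenishes the
 * list later as needed.
 */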
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned int count;

        for (count = 0; count < ep->re_max_rdma_segs; count++) {
                struct rpcrdma_mr *mr;
                int rc;

                mr = kzalloc(sizeof(*mr), GFP_NOFS);
                if (!mr)
                        break;

                rc = frwr_mr_init(r_xprt, mr);
                if (rc) {
                        kfree(mr);
                        break;
                }

                spin_lock(&buf->rb_lock);
                rpcrdma_mr_push(mr, &buf->rb_mrs);
                list_add(&mr->mr_all, &buf->rb_all_mrs);
                spin_unlock(&buf->rb_lock);
        }

        r_xprt->rx_stats.mrs_allocated += count;
        trace_xprtrdma_createmrs(r_xprt, count);
}

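/* Work function: replenish the MR free list, then invoke
 * xprt_write_space() to wake RPCs waiting for MRs to become
 * available.
 */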
static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_refresh_worker);
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);

        rpcrdma_mrs_create(r_xprt);
        xprt_write_space(&r_xprt->rx_xprt);
}

/**
 * rpcrdma_mrs_refresh - Wake the MR refresh worker
 * @r_xprt: controlling transport instance
 *
 */
void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;

        /* If there is no underlying connection, it's no use
         * to wake the refresh worker.
         */
        if (ep->re_connect_status == 1) {
                /* The work is scheduled on a WQ_MEM_RECLAIM
                 * workqueue in order to prevent MR allocation
                 * from recursing into NFS during direct reclaim.
                 */
                queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
        }
}

/**
 * rpcrdma_req_create - Allocate an rpcrdma_req object
 * @r_xprt: controlling r_xprt
 * @size: initial size, in bytes, of send and receive buffers
 * @flags: GFP flags passed to memory allocators
 *
 * Returns an allocated and fully initialized rpcrdma_req or NULL.
 */
struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
                                       gfp_t flags)
{
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), flags);
        if (req == NULL)
                goto out1;

        req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
        if (!req->rl_sendbuf)
                goto out2;

        req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
        if (!req->rl_recvbuf)
                goto out3;

        INIT_LIST_HEAD(&req->rl_free_mrs);
        INIT_LIST_HEAD(&req->rl_registered);
        spin_lock(&buffer->rb_lock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
        spin_unlock(&buffer->rb_lock);
        return req;

out3:
        kfree(req->rl_sendbuf);
out2:
        kfree(req);
out1:
        return NULL;
}

/**
 * rpcrdma_req_setup - Per-connection instance setup of an rpcrdma_req object
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req object to set up
 *
 * Returns zero on success, and a negative errno on failure.
 */
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct rpcrdma_regbuf *rb;
        size_t maxhdrsize;

        /* Compute maximum header buffer size in bytes */
        maxhdrsize = rpcrdma_fixed_maxsz + 3 +
                     r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
        maxhdrsize *= sizeof(__be32);
        rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
                                  DMA_TO_DEVICE, GFP_KERNEL);
        if (!rb)
                goto out;

        if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))
                goto out_free;

        req->rl_rdmabuf = rb;
        xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));
        return 0;

out_free:
        rpcrdma_regbuf_free(rb);
out:
        return -ENOMEM;
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. E.g., the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static int rpcrdma_reqs_setup(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        int rc;

        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rc = rpcrdma_req_setup(r_xprt, req);
                if (rc)
                        return rc;
        }
        return 0;
}

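/* Release connection-specific resources held by an rpcrdma_req
 * so that the req can be reused on a new connection instance.
 */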
static void rpcrdma_req_reset(struct rpcrdma_req *req)
{
        /* Credits are valid for only one connection */
        req->rl_slot.rq_cong = 0;

        rpcrdma_regbuf_free(req->rl_rdmabuf);
        req->rl_rdmabuf = NULL;

        rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
        rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);

        frwr_reset(req);
}

/* ASSUMPTION: the rb_allreqs list is stable for the duration,
 * and thus can be walked without holding rb_lock. E.g., the
 * caller is holding the transport send lock to exclude
 * device removal or disconnection.
 */
static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        list_for_each_entry(req, &buf->rb_allreqs, rl_all)
                rpcrdma_req_reset(req);
}

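/* Allocate a Receive buffer sized to the connection's inline
 * receive threshold, DMA-map it, and build the Receive WR that
 * will post it.
 */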
static noinline
struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
                                       bool temp)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_rep *rep;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (!rep->rr_rdmabuf)
                goto out_free;

        if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
                goto out_free_regbuf;

        rep->rr_cid.ci_completion_id =
                atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);

        xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
                     rdmab_length(rep->rr_rdmabuf));
        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
        rep->rr_recv_wr.next = NULL;
        rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;

        spin_lock(&buf->rb_lock);
        list_add(&rep->rr_all, &buf->rb_all_reps);
        spin_unlock(&buf->rb_lock);
        return rep;

out_free_regbuf:
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
out_free:
        kfree(rep);
out:
        return NULL;
}

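/* Free a rep that has already been removed from rb_all_reps. */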
static void rpcrdma_rep_free(struct rpcrdma_rep *rep)
{
        rpcrdma_regbuf_free(rep->rr_rdmabuf);
        kfree(rep);
}

static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
{
        struct rpcrdma_buffer *buf = &rep->rr_rxprt->rx_buf;

        spin_lock(&buf->rb_lock);
        list_del(&rep->rr_all);
        spin_unlock(&buf->rb_lock);

        rpcrdma_rep_free(rep);
}

static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
{
        struct llist_node *node;

        /* Calls to llist_del_first are required to be serialized */
        node = llist_del_first(&buf->rb_free_reps);
        if (!node)
                return NULL;
        return llist_entry(node, struct rpcrdma_rep, rr_node);
}

/**
 * rpcrdma_rep_put - Release rpcrdma_rep back to free list
 * @buf: buffer pool
 * @rep: rep to release
 *
 */
void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep)
{
        llist_add(&rep->rr_node, &buf->rb_free_reps);
}

/* Caller must ensure the QP is quiescent (RQ is drained) before
 * invoking this function, to guarantee rb_all_reps is not
 * changing.
 */
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_rep *rep;

        list_for_each_entry(rep, &buf->rb_all_reps, rr_all) {
                rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
                rep->rr_temp = true;    /* Mark this rep for destruction */
        }
}

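/* Free all reps owned by this transport. rb_lock is released
 * while each rep is freed, then re-acquired to pop the next
 * entry from rb_all_reps.
 */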
static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_rep *rep;

        spin_lock(&buf->rb_lock);
        while ((rep = list_first_entry_or_null(&buf->rb_all_reps,
                                               struct rpcrdma_rep,
                                               rr_all)) != NULL) {
                list_del(&rep->rr_all);
                spin_unlock(&buf->rb_lock);

                rpcrdma_rep_free(rep);

                spin_lock(&buf->rb_lock);
        }
        spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_create - Create initial set of req/rep objects
 * @r_xprt: transport instance to (re)initialize
 *
 * Returns zero on success, otherwise a negative errno.
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        int i, rc;

        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_lock);
        INIT_LIST_HEAD(&buf->rb_mrs);
        INIT_LIST_HEAD(&buf->rb_all_mrs);
        INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker);

        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
        INIT_LIST_HEAD(&buf->rb_all_reps);

        rc = -ENOMEM;
        for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
                struct rpcrdma_req *req;

                req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
                                         GFP_KERNEL);
                if (!req)
                        goto out;
                list_add(&req->rl_list, &buf->rb_send_bufs);
        }

        init_llist_head(&buf->rb_free_reps);

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}

/**
 * rpcrdma_req_destroy - Destroy an rpcrdma_req object
 * @req: unused object to be destroyed
 *
 * Relies on caller holding the transport send lock to protect
 * removing req->rl_all from buf->rb_allreqs safely.
 */
void rpcrdma_req_destroy(struct rpcrdma_req *req)
{
        struct rpcrdma_mr *mr;

        list_del(&req->rl_all);

        while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
                struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;

                spin_lock(&buf->rb_lock);
                list_del(&mr->mr_all);
                spin_unlock(&buf->rb_lock);

                frwr_mr_release(mr);
        }

        rpcrdma_regbuf_free(req->rl_recvbuf);
        rpcrdma_regbuf_free(req->rl_sendbuf);
        rpcrdma_regbuf_free(req->rl_rdmabuf);
        kfree(req);
}

/**
 * rpcrdma_mrs_destroy - Release all of a transport's MRs
 * @r_xprt: controlling transport instance
 *
 * Relies on caller holding the transport send lock to protect
 * removing mr->mr_list from req->rl_free_mrs safely.
 */
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr;

        cancel_work_sync(&buf->rb_refresh_worker);

        spin_lock(&buf->rb_lock);
        while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
                                              struct rpcrdma_mr,
                                              mr_all)) != NULL) {
                list_del(&mr->mr_list);
                list_del(&mr->mr_all);
                spin_unlock(&buf->rb_lock);

                frwr_mr_release(mr);

                spin_lock(&buf->rb_lock);
        }
        spin_unlock(&buf->rb_lock);
}

/**
 * rpcrdma_buffer_destroy - Release all hw resources
 * @buf: root control block for resources
 *
 * ORDERING: relies on a prior rpcrdma_xprt_drain:
 * - No more Send or Receive completions can occur
 * - All MRs, reps, and reqs are returned to their free lists
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        rpcrdma_reps_destroy(buf);

        while (!list_empty(&buf->rb_send_bufs)) {
                struct rpcrdma_req *req;

                req = list_first_entry(&buf->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
                list_del(&req->rl_list);
                rpcrdma_req_destroy(req);
        }
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr;

        spin_lock(&buf->rb_lock);
        mr = rpcrdma_mr_pop(&buf->rb_mrs);
        spin_unlock(&buf->rb_lock);
        return mr;
}

/**
 * rpcrdma_reply_put - Put reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
        if (req->rl_reply) {
                rpcrdma_rep_put(buffers, req->rl_reply);
                req->rl_reply = NULL;
        }
}

/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;

        spin_lock(&buffers->rb_lock);
        req = list_first_entry_or_null(&buffers->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
        if (req)
                list_del_init(&req->rl_list);
        spin_unlock(&buffers->rb_lock);
        return req;
}

/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @buffers: buffer pool
 * @req: object to return
 *
 */
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
        rpcrdma_reply_put(buffers, req);

        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);
        spin_unlock(&buffers->rb_lock);
}

/* Returns a pointer to a rpcrdma_regbuf object, or NULL.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via frwr_map.
 */
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags)
{
        struct rpcrdma_regbuf *rb;

        rb = kmalloc(sizeof(*rb), flags);
        if (!rb)
                return NULL;
        rb->rg_data = kmalloc(size, flags);
        if (!rb->rg_data) {
                kfree(rb);
                return NULL;
        }

        rb->rg_device = NULL;
        rb->rg_direction = direction;
        rb->rg_iov.length = size;
        return rb;
}

/**
 * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
 * @rb: regbuf to reallocate
 * @size: size of buffer to be allocated, in bytes
 * @flags: GFP flags
 *
 * Returns true if reallocation was successful. If false is
 * returned, @rb is left untouched.
 */
bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
{
        void *buf;

        buf = kmalloc(size, flags);
        if (!buf)
                return false;

        rpcrdma_regbuf_dma_unmap(rb);
        kfree(rb->rg_data);

        rb->rg_data = buf;
        rb->rg_iov.length = size;
        return true;
}

/**
 * __rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is now DMA mapped to @r_xprt's device
 */
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
                              struct rpcrdma_regbuf *rb)
{
        struct ib_device *device = r_xprt->rx_ep->re_id->device;

        if (rb->rg_direction == DMA_NONE)
                return false;

        rb->rg_iov.addr = ib_dma_map_single(device, rdmab_data(rb),
                                            rdmab_length(rb), rb->rg_direction);
        if (ib_dma_mapping_error(device, rdmab_addr(rb))) {
                trace_xprtrdma_dma_maperr(rdmab_addr(rb));
                return false;
        }

        rb->rg_device = device;
        rb->rg_iov.lkey = r_xprt->rx_ep->re_pd->local_dma_lkey;
        return true;
}

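/* DMA-unmap a regbuf, if it is currently mapped. Safe to call
 * with a NULL @rb.
 */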
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb)
{
        if (!rb)
                return;

        if (!rpcrdma_regbuf_is_mapped(rb))
                return;

        ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb), rdmab_length(rb),
                            rb->rg_direction);
        rb->rg_device = NULL;
}

static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
{
        rpcrdma_regbuf_dma_unmap(rb);
        if (rb)
                kfree(rb->rg_data);
        kfree(rb);
}

/**
 * rpcrdma_post_recvs - Refill the Receive Queue
 * @r_xprt: controlling transport instance
 * @needed: current credit grant
 * @temp: mark Receive buffers to be deleted after one use
 *
 */
void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct ib_recv_wr *wr, *bad_wr;
        struct rpcrdma_rep *rep;
        int count, rc;

        rc = 0;
        count = 0;

        if (likely(ep->re_receive_count > needed))
                goto out;
        needed -= ep->re_receive_count;
        if (!temp)
                needed += RPCRDMA_MAX_RECV_BATCH;

        if (atomic_inc_return(&ep->re_receiving) > 1)
                goto out;

        /* fast path: all needed reps can be found on the free list */
        wr = NULL;
        while (needed) {
                rep = rpcrdma_rep_get_locked(buf);
                if (rep && rep->rr_temp) {
                        rpcrdma_rep_destroy(rep);
                        continue;
                }
                if (!rep)
                        rep = rpcrdma_rep_create(r_xprt, temp);
                if (!rep)
                        break;

                rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
                trace_xprtrdma_post_recv(rep);
                rep->rr_recv_wr.next = wr;
                wr = &rep->rr_recv_wr;
                --needed;
                ++count;
        }
        if (!wr)
                goto out;

        rc = ib_post_recv(ep->re_id->qp, wr,
                          (const struct ib_recv_wr **)&bad_wr);
        if (rc) {
                trace_xprtrdma_post_recvs_err(r_xprt, rc);
                for (wr = bad_wr; wr;) {
                        struct rpcrdma_rep *rep;

                        rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
                        wr = wr->next;
                        rpcrdma_rep_put(buf, rep);
                        --count;
                }
        }
        if (atomic_dec_return(&ep->re_receiving) > 0)
                complete(&ep->re_done);

out:
        trace_xprtrdma_post_recvs(r_xprt, count);
        ep->re_receive_count += count;
        return;
}