// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

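/* MR lifecycle in this file, summarized (an added overview note):
 * frwr_mr_init allocates the ib_mr and scatterlist; frwr_map builds a
 * FAST_REG WR that frwr_send chains ahead of the RPC Send WR;
 * invalidation happens via frwr_reminv (remote), frwr_unmap_sync, or
 * frwr_unmap_async; frwr_mr_put returns a healthy MR for reuse, while
 * frwr_mr_recycle destroys one whose state is no longer trustworthy.
 */
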
/**
 * frwr_release_mr - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_release_mr(struct rpcrdma_mr *mr)
{
        int rc;

        rc = ib_dereg_mr(mr->frwr.fr_mr);
        if (rc)
                trace_xprtrdma_frwr_dereg(mr, rc);
        kfree(mr->mr_sg);
        kfree(mr);
}

static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
        if (mr->mr_device) {
                trace_xprtrdma_mr_unmap(mr);
                ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
                                mr->mr_dir);
                mr->mr_device = NULL;
        }
}

static void frwr_mr_recycle(struct rpcrdma_mr *mr)
{
        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

        trace_xprtrdma_mr_recycle(mr);

        frwr_mr_unmap(r_xprt, mr);

        spin_lock(&r_xprt->rx_buf.rb_lock);
        list_del(&mr->mr_all);
        r_xprt->rx_stats.mrs_recycled++;
        spin_unlock(&r_xprt->rx_buf.rb_lock);

        frwr_release_mr(mr);
}

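/* Note (added): frwr_mr_recycle is the error path -- it destroys an MR
 * whose state is unknown after a flushed completion. frwr_mr_put below
 * is the normal path, returning a healthy MR for reuse by @mr's req.
 */
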
static void frwr_mr_put(struct rpcrdma_mr *mr)
{
        frwr_mr_unmap(mr->mr_xprt, mr);

        /* The MR is returned to the req's MR free list instead
         * of to the xprt's MR free list. No spinlock is needed.
         */
        rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/* frwr_reset - Place MRs back on the free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
        struct rpcrdma_mr *mr;

        while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
                frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        unsigned int depth = ep->re_max_fr_depth;
        struct scatterlist *sg;
        struct ib_mr *frmr;
        int rc;

        frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
        if (IS_ERR(frmr))
                goto out_mr_err;

        sg = kmalloc_array(depth, sizeof(*sg), GFP_NOFS);
        if (!sg)
                goto out_list_err;

        mr->mr_xprt = r_xprt;
        mr->frwr.fr_mr = frmr;
        mr->mr_device = NULL;
        INIT_LIST_HEAD(&mr->mr_list);
        init_completion(&mr->frwr.fr_linv_done);

        sg_init_table(sg, depth);
        mr->mr_sg = sg;
        return 0;

out_mr_err:
        rc = PTR_ERR(frmr);
        trace_xprtrdma_frwr_alloc(mr, rc);
        return rc;

out_list_err:
        ib_dereg_mr(frmr);
        return -ENOMEM;
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *      ep->re_attr
 *      ep->re_max_requests
 *      ep->re_max_rdma_segs
 *      ep->re_max_fr_depth
 *      ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
        const struct ib_device_attr *attrs = &device->attrs;
        int max_qp_wr, depth, delta;
        unsigned int max_sge;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
            attrs->max_fast_reg_page_list_len == 0) {
                pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
                       device->name);
                return -EINVAL;
        }

        max_sge = min_t(unsigned int, attrs->max_send_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
                return -ENOMEM;
        }
        ep->re_attr.cap.max_send_sge = max_sge;
        ep->re_attr.cap.max_recv_sge = 1;
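
        /* Note (added): each Receive lands in a single contiguous
         * inline buffer, so one receive SGE is sufficient.
         */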

        ep->re_mrtype = IB_MR_TYPE_MEM_REG;
        if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

        /* Quirk: Some devices advertise a large max_fast_reg_page_list_len
         * capability, but perform optimally when the MRs are not larger
         * than a page.
         */
        if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
                ep->re_max_fr_depth = attrs->max_sge_rd;
        else
                ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
        if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
                ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

        /* Add room for frwr register and invalidate WRs.
         * 1. FRWR reg WR for head
         * 2. FRWR invalidate WR for head
         * 3. N FRWR reg WRs for pagelist
         * 4. N FRWR invalidate WRs for pagelist
         * 5. FRWR reg WR for tail
         * 6. FRWR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRWR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
                do {
                        depth += 2; /* FRWR reg + invalidate */
                        delta -= ep->re_max_fr_depth;
                } while (delta > 0);
        }

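        /* Worked example (added): if RPCRDMA_MAX_DATA_SEGS is 64 and
         * the device depth is 16, delta starts at 48 and the loop runs
         * three times, leaving depth = 7 + 3 * 2 = 13 WRs per RPC.
         */
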
        max_qp_wr = attrs->max_qp_wr;
        max_qp_wr -= RPCRDMA_BACKWARD_WRS;
        max_qp_wr -= 1;
        if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
                return -ENOMEM;
        if (ep->re_max_requests > max_qp_wr)
                ep->re_max_requests = max_qp_wr;
        ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
        if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
                ep->re_max_requests = max_qp_wr / depth;
                if (!ep->re_max_requests)
                        return -ENOMEM;
                ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
        }
        ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
        ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
        ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
        ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
        ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

        ep->re_max_rdma_segs =
                DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
        /* Reply chunks require segments for head and tail buffers */
        ep->re_max_rdma_segs += 2;
        if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
                ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

        /* Ensure the underlying device is capable of conveying the
         * largest r/wsize NFS will ask for. This guarantees that
         * failing over from one RDMA device to another will not
         * break NFS I/O.
         */
        if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
                return -ENOMEM;

        return 0;
}

/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
                                struct rpcrdma_mr_seg *seg,
                                int nsegs, bool writing, __be32 xid,
                                struct rpcrdma_mr *mr)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct ib_reg_wr *reg_wr;
        int i, n, dma_nents;
        struct ib_mr *ibmr;
        u8 key;

        if (nsegs > ep->re_max_fr_depth)
                nsegs = ep->re_max_fr_depth;
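        /* Note (added): the loop below coalesces adjacent segments
         * into one scatterlist. Unless the MR type tolerates gaps
         * (SG_GAPS), it stops as soon as a page-alignment gap would
         * appear in the middle of the region.
         */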
        for (i = 0; i < nsegs;) {
                sg_set_page(&mr->mr_sg[i], seg->mr_page,
                            seg->mr_len, seg->mr_offset);

                ++seg;
                ++i;
                if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
                        continue;
                if ((i < nsegs && seg->mr_offset) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mr->mr_dir = rpcrdma_data_dir(writing);
        mr->mr_nents = i;

        dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
                                  mr->mr_dir);
        if (!dma_nents)
                goto out_dmamap_err;
        mr->mr_device = ep->re_id->device;

        ibmr = mr->frwr.fr_mr;
        n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
        if (n != dma_nents)
                goto out_mapmr_err;

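        /* Note (added): planting the XID in the upper bits of the iova
         * makes each RDMA segment traceable to its RPC on the wire, and
         * bumping the rkey's low-order key byte on every registration
         * helps the HCA catch accesses through a stale rkey.
         */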
        ibmr->iova &= 0x00000000ffffffff;
        ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
        key = (u8)(ibmr->rkey & 0x000000FF);
        ib_update_fast_reg_key(ibmr, ++key);

        reg_wr = &mr->frwr.fr_regwr;
        reg_wr->mr = ibmr;
        reg_wr->key = ibmr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        mr->mr_handle = ibmr->rkey;
        mr->mr_length = ibmr->length;
        mr->mr_offset = ibmr->iova;
        trace_xprtrdma_mr_map(mr);

        return seg;

out_dmamap_err:
        trace_xprtrdma_frwr_sgerr(mr, i);
        return ERR_PTR(-EIO);

out_mapmr_err:
        trace_xprtrdma_frwr_maperr(mr, n);
        return ERR_PTR(-EIO);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr =
                container_of(cqe, struct rpcrdma_frwr, fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid);
        /* The MR will get recycled when the associated req is retransmitted */

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

static void frwr_cid_init(struct rpcrdma_ep *ep,
                          struct rpcrdma_frwr *frwr)
{
        struct rpc_rdma_cid *cid = &frwr->fr_cid;

        cid->ci_queue_id = ep->re_attr.send_cq->res.id;
        cid->ci_completion_id = frwr->fr_mr->res.id;
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        struct ib_send_wr *post_wr;
        struct rpcrdma_mr *mr;

        post_wr = &req->rl_wr;
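        /* Note (added): each FastReg WR is prepended ahead of post_wr,
         * so the chain that is finally posted executes all FastReg WRs
         * first and the RPC Send WR last.
         */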
        list_for_each_entry(mr, &req->rl_registered, mr_list) {
                struct rpcrdma_frwr *frwr;

                frwr = &mr->frwr;

                frwr->fr_cqe.done = frwr_wc_fastreg;
                frwr_cid_init(ep, frwr);
                frwr->fr_regwr.wr.next = post_wr;
                frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
                frwr->fr_regwr.wr.num_sge = 0;
                frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
                frwr->fr_regwr.wr.send_flags = 0;

                post_wr = &frwr->fr_regwr.wr;
        }

        return ib_post_send(ep->re_id->qp, post_wr, NULL);
}

/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
        struct rpcrdma_mr *mr;

        list_for_each_entry(mr, mrs, mr_list)
                if (mr->mr_handle == rep->rr_inv_rkey) {
                        list_del_init(&mr->mr_list);
                        frwr_mr_put(mr);
                        break;  /* only one invalidated MR per RPC */
                }
}

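/* Note (added): frwr_mr_done is shared by the LocalInv completion
 * handlers below. A flushed completion leaves MR state unknowable,
 * so only a successful invalidation lets the MR be reused.
 */
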
static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
        if (wc->status != IB_WC_SUCCESS)
                frwr_mr_recycle(mr);
        else
                frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr =
                container_of(cqe, struct rpcrdma_frwr, fr_cqe);
        struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_li(wc, &frwr->fr_cid);
        frwr_mr_done(wc, mr);

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr =
                container_of(cqe, struct rpcrdma_frwr, fr_cqe);
        struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid);
        frwr_mr_done(wc, mr);
        complete(&frwr->fr_linv_done);

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *first, **prev, *last;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        const struct ib_send_wr *bad_wr;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
        int rc;

        /* ORDER: Invalidate all of the MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        frwr = NULL;
        prev = &first;
        while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

                trace_xprtrdma_mr_localinv(mr);
                r_xprt->rx_stats.local_inv_needed++;

                frwr = &mr->frwr;
                frwr->fr_cqe.done = frwr_wc_localinv;
                frwr_cid_init(ep, frwr);
                last = &frwr->fr_invwr;
                last->next = NULL;
                last->wr_cqe = &frwr->fr_cqe;
                last->sg_list = NULL;
                last->num_sge = 0;
                last->opcode = IB_WR_LOCAL_INV;
                last->send_flags = IB_SEND_SIGNALED;
                last->ex.invalidate_rkey = mr->mr_handle;

                *prev = last;
                prev = &last->next;
        }

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        frwr->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&frwr->fr_linv_done);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless re_id->qp is a valid pointer.
         */
        bad_wr = NULL;
        rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

        /* The final LOCAL_INV WR in the chain is supposed to
         * do the wake. If it was never posted, the wake will
         * not happen, so don't wait in that case.
         */
        if (bad_wr != first)
                wait_for_completion(&frwr->fr_linv_done);
        if (!rc)
                return;

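        /* Note (added): on success, each LocalInv completion has
         * already unmapped and released its MR via frwr_mr_done, so
         * only the posting-error path below remains.
         */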
        /* Recycle MRs in the LOCAL_INV chain that did not get posted.
         */
        trace_xprtrdma_post_linv_err(req, rc);
        while (bad_wr) {
                frwr = container_of(bad_wr, struct rpcrdma_frwr,
                                    fr_invwr);
                mr = container_of(frwr, struct rpcrdma_mr, frwr);
                bad_wr = bad_wr->next;

                list_del_init(&mr->mr_list);
                frwr_mr_recycle(mr);
        }
}

/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr =
                container_of(cqe, struct rpcrdma_frwr, fr_cqe);
        struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
        struct rpcrdma_rep *rep = mr->mr_req->rl_reply;

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid);
        frwr_mr_done(wc, mr);

        /* Ensure @rep is generated before frwr_mr_done */
        smp_rmb();
        rpcrdma_complete_rqst(rep);

        rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_send_wr *first, *last, **prev;
        struct rpcrdma_ep *ep = r_xprt->rx_ep;
        const struct ib_send_wr *bad_wr;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
        int rc;

        /* Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        frwr = NULL;
        prev = &first;
        while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {

                trace_xprtrdma_mr_localinv(mr);
                r_xprt->rx_stats.local_inv_needed++;

                frwr = &mr->frwr;
                frwr->fr_cqe.done = frwr_wc_localinv;
                frwr_cid_init(ep, frwr);
                last = &frwr->fr_invwr;
                last->next = NULL;
                last->wr_cqe = &frwr->fr_cqe;
                last->sg_list = NULL;
                last->num_sge = 0;
                last->opcode = IB_WR_LOCAL_INV;
                last->send_flags = IB_SEND_SIGNALED;
                last->ex.invalidate_rkey = mr->mr_handle;

                *prev = last;
                prev = &last->next;
        }

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete. The last completion will wake up the
         * RPC waiter.
         */
        frwr->fr_cqe.done = frwr_wc_localinv_done;

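        /* Note (added): frwr_wc_localinv_done both releases the MRs and
         * calls rpcrdma_complete_rqst, so on this async path the RPC is
         * completed from the LocalInv completion handler itself.
         */
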
        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless re_id->qp is a valid pointer.
         */
        bad_wr = NULL;
        rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
        if (!rc)
                return;

        /* Recycle MRs in the LOCAL_INV chain that did not get posted.
         */
        trace_xprtrdma_post_linv_err(req, rc);
        while (bad_wr) {
                frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
                mr = container_of(frwr, struct rpcrdma_mr, frwr);
                bad_wr = bad_wr->next;

                frwr_mr_recycle(mr);
        }

        /* The final LOCAL_INV WR in the chain is supposed to
         * do the wake. If it was never posted, the wake will
         * not happen, so wake here in that case.
         */
        rpcrdma_complete_rqst(req->rl_reply);
}