// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * happening.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
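
/* The four states above correspond to FRWR_IS_INVALID, FRWR_IS_VALID,
 * FRWR_FLUSHED_FR, and FRWR_FLUSHED_LI of enum rpcrdma_frwr_state
 * (see xprt_rdma.h); that state field is tested and updated throughout
 * the functions below.
 */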

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frwr->fr_mr))
		goto out_mr_err;

	mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_list_err;

	INIT_LIST_HEAD(&mr->mr_list);
	sg_init_table(mr->mr_sg, depth);
	init_completion(&frwr->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(frwr->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frwr->fr_mr);
	return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static int
__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	rc = ib_dereg_mr(frwr->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, mr);
		return rc;
	}

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
				  ia->ri_max_frwr_depth);
	if (IS_ERR(frwr->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(frwr->fr_mr), mr);
		return PTR_ERR(frwr->fr_mr);
	}

	dprintk("RPC:       %s: recovered FRWR %p\n", __func__, frwr);
	frwr->fr_state = FRWR_IS_INVALID;
	return 0;
}

/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mr *mr)
{
	enum rpcrdma_frwr_state state = mr->frwr.fr_state;
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_mr_reset(ia, mr);
	if (state != FRWR_FLUSHED_LI) {
		trace_xprtrdma_dma_unmap(mr);
		ib_dma_unmap_sg(ia->ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
	}
	if (rc)
		goto out_release;

	rpcrdma_mr_put(mr);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_op_release_mr(mr);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frwr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}
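
	/* Worked example with hypothetical values: if RPCRDMA_MAX_DATA_SEGS
	 * were 64 and the device reported a max FRWR depth of 30, delta
	 * would start at 34 and the loop would run twice (34, then 4),
	 * adding two more reg/invalidate pairs for a final depth of 11
	 * send queue slots per RPC.
	 */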

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	int i, n;
	u8 key;
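
	/* Find an MR that is known to be invalid. An MR still marked
	 * VALID or FLUSHED here was caught by a transport disconnect;
	 * it is handed to the recovery workqueue (see "Transport
	 * recovery" above) and another MR is tried instead.
	 */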
	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_defer_recovery(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-EAGAIN);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;

		/* Devices that cannot handle gaps in the page list need
		 * each segment to end, and the next one to start, on a
		 * page boundary; otherwise stop adding segments here.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;
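
	/* The low-order octet of an rkey is a consumer-owned "key" that
	 * the HCA checks on each remote access. Bumping it before every
	 * registration means a stale, previously advertised rkey for
	 * this MR no longer matches.
	 */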
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	frwr->fr_state = FRWR_IS_INVALID;
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frwr->fr_mr, n, mr->mr_nents);
	rpcrdma_mr_defer_recovery(mr);
	return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 */
static int
frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr, *bad_wr;
	struct rpcrdma_mr *mr;

	post_wr = &req->rl_sendctx->sc_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		struct rpcrdma_frwr *frwr;

		frwr = &mr->frwr;

		frwr->fr_cqe.done = frwr_wc_fastreg;
		frwr->fr_regwr.wr.next = post_wr;
		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
		frwr->fr_regwr.wr.num_sge = 0;
		frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
		frwr->fr_regwr.wr.send_flags = 0;

		post_wr = &frwr->fr_regwr.wr;
	}
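
	/* The resulting chain begins with the REG_MR WR for the last
	 * MR on the list and ends with the Send WR itself, so all
	 * registrations execute on the send queue before the Send.
	 */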

	/* If ib_post_send fails, the next ->send_request for
	 * @req will queue these MRs for recovery.
	 */
	return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
}

/* Handle a remotely invalidated mr on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
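
	/* If even the first LOCAL_INV WR could not be posted
	 * (bad_wr == first), nothing reached the send queue and no
	 * completion will ever fire, so skip the wait in that case.
	 */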
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto reset_mrs;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);

		__frwr_mr_reset(ia, mr);

		bad_wr = bad_wr->next;
	}
	goto unmap;
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_send			= frwr_op_send,
	.ro_reminv			= frwr_op_reminv,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init_mr			= frwr_op_init_mr,
	.ro_release_mr			= frwr_op_release_mr,
	.ro_displayname			= "frwr",
	.ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
};