// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
 * Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
 * - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases the
 *   Reply's pages.
 *
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_sq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	unsigned int i;
	int size;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	init_completion(&ctxt->sc_done);
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
		     rdma->sc_max_req_size);
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	rpcrdma_set_xdrlen(&ctxt->sc_hdrbuf, 0);
	xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,
			ctxt->sc_xprt_buf, NULL);

	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt =
		container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);

	trace_svcrdma_wc_send(wc, &ctxt->sc_cid);

	complete(&ctxt->sc_done);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		svc_xprt_deferred_close(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @ctxt: send ctxt with a Send WR ready to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_send_wr *wr = &ctxt->sc_send_wr;
	int ret;

	reinit_completion(&ctxt->sc_done);

	/* Sync the transport header buffer */
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      wr->sg_list[0].addr,
				      wr->sg_list[0].length,
				      DMA_TO_DEVICE);

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			percpu_counter_inc(&svcrdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		trace_svcrdma_post_send(ctxt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		if (ret)
			break;
		return 0;
	}

	trace_svcrdma_sq_post_err(rdma, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	wake_up(&rdma->sc_send_wait);
	return ret;
}

/**
 * svc_rdma_encode_read_list - Encode RPC Reply's Read chunk list
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply Read list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_read_list(struct svc_rdma_send_ctxt *sctxt)
{
	/* RPC-over-RDMA version 1 replies never have a Read list. */
	return xdr_stream_encode_item_absent(&sctxt->sc_stream);
}

/**
 * svc_rdma_encode_write_segment - Encode one Write segment
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 * @remaining: remaining bytes of the payload left in the Write chunk
 * @segno: which segment in the chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write segment, and updates @remaining
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_segment(struct svc_rdma_send_ctxt *sctxt,
					     const struct svc_rdma_chunk *chunk,
					     u32 *remaining, unsigned int segno)
{
	const struct svc_rdma_segment *segment = &chunk->ch_segments[segno];
	const size_t len = rpcrdma_segment_maxsz * sizeof(__be32);
	u32 length;
	__be32 *p;

	p = xdr_reserve_space(&sctxt->sc_stream, len);
	if (!p)
		return -EMSGSIZE;

	length = min_t(u32, *remaining, segment->rs_length);
	*remaining -= length;
	xdr_encode_rdma_segment(p, segment->rs_handle, length,
				segment->rs_offset);
	trace_svcrdma_encode_wseg(sctxt, segno, segment->rs_handle, length,
				  segment->rs_offset);
	return len;
}

/**
 * svc_rdma_encode_write_chunk - Encode one Write chunk
 * @sctxt: Send context for the RPC Reply
 * @chunk: Write chunk to push
 *
 * Copy a Write chunk from the Call transport header to the
 * Reply transport header. Update each segment's length field
 * to reflect the number of bytes written in that segment.
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Write chunk
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_chunk(struct svc_rdma_send_ctxt *sctxt,
					   const struct svc_rdma_chunk *chunk)
{
	u32 remaining = chunk->ch_payload_length;
	unsigned int segno;
	ssize_t len, ret;

	len = 0;
	ret = xdr_stream_encode_item_present(&sctxt->sc_stream);
	if (ret < 0)
		return ret;
	len += ret;

	ret = xdr_stream_encode_u32(&sctxt->sc_stream, chunk->ch_segcount);
	if (ret < 0)
		return ret;
	len += ret;

	for (segno = 0; segno < chunk->ch_segcount; segno++) {
		ret = svc_rdma_encode_write_segment(sctxt, chunk, &remaining, segno);
		if (ret < 0)
			return ret;
		len += ret;
	}

	return len;
}

/**
 * svc_rdma_encode_write_list - Encode RPC Reply's Write chunk list
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Write list
 *   %-EMSGSIZE on XDR buffer overflow
 */
static ssize_t svc_rdma_encode_write_list(struct svc_rdma_recv_ctxt *rctxt,
					  struct svc_rdma_send_ctxt *sctxt)
{
	struct svc_rdma_chunk *chunk;
	ssize_t len, ret;

	len = 0;
	pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
		ret = svc_rdma_encode_write_chunk(sctxt, chunk);
		if (ret < 0)
			return ret;
		len += ret;
	}

	/* Terminate the Write list */
	ret = xdr_stream_encode_item_absent(&sctxt->sc_stream);
	if (ret < 0)
		return ret;

	return len + ret;
}

/**
 * svc_rdma_encode_reply_chunk - Encode RPC Reply's Reply chunk
 * @rctxt: Reply context with information about the RPC Call
 * @sctxt: Send context for the RPC Reply
 * @length: size in bytes of the payload in the Reply chunk
 *
 * Return values:
 *   On success, returns length in bytes of the Reply XDR buffer
 *   that was consumed by the Reply's Reply chunk
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the RPC message is larger than the Reply chunk
 */
static ssize_t
svc_rdma_encode_reply_chunk(struct svc_rdma_recv_ctxt *rctxt,
			    struct svc_rdma_send_ctxt *sctxt,
			    unsigned int length)
{
	struct svc_rdma_chunk *chunk;

	if (pcl_is_empty(&rctxt->rc_reply_pcl))
		return xdr_stream_encode_item_absent(&sctxt->sc_stream);

	chunk = pcl_first_chunk(&rctxt->rc_reply_pcl);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_payload_length = length;
	return svc_rdma_encode_write_chunk(sctxt, chunk);
}

struct svc_rdma_map_data {
	struct svcxprt_rdma *md_rdma;
	struct svc_rdma_send_ctxt *md_ctxt;
};

/**
 * svc_rdma_page_dma_map - DMA map one page
 * @data: pointer to arguments
 * @page: struct page to DMA map
 * @offset: offset into the page
 * @len: number of bytes to map
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the page cannot be DMA mapped
 */
static int svc_rdma_page_dma_map(void *data, struct page *page,
				 unsigned long offset, unsigned int len)
{
	struct svc_rdma_map_data *args = data;
	struct svcxprt_rdma *rdma = args->md_rdma;
	struct svc_rdma_send_ctxt *ctxt = args->md_ctxt;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	++ctxt->sc_cur_sge_no;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	trace_svcrdma_dma_map_err(rdma, dma_addr, len);
	return -EIO;
}

/**
 * svc_rdma_iov_dma_map - DMA map an iovec
 * @data: pointer to arguments
 * @iov: kvec to DMA map
 *
 * ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if the iovec cannot be DMA mapped
 */
static int svc_rdma_iov_dma_map(void *data, const struct kvec *iov)
{
	if (!iov->iov_len)
		return 0;
	return svc_rdma_page_dma_map(data, virt_to_page(iov->iov_base),
				     offset_in_page(iov->iov_base),
				     iov->iov_len);
}

/**
 * svc_rdma_xb_dma_map - DMA map all segments of an xdr_buf
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   %0 if DMA mapping was successful
 *   %-EIO if DMA mapping failed
 *
 * On failure, any DMA mappings that have been already done must be
 * unmapped by the caller.
 */
static int svc_rdma_xb_dma_map(const struct xdr_buf *xdr, void *data)
{
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;
	int ret;

	ret = svc_rdma_iov_dma_map(data, &xdr->head[0]);
	if (ret < 0)
		return ret;

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);

		ret = svc_rdma_page_dma_map(data, *ppages++, pageoff, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		pageoff = 0;
	}

	ret = svc_rdma_iov_dma_map(data, &xdr->tail[0]);
	if (ret < 0)
		return ret;

	return xdr->len;
}

struct svc_rdma_pullup_data {
	u8		*pd_dest;
	unsigned int	pd_length;
	unsigned int	pd_num_sges;
};

/**
 * svc_rdma_xb_count_sges - Count how many SGEs will be needed
 * @xdr: xdr_buf containing portion of an RPC message to transmit
 * @data: pointer to arguments
 *
 * Returns:
 *   Number of SGEs needed to Send the contents of @xdr inline
 */
static int svc_rdma_xb_count_sges(const struct xdr_buf *xdr,
				  void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int remaining;
	unsigned long offset;

	if (xdr->head[0].iov_len)
		++args->pd_num_sges;

	offset = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		++args->pd_num_sges;
		remaining -= min_t(u32, PAGE_SIZE - offset, remaining);
		offset = 0;
	}

	if (xdr->tail[0].iov_len)
		++args->pd_num_sges;

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_needed - Determine whether to use pull-up
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: xdr_buf containing RPC message to transmit
 *
 * Returns:
 *   %true if pull-up must be used
 *   %false otherwise
 */
static bool svc_rdma_pull_up_needed(const struct svcxprt_rdma *rdma,
				    const struct svc_rdma_send_ctxt *sctxt,
				    const struct svc_rdma_recv_ctxt *rctxt,
				    const struct xdr_buf *xdr)
{
	/* Resources needed for the transport header */
	struct svc_rdma_pullup_data args = {
		.pd_length	= sctxt->sc_hdrbuf.len,
		.pd_num_sges	= 1,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_count_sges, &args);
	if (ret < 0)
		return false;

	if (args.pd_length < RPCRDMA_PULLUP_THRESH)
		return true;
	return args.pd_num_sges >= rdma->sc_max_send_sges;
}

/**
 * svc_rdma_xb_linearize - Copy region of xdr_buf to flat buffer
 * @xdr: xdr_buf containing portion of an RPC message to copy
 * @data: pointer to arguments
 *
 * Returns:
 *   Always zero.
 */
static int svc_rdma_xb_linearize(const struct xdr_buf *xdr,
				 void *data)
{
	struct svc_rdma_pullup_data *args = data;
	unsigned int len, remaining;
	unsigned long pageoff;
	struct page **ppages;

	if (xdr->head[0].iov_len) {
		memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);
		args->pd_dest += xdr->head[0].iov_len;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	pageoff = offset_in_page(xdr->page_base);
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - pageoff, remaining);
		memcpy(args->pd_dest, page_address(*ppages) + pageoff, len);
		remaining -= len;
		args->pd_dest += len;
		pageoff = 0;
		ppages++;
	}

	if (xdr->tail[0].iov_len) {
		memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
		args->pd_dest += xdr->tail[0].iov_len;
	}

	args->pd_length += xdr->len;
	return 0;
}

/**
 * svc_rdma_pull_up_reply_msg - Copy Reply into a single buffer
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR; xprt hdr is already prepared
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header buffer.
 *
 * Assumptions:
 *	pull_up_needed has determined that @xdr will fit in the buffer.
 *
 * Returns:
 *   %0 if pull-up was successful
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 */
static int svc_rdma_pull_up_reply_msg(const struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *sctxt,
				      const struct svc_rdma_recv_ctxt *rctxt,
				      const struct xdr_buf *xdr)
{
	struct svc_rdma_pullup_data args = {
		.pd_dest = sctxt->sc_xprt_buf + sctxt->sc_hdrbuf.len,
	};
	int ret;

	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				      svc_rdma_xb_linearize, &args);
	if (ret < 0)
		return ret;

	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len + args.pd_length;
	trace_svcrdma_send_pullup(sctxt, args.pd_length);
	return 0;
}

/* svc_rdma_map_reply_msg - DMA map the buffer holding RPC message
 * @rdma: controlling transport
 * @sctxt: send_ctxt for the Send WR
 * @rctxt: Write and Reply chunks provided by client
 * @xdr: prepared xdr_buf containing RPC message
 *
 * Returns:
 *   %0 if DMA mapping was successful.
 *   %-EMSGSIZE if a buffer manipulation problem occurred
 *   %-EIO if DMA mapping failed
 *
 * The Send WR's num_sge field is set in all cases.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *sctxt,
			   const struct svc_rdma_recv_ctxt *rctxt,
			   const struct xdr_buf *xdr)
{
	struct svc_rdma_map_data args = {
		.md_rdma	= rdma,
		.md_ctxt	= sctxt,
	};

	/* Set up the (persistently-mapped) transport header SGE. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;

	/* If there is a Reply chunk, nothing follows the transport
	 * header, and we're done here.
	 */
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return 0;

	/* For pull-up, svc_rdma_send() will sync the transport header.
	 * No additional DMA mapping is necessary.
	 */
	if (svc_rdma_pull_up_needed(rdma, sctxt, rctxt, xdr))
		return svc_rdma_pull_up_reply_msg(rdma, sctxt, rctxt, xdr);

	return pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
				       svc_rdma_xb_dma_map, &args);
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   const struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp)
{
	int ret;

	ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqstp->rq_res);
	if (ret < 0)
		return ret;

	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}

	ret = svc_rdma_send(rdma, sctxt);
	if (ret < 0)
		return ret;

	ret = wait_for_completion_killable(&sctxt->sc_done);
	svc_rdma_send_ctxt_put(rdma, sctxt);
	return ret;
}

/**
 * svc_rdma_send_error_msg - Send an RPC/RDMA v1 error response
 * @rdma: controlling transport context
 * @sctxt: Send context for the response
 * @rctxt: Receive context for incoming bad message
 * @status: negative errno indicating error that occurred
 *
 * Given the client-provided Read, Write, and Reply chunks, the
 * server was not able to parse the Call or form a complete Reply.
 * Return an RDMA_ERROR message so the client can retire the RPC
 * transaction.
 *
 * The caller does not have to release @sctxt. It is released by
 * Send completion, or by this function on error.
 */
void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *sctxt,
			     struct svc_rdma_recv_ctxt *rctxt,
			     int status)
{
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	__be32 *p;

	rpcrdma_set_xdrlen(&sctxt->sc_hdrbuf, 0);
	xdr_init_encode(&sctxt->sc_stream, &sctxt->sc_hdrbuf,
			sctxt->sc_xprt_buf, NULL);

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = rdma_error;

	switch (status) {
	case -EPROTONOSUPPORT:
		p = xdr_reserve_space(&sctxt->sc_stream, 3 * sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		p = xdr_reserve_space(&sctxt->sc_stream, sizeof(*p));
		if (!p)
			goto put_ctxt;

		*p = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}

	/* Remote Invalidation is skipped for simplicity. */
	sctxt->sc_send_wr.num_sge = 1;
	sctxt->sc_send_wr.opcode = IB_WR_SEND;
	sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len;
	if (svc_rdma_send(rdma, sctxt))
		goto put_ctxt;

	wait_for_completion_killable(&sctxt->sc_done);
	return;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *rdma_argp = rctxt->rc_recv_buf;
	struct svc_rdma_send_ctxt *sctxt;
	__be32 *p;
	int ret;

	ret = -ENOTCONN;
	if (svc_xprt_is_dead(xprt))
		goto drop_connection;

	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto drop_connection;

	p = xdr_reserve_space(&sctxt->sc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto put_ctxt;

	ret = svc_rdma_send_reply_chunk(rdma, rctxt, &rqstp->rq_res);
	if (ret < 0)
		goto reply_chunk;

	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p = pcl_is_empty(&rctxt->rc_reply_pcl) ? rdma_msg : rdma_nomsg;

	if (svc_rdma_encode_read_list(sctxt) < 0)
		goto put_ctxt;
	if (svc_rdma_encode_write_list(rctxt, sctxt) < 0)
		goto put_ctxt;
	if (svc_rdma_encode_reply_chunk(rctxt, sctxt, ret) < 0)
		goto put_ctxt;

	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp);
	if (ret < 0)
		goto put_ctxt;
	return 0;

reply_chunk:
	if (ret != -E2BIG && ret != -EINVAL)
		goto put_ctxt;

	svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret);
	return 0;

put_ctxt:
	svc_rdma_send_ctxt_put(rdma, sctxt);
drop_connection:
	trace_svcrdma_send_err(rqstp, ret);
	svc_xprt_deferred_close(&rdma->sc_xprt);
	return -ENOTCONN;
}

/**
 * svc_rdma_result_payload - special processing for a result payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in rqstp->rq_res
 * @length: size of payload, in bytes
 *
 * Return values:
 *   %0 if successful or nothing needed to be done
 *   %-EMSGSIZE on XDR buffer overflow
 *   %-E2BIG if the payload was larger than the Write chunk
 *   %-EINVAL if client provided too many segments
 *   %-ENOMEM if rdma_rw context pool was exhausted
 *   %-ENOTCONN if posting failed (connection is lost)
 *   %-EIO if rdma_rw initialization failed (DMA mapping, etc)
 */
int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
			    unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	struct svc_rdma_chunk *chunk;
	struct svcxprt_rdma *rdma;
	struct xdr_buf subbuf;
	int ret;

	chunk = rctxt->rc_cur_result_payload;
	if (!length || !chunk)
		return 0;
	rctxt->rc_cur_result_payload =
		pcl_next_chunk(&rctxt->rc_write_pcl, chunk);
	if (length > chunk->ch_length)
		return -E2BIG;

	chunk->ch_position = offset;
	chunk->ch_payload_length = length;

	if (xdr_buf_subsegment(&rqstp->rq_res, &subbuf, offset, length))
		return -EMSGSIZE;

	rdma = container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
	ret = svc_rdma_send_write_chunk(rdma, chunk, &subbuf);
	if (ret < 0)
		return ret;
	return 0;
}