// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is in flight and not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled after the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}

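/* Stamp the Receive with a completion ID: the Receive CQ's resource
 * ID paired with a unique serial number. Tracepoints use this pair
 * to correlate a Work Completion with its original Work Request.
 */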
static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
				   struct rpc_rdma_cid *cid)
{
	cid->ci_queue_id = rdma->sc_rq_cq->res.id;
	cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
}

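/* Allocate one receive context: the control structure plus a
 * DMA-mapped buffer large enough to receive a whole inline message.
 * The mapping persists for the lifetime of the context.
 */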
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
	pcl_init(&ctxt->rc_call_pcl);
	pcl_init(&ctxt->rc_read_pcl);
	pcl_init(&ctxt->rc_write_pcl);
	pcl_init(&ctxt->rc_reply_pcl);

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

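/* Tear down a single receive context: undo the DMA mapping, then
 * release the Receive buffer and the context itself.
 */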
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}

/**
 * svc_rdma_recv_ctxt_get - Allocate a recv_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a recv_ctxt or (rarely) NULL if none are available.
 */
struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (node)
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
	else
		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
	if (ctxt)
		ctxt->rc_page_count = 0;
	return ctxt;
}

/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	pcl_free(&ctxt->rc_call_pcl);
	pcl_free(&ctxt->rc_read_pcl);
	pcl_free(&ctxt->rc_write_pcl);
	pcl_free(&ctxt->rc_reply_pcl);

	if (!ctxt->rc_temp)
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	else
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}

/**
 * svc_rdma_release_rqst - Release transport-specific per-rqst resources
 * @rqstp: svc_rqst being released
 *
 * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	rqstp->rq_xprt_ctxt = NULL;
	if (ctxt)
		svc_rdma_recv_ctxt_put(rdma, ctxt);
}

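/* Replenish the Receive Queue. Each posted WR carries a single SGE
 * pointing at its context's inline buffer. The WRs are linked into
 * one chain so that the whole batch is handed to the provider with
 * a single ib_post_recv() call.
 */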
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted, bool temp)
{
	const struct ib_recv_wr *bad_wr = NULL;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;

		trace_svcrdma_post_recv(ctxt);
		ctxt->rc_temp = temp;
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return false;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (!ret)
		return true;

	/* Release the contexts for any WRs the provider rejected. */
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	trace_svcrdma_rq_post_err(rdma, ret);
	/* Since we're destroying the xprt, no need to reset
	 * sc_pending_recvs.
	 */
	return false;
}

/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Note the unlock pairs with the smp_rmb in svc_xprt_ready: */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);

	if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) &&
	    rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH,
					    false))
			goto dropped;
	return;

flushed:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
dropped:
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
}

/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}

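/* Map the inline Receive buffer into rqstp::rq_arg. The whole
 * message, transport header included, lands in the head iovec;
 * the page and tail fields start out empty.
 */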
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;
}

/**
 * xdr_count_read_segments - Count number of Read segments in Read list
 * @rctxt: Ingress receive context
 * @p: Start of an un-decoded Read list
 *
 * Before allocating anything, ensure the ingress Read list is safe
 * to use.
 *
 * The segment count is limited to how many segments can fit in the
 * transport header without overflowing the buffer. That's about 40
 * Read segments for a 1KB inline threshold.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated to point
 *	    to the first byte past the Read list. rc_read_pcl and
 *	    rc_call_pcl cl_count fields are set to the number of
 *	    Read segments in the list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left in an
 *	    unknown state.
 */
static bool xdr_count_read_segments(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_call_pcl.cl_count = 0;
	rctxt->rc_read_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		u32 position, handle, length;

		p = xdr_inline_decode(&rctxt->rc_stream,
				      rpcrdma_readseg_maxsz * sizeof(*p));
		if (!p)
			return false;

		xdr_decode_read_segment(p, &position, &handle, &length);
		if (position)
			++rctxt->rc_read_pcl.cl_count;
		else
			++rctxt->rc_call_pcl.cl_count;

		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Read list.
 *
 * Sanity checks:
 * - Read list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Read list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Read list.
 *  %false: Read list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_read_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_read_segments(rctxt, p))
		return false;
	if (!pcl_alloc_call(rctxt, p))
		return false;
	return pcl_alloc_read(rctxt, p);
}

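/* A Write chunk is a counted array of RDMA segments. Decode the
 * segment count, then verify that an array of that many segments
 * fits within the Receive buffer.
 */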
static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	u32 segcount;
	__be32 *p;

	if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount))
		return false;

	/* A bogus segcount causes this buffer overflow check to fail. */
	p = xdr_inline_decode(&rctxt->rc_stream,
			      segcount * rpcrdma_segment_maxsz * sizeof(*p));
	return p != NULL;
}

/**
 * xdr_count_write_chunks - Count number of Write chunks in Write list
 * @rctxt: Received header and decoding state
 * @p: start of an un-decoded Write list
 *
 * Before allocating anything, ensure the ingress Write list is
 * safe to use.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list, and
 *	    the number of Write chunks is in rc_write_pcl.cl_count.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an indeterminate state.
 */
static bool xdr_count_write_chunks(struct svc_rdma_recv_ctxt *rctxt, __be32 *p)
{
	rctxt->rc_write_pcl.cl_count = 0;
	while (xdr_item_is_present(p)) {
		if (!xdr_check_write_chunk(rctxt))
			return false;
		++rctxt->rc_write_pcl.cl_count;
		p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
		if (!p)
			return false;
	}
	return true;
}

/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation currently supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Write list is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Write list.
 *  %false: Write list is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_write_list(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;
	if (!xdr_count_write_chunks(rctxt, p))
		return false;
	if (!pcl_alloc_write(rctxt, &rctxt->rc_write_pcl, p))
		return false;

	rctxt->rc_cur_result_payload = pcl_first_chunk(&rctxt->rc_write_pcl);
	return true;
}

/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow Receive buffer.
 * - Chunk size limited by largest NFS data payload.
 *
 * Return values:
 *   %true: Reply chunk is valid. @rctxt's xdr_stream is updated
 *	    to point to the first byte past the Reply chunk.
 *  %false: Reply chunk is corrupt. @rctxt's xdr_stream is left
 *	    in an unknown state.
 */
static bool xdr_check_reply_chunk(struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p;

	p = xdr_inline_decode(&rctxt->rc_stream, sizeof(*p));
	if (!p)
		return false;

	if (!xdr_item_is_present(p))
		return true;
	if (!xdr_check_write_chunk(rctxt))
		return false;

	rctxt->rc_reply_pcl.cl_count = 1;
	return pcl_alloc_write(rctxt, &rctxt->rc_reply_pcl, p);
}

/* RPC-over-RDMA Version One private extension: Remote Invalidation.
 * Responder's choice: requester signals it can handle Send With
 * Invalidate, and responder chooses one R_key to invalidate.
 *
 * If there is exactly one distinct R_key in the received transport
 * header, set rc_inv_rkey to that R_key. Otherwise, set it to zero.
 */
static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	struct svc_rdma_segment *segment;
	struct svc_rdma_chunk *chunk;
	u32 inv_rkey;

	ctxt->rc_inv_rkey = 0;

	if (!rdma->sc_snd_w_inv)
		return;

	inv_rkey = 0;
	pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
		pcl_for_each_segment(segment, chunk) {
			if (inv_rkey == 0)
				inv_rkey = segment->rs_handle;
			else if (inv_rkey != segment->rs_handle)
				return;
		}
	}
	ctxt->rc_inv_rkey = inv_rkey;
}

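/* The RPC-over-RDMA transport header opens with four fixed XDR
 * words: rdma_xid, rdma_vers, rdma_credit, and rdma_proc. For
 * rdma_msg and rdma_nomsg, the Read list, Write list, and Reply
 * chunk follow, each introduced by a presence discriminator.
 */
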
/**
 * svc_rdma_xdr_decode_req - Decode the transport header
 * @rq_arg: xdr_buf containing ingress RPC/RDMA message
 * @rctxt: state of decoding
 *
 * On entry, xdr->head[0].iov_base points to first byte of the
 * RPC-over-RDMA transport header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 *
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg,
				   struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p, *rdma_argp;
	unsigned int hdr_len;

	rdma_argp = rq_arg->head[0].iov_base;
	xdr_init_decode(&rctxt->rc_stream, rq_arg, rdma_argp, NULL);

	p = xdr_inline_decode(&rctxt->rc_stream,
			      rpcrdma_fixed_maxsz * sizeof(*p));
	if (!p)
		goto out_short;
	p++;
	if (*p != rpcrdma_version)
		goto out_version;
	p += 2;
	rctxt->rc_msgtype = *p;
	switch (rctxt->rc_msgtype) {
	case rdma_msg:
	case rdma_nomsg:
		break;
	case rdma_done:
	case rdma_error:
		goto out_drop;
	default:
		goto out_proc;
	}

	if (!xdr_check_read_list(rctxt))
		goto out_inval;
	if (!xdr_check_write_list(rctxt))
		goto out_inval;
	if (!xdr_check_reply_chunk(rctxt))
		goto out_inval;

	rq_arg->head[0].iov_base = rctxt->rc_stream.p;
	hdr_len = xdr_stream_pos(&rctxt->rc_stream);
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rctxt, rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short_err(rctxt, rq_arg->len);
	return -EINVAL;
out_version:
	trace_svcrdma_decode_badvers_err(rctxt, rdma_argp);
	return -EPROTONOSUPPORT;
out_drop:
	trace_svcrdma_decode_drop_err(rctxt, rdma_argp);
	return 0;
out_proc:
	trace_svcrdma_decode_badproc_err(rctxt, rdma_argp);
	return -EINVAL;
out_inval:
	trace_svcrdma_decode_parse_err(rctxt, rdma_argp);
	return -EINVAL;
}

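/* Finish assembling the RPC Call in rqstp::rq_arg from the pages
 * and xdr_buf that were saved in @head when the RDMA Reads were
 * set up.
 */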
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}

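/* Reply with an RDMA_ERROR message when the transport header could
 * not be decoded or its chunks could not be processed. The caller
 * is responsible for releasing @rctxt.
 */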
static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt,
						struct svc_rdma_recv_ctxt *rctxt)
{
	__be32 *p = rctxt->rc_recv_buf;

	if (!xprt->xpt_bc_xprt)
		return false;

	if (rctxt->rc_msgtype != rdma_msg)
		return false;

	if (!pcl_is_empty(&rctxt->rc_call_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_read_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_write_pcl))
		return false;
	if (!pcl_is_empty(&rctxt->rc_reply_pcl))
		return false;

	/* RPC call direction */
	if (*(p + 8) == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}

/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return 0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	int ret;

	rqstp->rq_xprt_ctxt = NULL;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
	percpu_counter_inc(&svcrdma_stat_recv);

	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
				   DMA_FROM_DEVICE);
	svc_rdma_build_arg_xdr(rqstp, ctxt);

	/* Prevent svc_xprt_release from releasing pages in rq_pages
	 * if we return 0 or an error.
	 */
	rqstp->rq_respages = rqstp->rq_pages;
	rqstp->rq_next_page = rqstp->rq_respages;

	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
		goto out_backchannel;

	svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

	if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
	    !pcl_is_empty(&ctxt->rc_call_pcl))
		goto out_readlist;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readlist:
	ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
	if (ret < 0)
		goto out_readfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_readfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, ctxt, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_backchannel:
	svc_rdma_handle_bc_reply(rqstp, ctxt);
out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}