1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
7 #include <linux/skbuff.h>
11 #include "rxe_queue.h"
27 RESPST_DUPLICATE_REQUEST,
28 RESPST_ERR_MALFORMED_WQE,
29 RESPST_ERR_UNSUPPORTED_OPCODE,
30 RESPST_ERR_MISALIGNED_ATOMIC,
31 RESPST_ERR_PSN_OUT_OF_SEQ,
32 RESPST_ERR_MISSING_OPCODE_FIRST,
33 RESPST_ERR_MISSING_OPCODE_LAST_C,
34 RESPST_ERR_MISSING_OPCODE_LAST_D1E,
35 RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
37 RESPST_ERR_RKEY_VIOLATION,
38 RESPST_ERR_INVALIDATE_RKEY,
40 RESPST_ERR_CQ_OVERFLOW,
47 static char *resp_state_name[] = {
48 [RESPST_NONE] = "NONE",
49 [RESPST_GET_REQ] = "GET_REQ",
50 [RESPST_CHK_PSN] = "CHK_PSN",
51 [RESPST_CHK_OP_SEQ] = "CHK_OP_SEQ",
52 [RESPST_CHK_OP_VALID] = "CHK_OP_VALID",
53 [RESPST_CHK_RESOURCE] = "CHK_RESOURCE",
54 [RESPST_CHK_LENGTH] = "CHK_LENGTH",
55 [RESPST_CHK_RKEY] = "CHK_RKEY",
56 [RESPST_EXECUTE] = "EXECUTE",
57 [RESPST_READ_REPLY] = "READ_REPLY",
58 [RESPST_COMPLETE] = "COMPLETE",
59 [RESPST_ACKNOWLEDGE] = "ACKNOWLEDGE",
60 [RESPST_CLEANUP] = "CLEANUP",
61 [RESPST_DUPLICATE_REQUEST] = "DUPLICATE_REQUEST",
62 [RESPST_ERR_MALFORMED_WQE] = "ERR_MALFORMED_WQE",
63 [RESPST_ERR_UNSUPPORTED_OPCODE] = "ERR_UNSUPPORTED_OPCODE",
64 [RESPST_ERR_MISALIGNED_ATOMIC] = "ERR_MISALIGNED_ATOMIC",
65 [RESPST_ERR_PSN_OUT_OF_SEQ] = "ERR_PSN_OUT_OF_SEQ",
66 [RESPST_ERR_MISSING_OPCODE_FIRST] = "ERR_MISSING_OPCODE_FIRST",
67 [RESPST_ERR_MISSING_OPCODE_LAST_C] = "ERR_MISSING_OPCODE_LAST_C",
68 [RESPST_ERR_MISSING_OPCODE_LAST_D1E] = "ERR_MISSING_OPCODE_LAST_D1E",
69 [RESPST_ERR_TOO_MANY_RDMA_ATM_REQ] = "ERR_TOO_MANY_RDMA_ATM_REQ",
70 [RESPST_ERR_RNR] = "ERR_RNR",
71 [RESPST_ERR_RKEY_VIOLATION] = "ERR_RKEY_VIOLATION",
72 [RESPST_ERR_INVALIDATE_RKEY] = "ERR_INVALIDATE_RKEY",
73 [RESPST_ERR_LENGTH] = "ERR_LENGTH",
74 [RESPST_ERR_CQ_OVERFLOW] = "ERR_CQ_OVERFLOW",
75 [RESPST_ERROR] = "ERROR",
76 [RESPST_RESET] = "RESET",
77 [RESPST_DONE] = "DONE",
78 [RESPST_EXIT] = "EXIT",
81 /* rxe_recv calls here to add a request packet to the input queue */
82 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
85 struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
87 skb_queue_tail(&qp->req_pkts, skb);
89 must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
90 (skb_queue_len(&qp->req_pkts) > 1);
92 rxe_run_task(&qp->resp.task, must_sched);
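/* Fetch the next request packet for the responder to work on. If the
 * QP has moved to the error state, free everything on the input queue
 * and go flush the posted receive work requests instead. Otherwise the
 * head packet is only peeked at here; it is dequeued later in cleanup().
 */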
95 static inline enum resp_states get_req(struct rxe_qp *qp,
96 struct rxe_pkt_info **pkt_p)
100 if (qp->resp.state == QP_STATE_ERROR) {
101 while ((skb = skb_dequeue(&qp->req_pkts))) {
104 ib_device_put(qp->ibqp.device);
107 /* go drain recv wr queue */
108 return RESPST_CHK_RESOURCE;
111 skb = skb_peek(&qp->req_pkts);
115 *pkt_p = SKB_TO_PKT(skb);
117 return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
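/* Check the packet PSN against the PSN the responder expects next.
 * For RC, a PSN ahead of the expected one produces (at most one)
 * out-of-sequence NAK and a PSN behind it is handled as a duplicate
 * request. For other QP types, an out-of-sequence packet causes the
 * current message to be dropped until the first packet of a new
 * message arrives.
 */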
120 static enum resp_states check_psn(struct rxe_qp *qp,
121 struct rxe_pkt_info *pkt)
123 int diff = psn_compare(pkt->psn, qp->resp.psn);
124 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
126 switch (qp_type(qp)) {
129 if (qp->resp.sent_psn_nak)
130 return RESPST_CLEANUP;
132 qp->resp.sent_psn_nak = 1;
133 rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
134 return RESPST_ERR_PSN_OUT_OF_SEQ;
136 } else if (diff < 0) {
137 rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
138 return RESPST_DUPLICATE_REQUEST;
141 if (qp->resp.sent_psn_nak)
142 qp->resp.sent_psn_nak = 0;
147 if (qp->resp.drop_msg || diff != 0) {
148 if (pkt->mask & RXE_START_MASK) {
149 qp->resp.drop_msg = 0;
150 return RESPST_CHK_OP_SEQ;
153 qp->resp.drop_msg = 1;
154 return RESPST_CLEANUP;
161 return RESPST_CHK_OP_SEQ;
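/* Check that the opcode continues the current message correctly: a
 * SEND/WRITE FIRST or MIDDLE must be followed by a MIDDLE or LAST of
 * the same operation, and a MIDDLE or LAST must not arrive without a
 * FIRST. Violations are NAKed for RC and cause the message to be
 * dropped (or the WQE flushed) for UC.
 */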
164 static enum resp_states check_op_seq(struct rxe_qp *qp,
165 struct rxe_pkt_info *pkt)
167 switch (qp_type(qp)) {
169 switch (qp->resp.opcode) {
170 case IB_OPCODE_RC_SEND_FIRST:
171 case IB_OPCODE_RC_SEND_MIDDLE:
172 switch (pkt->opcode) {
173 case IB_OPCODE_RC_SEND_MIDDLE:
174 case IB_OPCODE_RC_SEND_LAST:
175 case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
176 case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
177 return RESPST_CHK_OP_VALID;
179 return RESPST_ERR_MISSING_OPCODE_LAST_C;
182 case IB_OPCODE_RC_RDMA_WRITE_FIRST:
183 case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
184 switch (pkt->opcode) {
185 case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
186 case IB_OPCODE_RC_RDMA_WRITE_LAST:
187 case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
188 return RESPST_CHK_OP_VALID;
190 return RESPST_ERR_MISSING_OPCODE_LAST_C;
194 switch (pkt->opcode) {
195 case IB_OPCODE_RC_SEND_MIDDLE:
196 case IB_OPCODE_RC_SEND_LAST:
197 case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
198 case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
199 case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
200 case IB_OPCODE_RC_RDMA_WRITE_LAST:
201 case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
202 return RESPST_ERR_MISSING_OPCODE_FIRST;
204 return RESPST_CHK_OP_VALID;
210 switch (qp->resp.opcode) {
211 case IB_OPCODE_UC_SEND_FIRST:
212 case IB_OPCODE_UC_SEND_MIDDLE:
213 switch (pkt->opcode) {
214 case IB_OPCODE_UC_SEND_MIDDLE:
215 case IB_OPCODE_UC_SEND_LAST:
216 case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
217 return RESPST_CHK_OP_VALID;
219 return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
222 case IB_OPCODE_UC_RDMA_WRITE_FIRST:
223 case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
224 switch (pkt->opcode) {
225 case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
226 case IB_OPCODE_UC_RDMA_WRITE_LAST:
227 case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
228 return RESPST_CHK_OP_VALID;
230 return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
234 switch (pkt->opcode) {
235 case IB_OPCODE_UC_SEND_MIDDLE:
236 case IB_OPCODE_UC_SEND_LAST:
237 case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
238 case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
239 case IB_OPCODE_UC_RDMA_WRITE_LAST:
240 case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
241 qp->resp.drop_msg = 1;
242 return RESPST_CLEANUP;
244 return RESPST_CHK_OP_VALID;
250 return RESPST_CHK_OP_VALID;
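/* Check the opcode against the access rights the QP was configured
 * with (remote read, write and atomic). RC rejects a disallowed
 * operation as an unsupported opcode; UC silently drops the message.
 */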
254 static enum resp_states check_op_valid(struct rxe_qp *qp,
255 struct rxe_pkt_info *pkt)
257 switch (qp_type(qp)) {
259 if (((pkt->mask & RXE_READ_MASK) &&
260 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
261 ((pkt->mask & RXE_WRITE_MASK) &&
262 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
263 ((pkt->mask & RXE_ATOMIC_MASK) &&
264 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
265 return RESPST_ERR_UNSUPPORTED_OPCODE;
271 if ((pkt->mask & RXE_WRITE_MASK) &&
272 !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
273 qp->resp.drop_msg = 1;
274 return RESPST_CLEANUP;
289 return RESPST_CHK_RESOURCE;
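/* Pull the next receive WQE off the shared receive queue. The WQE is
 * copied into the QP before the consumer index is advanced so that it
 * remains stable while the request is processed. If the queue drops
 * below the armed SRQ limit, an IB_EVENT_SRQ_LIMIT_REACHED event is
 * generated.
 */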
292 static enum resp_states get_srq_wqe(struct rxe_qp *qp)
294 struct rxe_srq *srq = qp->srq;
295 struct rxe_queue *q = srq->rq.queue;
296 struct rxe_recv_wqe *wqe;
303 return RESPST_ERR_RNR;
305 spin_lock_irqsave(&srq->rq.consumer_lock, flags);
307 wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
309 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
310 return RESPST_ERR_RNR;
313 /* don't trust user space data */
314 if (unlikely(wqe->dma.num_sge > srq->rq.max_sge)) {
315 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
316 pr_warn("%s: invalid num_sge in SRQ entry\n", __func__);
317 return RESPST_ERR_MALFORMED_WQE;
319 size = sizeof(*wqe) + wqe->dma.num_sge * sizeof(struct rxe_sge);
320 memcpy(&qp->resp.srq_wqe, wqe, size);
322 qp->resp.wqe = &qp->resp.srq_wqe.wqe;
323 queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
324 count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);
326 if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
331 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
332 return RESPST_CHK_LENGTH;
335 spin_unlock_irqrestore(&srq->rq.consumer_lock, flags);
336 ev.device = qp->ibqp.device;
337 ev.element.srq = qp->ibqp.srq;
338 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
339 srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
340 return RESPST_CHK_LENGTH;
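/* Make sure the responder has what it needs to execute this request:
 * flush-complete the receive WQE if the QP is in the error state,
 * check that inbound read/atomic operations are allowed at all, and
 * fetch a receive WQE (from the SRQ or the local receive queue) for
 * operations that consume one, returning an RNR state if none is
 * posted.
 */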
343 static enum resp_states check_resource(struct rxe_qp *qp,
344 struct rxe_pkt_info *pkt)
346 struct rxe_srq *srq = qp->srq;
348 if (qp->resp.state == QP_STATE_ERROR) {
350 qp->resp.status = IB_WC_WR_FLUSH_ERR;
351 return RESPST_COMPLETE;
353 qp->resp.wqe = queue_head(qp->rq.queue,
354 QUEUE_TYPE_FROM_CLIENT);
356 qp->resp.status = IB_WC_WR_FLUSH_ERR;
357 return RESPST_COMPLETE;
366 if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
367 /* it is the requester's job not to send
368 * too many read/atomic ops; we just
369 * recycle the responder resource queue
371 if (likely(qp->attr.max_dest_rd_atomic > 0))
372 return RESPST_CHK_LENGTH;
374 return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
377 if (pkt->mask & RXE_RWR_MASK) {
379 return get_srq_wqe(qp);
381 qp->resp.wqe = queue_head(qp->rq.queue,
382 QUEUE_TYPE_FROM_CLIENT);
383 return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
386 return RESPST_CHK_LENGTH;
389 static enum resp_states check_length(struct rxe_qp *qp,
390 struct rxe_pkt_info *pkt)
392 switch (qp_type(qp)) {
394 return RESPST_CHK_RKEY;
397 return RESPST_CHK_RKEY;
400 return RESPST_CHK_RKEY;
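/* For RDMA read/write and atomic requests, capture the target va, rkey
 * and length from the RETH or AtomicETH header, then look up the MW/MR
 * the rkey names and validate access rights, address range and (for
 * writes) the per-packet length and padding rules.
 */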
404 static enum resp_states check_rkey(struct rxe_qp *qp,
405 struct rxe_pkt_info *pkt)
407 struct rxe_mr *mr = NULL;
408 struct rxe_mw *mw = NULL;
414 enum resp_states state;
417 if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
418 if (pkt->mask & RXE_RETH_MASK) {
419 qp->resp.va = reth_va(pkt);
421 qp->resp.rkey = reth_rkey(pkt);
422 qp->resp.resid = reth_len(pkt);
423 qp->resp.length = reth_len(pkt);
425 access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
426 : IB_ACCESS_REMOTE_WRITE;
427 } else if (pkt->mask & RXE_ATOMIC_MASK) {
428 qp->resp.va = atmeth_va(pkt);
430 qp->resp.rkey = atmeth_rkey(pkt);
431 qp->resp.resid = sizeof(u64);
432 access = IB_ACCESS_REMOTE_ATOMIC;
434 return RESPST_EXECUTE;
437 /* A zero-byte op is not required to set an addr or rkey. */
438 if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
439 (pkt->mask & RXE_RETH_MASK) &&
440 reth_len(pkt) == 0) {
441 return RESPST_EXECUTE;
445 rkey = qp->resp.rkey;
446 resid = qp->resp.resid;
447 pktlen = payload_size(pkt);
449 if (rkey_is_mw(rkey)) {
450 mw = rxe_lookup_mw(qp, access, rkey);
452 pr_err("%s: no MW matches rkey %#x\n", __func__, rkey);
453 state = RESPST_ERR_RKEY_VIOLATION;
459 pr_err("%s: MW doesn't have an MR\n", __func__);
460 state = RESPST_ERR_RKEY_VIOLATION;
464 if (mw->access & IB_ZERO_BASED)
465 qp->resp.offset = mw->addr;
470 mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
472 pr_err("%s: no MR matches rkey %#x\n", __func__, rkey);
473 state = RESPST_ERR_RKEY_VIOLATION;
478 if (mr_check_range(mr, va + qp->resp.offset, resid)) {
479 state = RESPST_ERR_RKEY_VIOLATION;
483 if (pkt->mask & RXE_WRITE_MASK) {
485 if (pktlen != mtu || bth_pad(pkt)) {
486 state = RESPST_ERR_LENGTH;
490 if (pktlen != resid) {
491 state = RESPST_ERR_LENGTH;
494 if ((bth_pad(pkt) != (0x3 & (-resid)))) {
495 /* This case may not be exactly a length error,
496 * but no other error class fits better.
498 state = RESPST_ERR_LENGTH;
504 WARN_ON_ONCE(qp->resp.mr);
507 return RESPST_EXECUTE;
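/* Copy inbound send payload into the scatter list of the current
 * receive WQE.
 */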
518 static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
523 err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
524 data_addr, data_len, RXE_TO_MR_OBJ);
526 return (err == -ENOSPC) ? RESPST_ERR_LENGTH
527 : RESPST_ERR_MALFORMED_WQE;
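/* Copy RDMA write payload into the target MR at the offset set up by
 * check_rkey(), advancing the responder's va and residual length.
 */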
532 static enum resp_states write_data_in(struct rxe_qp *qp,
533 struct rxe_pkt_info *pkt)
535 enum resp_states rc = RESPST_NONE;
537 int data_len = payload_size(pkt);
539 err = rxe_mr_copy(qp->resp.mr, qp->resp.va + qp->resp.offset,
540 payload_addr(pkt), data_len, RXE_TO_MR_OBJ);
542 rc = RESPST_ERR_RKEY_VIOLATION;
546 qp->resp.va += data_len;
547 qp->resp.resid -= data_len;
553 /* Guarantee atomicity of atomic operations at the machine level. */
554 static DEFINE_SPINLOCK(atomic_ops_lock);
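/* Execute a compare & swap or fetch & add at the target address under
 * atomic_ops_lock. The original value is saved in qp->resp.atomic_orig
 * so it can be returned in the ATOMIC ACK.
 */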
556 static enum resp_states process_atomic(struct rxe_qp *qp,
557 struct rxe_pkt_info *pkt)
560 enum resp_states ret;
561 struct rxe_mr *mr = qp->resp.mr;
563 if (mr->state != RXE_MR_STATE_VALID) {
564 ret = RESPST_ERR_RKEY_VIOLATION;
568 vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, sizeof(u64));
570 /* check that vaddr is 8-byte aligned. */
571 if (!vaddr || (uintptr_t)vaddr & 7) {
572 ret = RESPST_ERR_MISALIGNED_ATOMIC;
576 spin_lock_bh(&atomic_ops_lock);
578 qp->resp.atomic_orig = *vaddr;
580 if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
581 pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
582 if (*vaddr == atmeth_comp(pkt))
583 *vaddr = atmeth_swap_add(pkt);
585 *vaddr += atmeth_swap_add(pkt);
588 spin_unlock_bh(&atomic_ops_lock);
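/* Allocate and initialize an outgoing ACK or read-response packet:
 * size it for the payload plus pad and ICRC, then fill in the BTH and,
 * when required by the opcode, the AETH syndrome/MSN and the atomic
 * ACK extended header.
 */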
595 static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
596 struct rxe_pkt_info *pkt,
597 struct rxe_pkt_info *ack,
603 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
612 pad = (-payload) & 0x3;
613 paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
615 skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
620 ack->opcode = opcode;
621 ack->mask = rxe_opcode[opcode].mask;
622 ack->paylen = paylen;
625 bth_init(ack, opcode, 0, 0, pad, IB_DEFAULT_PKEY_FULL,
626 qp->attr.dest_qp_num, 0, psn);
628 if (ack->mask & RXE_AETH_MASK) {
629 aeth_set_syn(ack, syndrome);
630 aeth_set_msn(ack, qp->resp.msn);
633 if (ack->mask & RXE_ATMACK_MASK)
634 atmack_set_orig(ack, qp->resp.atomic_orig);
636 err = rxe_prepare(&qp->pri_av, ack, skb);
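/* Claim the next responder resource slot and record the parameters of
 * an inbound RDMA read (va, rkey, length and the PSN range it spans) so
 * the reply can be generated, and later replayed, one packet at a time.
 */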
645 static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
646 struct rxe_pkt_info *pkt)
648 struct resp_res *res;
651 res = &qp->resp.resources[qp->resp.res_head];
652 rxe_advance_resp_resource(qp);
653 free_rd_atomic_resource(qp, res);
655 res->type = RXE_READ_MASK;
657 res->read.va = qp->resp.va + qp->resp.offset;
658 res->read.va_org = qp->resp.va + qp->resp.offset;
659 res->read.resid = qp->resp.resid;
660 res->read.length = qp->resp.resid;
661 res->read.rkey = qp->resp.rkey;
663 pkts = max_t(u32, (reth_len(pkt) + qp->mtu - 1)/qp->mtu, 1);
664 res->first_psn = pkt->psn;
665 res->cur_psn = pkt->psn;
666 res->last_psn = (pkt->psn + pkts - 1) & BTH_PSN_MASK;
668 res->state = rdatm_res_state_new;
674 * rxe_recheck_mr - revalidate MR from rkey and get a reference
678 * This code allows the MR to be invalidated or deregistered, or the
679 * MW (if one was used) to be invalidated or deallocated, while the
680 * read is in progress. It is assumed that the access permissions, if
681 * originally good, are still OK and that the mappings are unchanged.
683 * TODO: If someone reregisters an MR to change its size or
684 * access permissions during the processing of an RDMA read
685 * we should kill the responder resource and complete the
686 * operation with an error.
688 * Return: mr on success else NULL
690 static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
692 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
696 if (rkey_is_mw(rkey)) {
697 mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
702 if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
703 !mr || mr->state != RXE_MR_STATE_VALID) {
714 mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
718 if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
726 /* RDMA read response. If res is not NULL, then we have a current RDMA request
727 * being processed or replayed.
729 static enum resp_states read_reply(struct rxe_qp *qp,
730 struct rxe_pkt_info *req_pkt)
732 struct rxe_pkt_info ack_pkt;
735 enum resp_states state;
739 struct resp_res *res = qp->resp.res;
743 res = rxe_prepare_read_res(qp, req_pkt);
747 if (res->state == rdatm_res_state_new) {
752 mr = rxe_recheck_mr(qp, res->read.rkey);
754 return RESPST_ERR_RKEY_VIOLATION;
757 if (res->read.resid <= mtu)
758 opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
760 opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
762 mr = rxe_recheck_mr(qp, res->read.rkey);
764 return RESPST_ERR_RKEY_VIOLATION;
766 if (res->read.resid > mtu)
767 opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
769 opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
772 res->state = rdatm_res_state_next;
774 payload = min_t(int, res->read.resid, mtu);
776 skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
777 res->cur_psn, AETH_ACK_UNLIMITED);
779 return RESPST_ERR_RNR;
781 err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
782 payload, RXE_FROM_MR_OBJ);
784 pr_err("Failed copying memory\n");
788 if (bth_pad(&ack_pkt)) {
789 u8 *pad = payload_addr(&ack_pkt) + payload;
791 memset(pad, 0, bth_pad(&ack_pkt));
794 err = rxe_xmit_packet(qp, &ack_pkt, skb);
796 pr_err("Failed sending RDMA reply.\n");
797 return RESPST_ERR_RNR;
800 res->read.va += payload;
801 res->read.resid -= payload;
802 res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;
804 if (res->read.resid > 0) {
809 qp->resp.opcode = -1;
810 if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
811 qp->resp.psn = res->cur_psn;
812 state = RESPST_CLEANUP;
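/* Invalidate the MW or MR named by the rkey carried in a
 * Send-with-Invalidate request.
 */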
818 static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
820 if (rkey_is_mw(rkey))
821 return rxe_invalidate_mw(qp, rkey);
823 return rxe_invalidate_mr(qp, rkey);
826 /* Executes a new request. A retried request never reaches this function
827 * (sends and writes are discarded; reads and atomics are retried elsewhere).
829 static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
831 enum resp_states err;
832 struct sk_buff *skb = PKT_TO_SKB(pkt);
833 union rdma_network_hdr hdr;
835 if (pkt->mask & RXE_SEND_MASK) {
836 if (qp_type(qp) == IB_QPT_UD ||
837 qp_type(qp) == IB_QPT_SMI ||
838 qp_type(qp) == IB_QPT_GSI) {
839 if (skb->protocol == htons(ETH_P_IP)) {
840 memset(&hdr.reserved, 0,
841 sizeof(hdr.reserved));
842 memcpy(&hdr.roce4grh, ip_hdr(skb),
843 sizeof(hdr.roce4grh));
844 err = send_data_in(qp, &hdr, sizeof(hdr));
846 err = send_data_in(qp, ipv6_hdr(skb),
852 err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
855 } else if (pkt->mask & RXE_WRITE_MASK) {
856 err = write_data_in(qp, pkt);
859 } else if (pkt->mask & RXE_READ_MASK) {
860 /* For RDMA Read we can increment the msn now. See C9-148. */
862 return RESPST_READ_REPLY;
863 } else if (pkt->mask & RXE_ATOMIC_MASK) {
864 err = process_atomic(qp, pkt);
872 if (pkt->mask & RXE_IETH_MASK) {
873 u32 rkey = ieth_rkey(pkt);
875 err = invalidate_rkey(qp, rkey);
877 return RESPST_ERR_INVALIDATE_RKEY;
880 if (pkt->mask & RXE_END_MASK)
881 /* We successfully processed this new request. */
884 /* next expected psn, read handles this separately */
885 qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
886 qp->resp.ack_psn = qp->resp.psn;
888 qp->resp.opcode = pkt->opcode;
889 qp->resp.status = IB_WC_SUCCESS;
891 if (pkt->mask & RXE_COMP_MASK)
892 return RESPST_COMPLETE;
893 else if (qp_type(qp) == IB_QPT_RC)
894 return RESPST_ACKNOWLEDGE;
896 return RESPST_CLEANUP;
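/* Build a receive completion for the current WQE in either the kernel
 * (ib_wc) or user (ib_uverbs_wc) layout, post it to the receive CQ and
 * decide whether an acknowledge or a cleanup step follows.
 */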
899 static enum resp_states do_complete(struct rxe_qp *qp,
900 struct rxe_pkt_info *pkt)
903 struct ib_wc *wc = &cqe.ibwc;
904 struct ib_uverbs_wc *uwc = &cqe.uibwc;
905 struct rxe_recv_wqe *wqe = qp->resp.wqe;
906 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
911 memset(&cqe, 0, sizeof(cqe));
913 if (qp->rcq->is_user) {
914 uwc->status = qp->resp.status;
915 uwc->qp_num = qp->ibqp.qp_num;
916 uwc->wr_id = wqe->wr_id;
918 wc->status = qp->resp.status;
920 wc->wr_id = wqe->wr_id;
923 if (wc->status == IB_WC_SUCCESS) {
924 rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
925 wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
926 pkt->mask & RXE_WRITE_MASK) ?
927 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
928 wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
929 pkt->mask & RXE_WRITE_MASK) ?
930 qp->resp.length : wqe->dma.length - wqe->dma.resid;
932 /* fields after byte_len are different between kernel and user
935 if (qp->rcq->is_user) {
936 uwc->wc_flags = IB_WC_GRH;
938 if (pkt->mask & RXE_IMMDT_MASK) {
939 uwc->wc_flags |= IB_WC_WITH_IMM;
940 uwc->ex.imm_data = immdt_imm(pkt);
943 if (pkt->mask & RXE_IETH_MASK) {
944 uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
945 uwc->ex.invalidate_rkey = ieth_rkey(pkt);
948 if (pkt->mask & RXE_DETH_MASK)
949 uwc->src_qp = deth_sqp(pkt);
951 uwc->port_num = qp->attr.port_num;
953 struct sk_buff *skb = PKT_TO_SKB(pkt);
955 wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
956 if (skb->protocol == htons(ETH_P_IP))
957 wc->network_hdr_type = RDMA_NETWORK_IPV4;
959 wc->network_hdr_type = RDMA_NETWORK_IPV6;
961 if (is_vlan_dev(skb->dev)) {
962 wc->wc_flags |= IB_WC_WITH_VLAN;
963 wc->vlan_id = vlan_dev_vlan_id(skb->dev);
966 if (pkt->mask & RXE_IMMDT_MASK) {
967 wc->wc_flags |= IB_WC_WITH_IMM;
968 wc->ex.imm_data = immdt_imm(pkt);
971 if (pkt->mask & RXE_IETH_MASK) {
972 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
973 wc->ex.invalidate_rkey = ieth_rkey(pkt);
976 if (pkt->mask & RXE_DETH_MASK)
977 wc->src_qp = deth_sqp(pkt);
979 wc->port_num = qp->attr.port_num;
983 /* for SRQ we hold a copy of the wqe; otherwise it still sits in the rq */
985 queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
989 if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
990 return RESPST_ERR_CQ_OVERFLOW;
993 if (unlikely(qp->resp.state == QP_STATE_ERROR))
994 return RESPST_CHK_RESOURCE;
997 if (qp_type(qp) == IB_QPT_RC)
998 return RESPST_ACKNOWLEDGE;
1000 return RESPST_CLEANUP;
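/* Send a bare RC acknowledge packet carrying the given AETH syndrome
 * and PSN.
 */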
1003 static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
1004 u8 syndrome, u32 psn)
1007 struct rxe_pkt_info ack_pkt;
1008 struct sk_buff *skb;
1010 skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
1017 err = rxe_xmit_packet(qp, &ack_pkt, skb);
1019 pr_err_ratelimited("Failed sending ack\n");
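/* Send an ATOMIC ACK carrying the original value and park the skb in a
 * responder resource so a duplicate atomic request can be answered by
 * retransmitting the same reply.
 */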
1025 static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
1029 struct rxe_pkt_info ack_pkt;
1030 struct sk_buff *skb;
1031 struct resp_res *res;
1033 skb = prepare_ack_packet(qp, pkt, &ack_pkt,
1034 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
1041 res = &qp->resp.resources[qp->resp.res_head];
1042 free_rd_atomic_resource(qp, res);
1043 rxe_advance_resp_resource(qp);
1046 res->type = RXE_ATOMIC_MASK;
1047 res->atomic.skb = skb;
1048 res->first_psn = ack_pkt.psn;
1049 res->last_psn = ack_pkt.psn;
1050 res->cur_psn = ack_pkt.psn;
1052 rc = rxe_xmit_packet(qp, &ack_pkt, skb);
1054 pr_err_ratelimited("Failed sending ack\n");
1061 static enum resp_states acknowledge(struct rxe_qp *qp,
1062 struct rxe_pkt_info *pkt)
1064 if (qp_type(qp) != IB_QPT_RC)
1065 return RESPST_CLEANUP;
1067 if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
1068 send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
1069 else if (pkt->mask & RXE_ATOMIC_MASK)
1070 send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
1071 else if (bth_ack(pkt))
1072 send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);
1074 return RESPST_CLEANUP;
1077 static enum resp_states cleanup(struct rxe_qp *qp,
1078 struct rxe_pkt_info *pkt)
1080 struct sk_buff *skb;
1083 skb = skb_dequeue(&qp->req_pkts);
1086 ib_device_put(qp->ibqp.device);
1090 rxe_put(qp->resp.mr);
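/* Find the responder resource whose PSN range contains the given PSN,
 * used to service duplicate read and atomic requests.
 */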
1097 static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
1101 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
1102 struct resp_res *res = &qp->resp.resources[i];
1107 if (psn_compare(psn, res->first_psn) >= 0 &&
1108 psn_compare(psn, res->last_psn) <= 0) {
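/* Handle a retried request: sends and writes are simply acknowledged
 * again, reads are replayed (fully or partially) from the saved
 * responder resource, and atomics are answered by resending the cached
 * ATOMIC ACK.
 */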
1116 static enum resp_states duplicate_request(struct rxe_qp *qp,
1117 struct rxe_pkt_info *pkt)
1119 enum resp_states rc;
1120 u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
1122 if (pkt->mask & RXE_SEND_MASK ||
1123 pkt->mask & RXE_WRITE_MASK) {
1124 /* SEND or WRITE. Ack again and cleanup. C9-105. */
1125 send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
1126 return RESPST_CLEANUP;
1127 } else if (pkt->mask & RXE_READ_MASK) {
1128 struct resp_res *res;
1130 res = find_resource(qp, pkt->psn);
1132 /* Resource not found. Class D error. Drop the
1135 rc = RESPST_CLEANUP;
1138 /* Ensure this new request is the same as the previous
1139 * one or a subset of it.
1141 u64 iova = reth_va(pkt);
1142 u32 resid = reth_len(pkt);
1144 if (iova < res->read.va_org ||
1145 resid > res->read.length ||
1146 (iova + resid) > (res->read.va_org +
1147 res->read.length)) {
1148 rc = RESPST_CLEANUP;
1152 if (reth_rkey(pkt) != res->read.rkey) {
1153 rc = RESPST_CLEANUP;
1157 res->cur_psn = pkt->psn;
1158 res->state = (pkt->psn == res->first_psn) ?
1159 rdatm_res_state_new :
1160 rdatm_res_state_replay;
1163 /* Reset the resource, except length. */
1164 res->read.va_org = iova;
1165 res->read.va = iova;
1166 res->read.resid = resid;
1168 /* Replay the RDMA read reply. */
1170 rc = RESPST_READ_REPLY;
1174 struct resp_res *res;
1176 /* Find the operation in our list of responder resources. */
1177 res = find_resource(qp, pkt->psn);
1179 skb_get(res->atomic.skb);
1180 /* Resend the result. */
1181 rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
1183 pr_err("Failed resending result. This flow is not handled - skb ignored\n");
1184 rc = RESPST_CLEANUP;
1189 /* Resource not found. Class D error. Drop the request. */
1190 rc = RESPST_CLEANUP;
1197 /* Process a class A or C error. Both are treated the same in this implementation. */
1198 static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
1199 enum ib_wc_status status)
1201 qp->resp.aeth_syndrome = syndrome;
1202 qp->resp.status = status;
1204 /* indicate that we should go through the ERROR state */
1205 qp->resp.goto_error = 1;
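/* Class D1E error (missing LAST opcode on a UC QP): when an SRQ is in
 * use the message is dropped and any in-progress WQE is completed in
 * error; otherwise the receive WQE is reset to its original state and
 * the message is ignored.
 */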
1208 static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
1213 qp->resp.drop_msg = 1;
1215 qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1216 return RESPST_COMPLETE;
1218 return RESPST_CLEANUP;
1221 /* Class D1. This packet may be the start of a
1222 * new message and could be valid. The previous
1223 * message is invalid and ignored. Reset the
1224 * recv wr to its original state.
1227 qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
1228 qp->resp.wqe->dma.cur_sge = 0;
1229 qp->resp.wqe->dma.sge_offset = 0;
1230 qp->resp.opcode = -1;
1234 rxe_put(qp->resp.mr);
1238 return RESPST_CLEANUP;
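/* Drop every request packet still queued on the QP and, when the QP
 * does not use an SRQ, consume any receive WQEs left on the receive
 * queue.
 */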
1242 static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
1244 struct sk_buff *skb;
1245 struct rxe_queue *q = qp->rq.queue;
1247 while ((skb = skb_dequeue(&qp->req_pkts))) {
1250 ib_device_put(qp->ibqp.device);
1256 while (!qp->srq && q && queue_head(q, q->type))
1257 queue_advance_consumer(q, q->type);
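/* Responder state machine entry point, run from the responder task.
 * Packets are processed by stepping through the resp_states above, e.g.
 * a simple RC SEND flows GET_REQ -> CHK_PSN -> CHK_OP_SEQ ->
 * CHK_OP_VALID -> CHK_RESOURCE -> CHK_LENGTH -> CHK_RKEY -> EXECUTE ->
 * COMPLETE -> ACKNOWLEDGE -> CLEANUP.
 */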
1260 int rxe_responder(void *arg)
1262 struct rxe_qp *qp = (struct rxe_qp *)arg;
1263 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
1264 enum resp_states state;
1265 struct rxe_pkt_info *pkt = NULL;
1270 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
1277 switch (qp->resp.state) {
1278 case QP_STATE_RESET:
1279 state = RESPST_RESET;
1283 state = RESPST_GET_REQ;
1288 pr_debug("qp#%d state = %s\n", qp_num(qp),
1289 resp_state_name[state]);
1291 case RESPST_GET_REQ:
1292 state = get_req(qp, &pkt);
1294 case RESPST_CHK_PSN:
1295 state = check_psn(qp, pkt);
1297 case RESPST_CHK_OP_SEQ:
1298 state = check_op_seq(qp, pkt);
1300 case RESPST_CHK_OP_VALID:
1301 state = check_op_valid(qp, pkt);
1303 case RESPST_CHK_RESOURCE:
1304 state = check_resource(qp, pkt);
1306 case RESPST_CHK_LENGTH:
1307 state = check_length(qp, pkt);
1309 case RESPST_CHK_RKEY:
1310 state = check_rkey(qp, pkt);
1312 case RESPST_EXECUTE:
1313 state = execute(qp, pkt);
1315 case RESPST_COMPLETE:
1316 state = do_complete(qp, pkt);
1318 case RESPST_READ_REPLY:
1319 state = read_reply(qp, pkt);
1321 case RESPST_ACKNOWLEDGE:
1322 state = acknowledge(qp, pkt);
1324 case RESPST_CLEANUP:
1325 state = cleanup(qp, pkt);
1327 case RESPST_DUPLICATE_REQUEST:
1328 state = duplicate_request(qp, pkt);
1330 case RESPST_ERR_PSN_OUT_OF_SEQ:
1331 /* RC only - Class B. Drop packet. */
1332 send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
1333 state = RESPST_CLEANUP;
1336 case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
1337 case RESPST_ERR_MISSING_OPCODE_FIRST:
1338 case RESPST_ERR_MISSING_OPCODE_LAST_C:
1339 case RESPST_ERR_UNSUPPORTED_OPCODE:
1340 case RESPST_ERR_MISALIGNED_ATOMIC:
1341 /* RC only - Class C. */
1342 do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
1343 IB_WC_REM_INV_REQ_ERR);
1344 state = RESPST_COMPLETE;
1347 case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
1348 state = do_class_d1e_error(qp);
1350 case RESPST_ERR_RNR:
1351 if (qp_type(qp) == IB_QPT_RC) {
1352 rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
1354 send_ack(qp, pkt, AETH_RNR_NAK |
1356 qp->attr.min_rnr_timer),
1359 /* UD/UC - class D */
1360 qp->resp.drop_msg = 1;
1362 state = RESPST_CLEANUP;
1365 case RESPST_ERR_RKEY_VIOLATION:
1366 if (qp_type(qp) == IB_QPT_RC) {
1368 do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
1369 IB_WC_REM_ACCESS_ERR);
1370 state = RESPST_COMPLETE;
1372 qp->resp.drop_msg = 1;
1374 /* UC/SRQ Class D */
1375 qp->resp.status = IB_WC_REM_ACCESS_ERR;
1376 state = RESPST_COMPLETE;
1378 /* UC/non-SRQ Class E. */
1379 state = RESPST_CLEANUP;
1384 case RESPST_ERR_INVALIDATE_RKEY:
1386 qp->resp.goto_error = 1;
1387 qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1388 state = RESPST_COMPLETE;
1391 case RESPST_ERR_LENGTH:
1392 if (qp_type(qp) == IB_QPT_RC) {
1394 do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
1395 IB_WC_REM_INV_REQ_ERR);
1396 state = RESPST_COMPLETE;
1397 } else if (qp->srq) {
1398 /* UC/UD - class E */
1399 qp->resp.status = IB_WC_REM_INV_REQ_ERR;
1400 state = RESPST_COMPLETE;
1402 /* UC/UD - class D */
1403 qp->resp.drop_msg = 1;
1404 state = RESPST_CLEANUP;
1408 case RESPST_ERR_MALFORMED_WQE:
1410 do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
1411 IB_WC_LOC_QP_OP_ERR);
1412 state = RESPST_COMPLETE;
1415 case RESPST_ERR_CQ_OVERFLOW:
1417 state = RESPST_ERROR;
1421 if (qp->resp.goto_error) {
1422 state = RESPST_ERROR;
1429 if (qp->resp.goto_error) {
1430 state = RESPST_ERROR;
1437 rxe_drain_req_pkts(qp, false);
1438 qp->resp.wqe = NULL;
1442 qp->resp.goto_error = 0;
1443 pr_warn("qp#%d moved to error state\n", qp_num(qp));