/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "hfi.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

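/*
 * For illustration: OP(RDMA_READ_REQUEST) token-pastes to
 * IB_OPCODE_RC_RDMA_READ_REQUEST, so every case label and opcode
 * constant below is implicitly in the RC opcode space.
 */
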
/**
 * hfi1_add_retry_timer - add/start a retry timer
 * @qp - the QP
 *
 * add a retry timer on the QP
 */
static inline void hfi1_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}

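/*
 * A worked example of the timeout formula above: with the IBTA
 * encoding, a QP timeout value of 14 means 4.096 usec * (1 << 14),
 * i.e. roughly 67 msec; qp->timeout_jiffies caches that interval
 * already converted to jiffies so it is not recomputed per packet.
 */
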
/**
 * hfi1_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @to - timeout in usecs
 *
 * add an rnr timer on the QP
 */
void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to)
{
	struct hfi1_qp_priv *priv = qp->priv;

	qp->s_flags |= RVT_S_WAIT_RNR;
	/* Set the expiry on the timer that is actually being armed. */
	priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to);
	add_timer(&priv->s_rnr_timer);
}

/**
 * hfi1_mod_retry_timer - mod a retry timer
 * @qp - the QP
 *
 * Modify a potentially already running retry timer
 */
static inline void hfi1_mod_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies +
		  rdi->busy_jiffies);
}

/**
 * hfi1_stop_retry_timer - stop a retry timer
 * @qp - the QP
 *
 * stop a retry timer and return if the timer
 * was running
 */
static inline int hfi1_stop_retry_timer(struct rvt_qp *qp)
{
	int rval = 0;

	/* Remove QP from retry */
	if (qp->s_flags & RVT_S_TIMER) {
		qp->s_flags &= ~RVT_S_TIMER;
		rval = del_timer(&qp->s_timer);
	}
	return rval;
}

/**
 * hfi1_stop_rc_timers - stop all timers
 * @qp - the QP
 *
 * stop any pending timers
 */
void hfi1_stop_rc_timers(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		del_timer(&priv->s_rnr_timer);
	}
}

/**
 * hfi1_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return if the timer
 * was running
 */
static inline int hfi1_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = del_timer(&priv->s_rnr_timer);
	}
	return rval;
}

/**
 * hfi1_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void hfi1_del_timers_sync(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	del_timer_sync(&qp->s_timer);
	del_timer_sync(&priv->s_rnr_timer);
}

/* only opcode mask for adaptive pio */
const u32 rc_only_opcode =
	BIT(OP(SEND_ONLY) & 0x1f) |
	BIT(OP(SEND_ONLY_WITH_IMMEDIATE) & 0x1f) |
	BIT(OP(RDMA_WRITE_ONLY) & 0x1f) |
	BIT(OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE) & 0x1f) |
	BIT(OP(RDMA_READ_REQUEST) & 0x1f) |
	BIT(OP(ACKNOWLEDGE) & 0x1f) |
	BIT(OP(ATOMIC_ACKNOWLEDGE) & 0x1f) |
	BIT(OP(COMPARE_SWAP) & 0x1f) |
	BIT(OP(FETCH_ADD) & 0x1f);

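/*
 * The low 5 bits of an IB opcode identify the operation within a
 * transport, so masking with 0x1f folds the RC opcodes into a 32-bit
 * bitmask.  A caller (e.g. the adaptive pio path) can then test an
 * opcode with something like:
 *
 *	single_packet = !!(rc_only_opcode & BIT(opcode & 0x1f));
 */
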
/*
 * Rewind the sge state for a wqe to the given psn and
 * return the number of bytes remaining to be sent.
 */
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = delta_psn(psn, wqe->psn) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ss->total_len = wqe->length;
	hfi1_skip_sge(ss, len, 0);
	return wqe->length - len;
}

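/*
 * Example of the arithmetic in restart_sge(): if the restart PSN is 3
 * packets past wqe->psn and the path MTU is 4096, hfi1_skip_sge()
 * advances the SGE state by 12288 bytes and the function returns
 * wqe->length - 12288, the number of bytes still to be (re)sent.
 */
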
/**
 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @ps: the xmit packet state
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are in the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
		       struct hfi1_other_headers *ohdr,
		       struct hfi1_pkt_state *ps)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	int middle = 0;
	u32 pmtu = qp->pmtu;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		/* FALLTHROUGH */
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* FALLTHROUGH */
	case OP(SEND_ONLY):
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If a RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester re-sends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			ps->s_txreq->mr = e->rdma_sge.mr;
			if (ps->s_txreq->mr)
				rvt_get_mr(ps->s_txreq->mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				e->sent = 1;
			}
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = mask_psn(qp->s_ack_rdma_psn++);
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = hfi1_compute_aeth(qp);
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = mask_psn(e->psn);
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
		if (ps->s_txreq->mr)
			rvt_get_mr(ps->s_txreq->mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
		} else {
			ohdr->u.aeth = hfi1_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
			e->sent = 1;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = mask_psn(qp->s_ack_rdma_psn++);
		break;

	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					    (qp->s_nak_state <<
					     HFI1_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = hfi1_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = mask_psn(qp->s_ack_psn);
	}
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	/* pbc */
	ps->s_txreq->hdr_dwords = hwords + 2;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	/*
	 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
	 * RVT_S_RESP_PENDING
	 */
	smp_wmb();
	qp->s_flags &= ~(RVT_S_RESP_PENDING
				| RVT_S_ACK_PENDING
				| RVT_S_AHG_VALID);
	return 0;
}

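/*
 * Note on ordering: hfi1_make_rc_req() below calls make_rc_ack() first
 * whenever RVT_S_RESP_PENDING is set, so queued responses (ACKs, NAKs
 * and RDMA read data) always preempt the construction of new requests
 * on this QP.
 */
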
/**
 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	u32 hwords = 5;
	u32 len;
	u32 bth0 = 0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int middle = 0;
	int delta;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	ohdr = &ps->s_txreq->phdr.hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &ps->s_txreq->phdr.hdr.u.l.oth;

	/* Sending responses has higher priority over sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    make_rc_ack(dev, qp, ohdr, ps))
		return 1;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == ACCESS_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done_free_tx;
	}

	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head) {
				clear_ahg(qp);
				goto bail;
			}
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			newreq = 1;
			qp->s_psn = wqe->psn;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = mask_psn(qp->s_psn);
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->atomic_wr.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->atomic_wr.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->atomic_wr.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate a RDMA write needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = mask_psn(qp->s_psn++);
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate a RDMA read needs to be restarted from
		 * an earlier PSN without interfering with the sending thread.
		 * See restart_rc().
		 */
		len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
	delta = delta_psn(bth2, wqe->psn);
	if (delta && delta % HFI1_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	/* pbc */
	ps->s_txreq->hdr_dwords = hwords + 2;
	ps->s_txreq->sde = priv->s_sde;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	hfi1_make_ruc_header(
		qp,
		ohdr,
		bth0 | (qp->s_state << 24),
		bth2,
		middle,
		ps);
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	qp->s_hdrwords = 0;
	return 0;
}

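/*
 * The AckReq handling above means a requester asks for an explicit ACK
 * on the last packet of each message, every HFI1_PSN_CREDIT packets of
 * a long transfer (so credit updates keep arriving), and on every
 * packet when RVT_S_SEND_ONE throttles the QP to one outstanding
 * request after a retry.
 */
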
/**
 * hfi1_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp,
		      int is_fecn)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc, pbc_flags = 0;
	u16 lrh0;
	u16 sc5;
	u32 bth0;
	u32 hwords;
	u32 vl, plen;
	struct send_context *sc;
	struct pio_buf *pbuf;
	struct hfi1_ib_header hdr;
	struct hfi1_other_headers *ohdr;
	unsigned long flags;

	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
	if (qp->s_flags & RVT_S_RESP_PENDING)
		goto queue_ack;

	/* Ensure s_rdma_ack_cnt changes are committed */
	smp_read_barrier_depends();
	if (qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header */
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += hfi1_make_grh(ibp, &hdr.u.l.grh,
					&qp->remote_ah_attr.grh, hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}
	/* read pkey_index w/o lock (its atomic) */
	bth0 = hfi1_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & HFI1_MSN_MASK) |
					   (qp->r_nak_state <<
					    HFI1_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = hfi1_compute_aeth(qp);
	sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
	pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
	lrh0 |= (sc5 & 0xf) << 12 | (qp->remote_ah_attr.sl & 0xf) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[1] |= cpu_to_be32((!!is_fecn) << HFI1_BECN_SHIFT);
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (driver_lstate(ppd) != IB_PORT_ACTIVE)
		return;

	sc = rcd->sc;
	plen = 2 /* PBC */ + hwords;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);

	pbuf = sc_buffer_alloc(sc, plen, NULL, NULL);
	if (!pbuf) {
		/*
		 * We have no room to send at the moment.  Pass
		 * responsibility for sending the ACK to the send tasklet
		 * so that when enough buffer space becomes available,
		 * the ACK is sent ahead of other outgoing packets.
		 */
		goto queue_ack;
	}

	trace_output_ibhdr(dd_from_ibdev(qp->ibqp.device), &hdr);

	/* write the pbc and data */
	ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc, &hdr, hwords);

	return;

queue_ack:
	this_cpu_inc(*ibp->rvp.rc_qacks);
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	if (is_fecn)
		qp->s_flags |= RVT_S_ECN;

	/* Schedule the send tasklet. */
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

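/*
 * Packet length bookkeeping for the ACK above, as a sketch: a no-GRH
 * ACK carries LRH+BTH+AETH = (8+12+4)/4 = 6 dwords of header, the LRH
 * length field adds SIZE_OF_CRC for the ICRC the hardware appends, and
 * plen = 2 (PBC) + hwords = 8 dwords is what is handed to the PIO send
 * context.
 */
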
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_acked;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (cmp_psn(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = cmp_psn(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See hfi1_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since it's only
		 * one PSN per req.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in hfi1_make_rc_req() is too late.
	 */
	if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
	qp->s_flags &= ~RVT_S_AHG_VALID;
}

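/*
 * The opcode-to-state mapping in reset_psn() reuses the three RDMA
 * read *response* opcodes, which can never occur naturally as a
 * requester's s_state, as restart markers: hfi1_make_rc_req()
 * dispatches on them to rebuild the SGE state with restart_sge()
 * before resuming a SEND, RDMA write, or RDMA read mid-request.
 */
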
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
static void restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct hfi1_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			hfi1_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* need to handle delayed completion */
			return;
	} else
		qp->s_retry--;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}

/*
 * This is called from s_timer for missing responses.
 */
void hfi1_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_ibport *ibp;
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		ibp = to_iport(qp->ibqp.device, qp->port_num);
		ibp->rvp.n_rc_timeouts++;
		qp->s_flags &= ~RVT_S_TIMER;
		del_timer(&qp->s_timer);
		trace_hfi1_rc_timeout(qp, qp->s_last_psn + 1);
		restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_timer for RNR timeouts.
 */
void hfi1_rc_rnr_retry(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	hfi1_stop_rnr_timer(qp);
	hfi1_schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (cmp_psn(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
	}
}

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_ib_header *hdr)
{
	struct hfi1_other_headers *ohdr;
	struct rvt_swqe *wqe;
	struct ib_wc wc;
	unsigned i;
	u32 opcode;
	u32 psn;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags &
	      (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		hfi1_add_retry_timer(qp);

	while (qp->s_last != qp->s_acked) {
		u32 s_last;

		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	}
	/*
	 * If we were waiting for sends to complete before re-sending,
	 * and they are now complete, restart sending.
	 */
	trace_hfi1_rc_sendcomplete(qp, psn);
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		hfi1_schedule_send(qp);
	}
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to hfi1_send_complete but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct hfi1_ibport *ibp)
{
	struct ib_wc wc;
	unsigned i;

	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		u32 s_last;

		for (i = 0; i < wqe->wr.num_sge; i++) {
			struct rvt_sge *sge = &wqe->sg_list[i];

			rvt_put_mr(sge->mr);
		}
		s_last = qp->s_last;
		if (++s_last >= qp->s_size)
			s_last = 0;
		qp->s_last = s_last;
		/* see post_send() */
		barrier();
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			memset(&wc, 0, sizeof(wc));
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode];
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
		}
	} else {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
		/*
		 * If send progress not running attempt to progress
		 * SDMA queue.
		 */
		if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
			struct sdma_engine *engine;
			u8 sc5;

			/* For now use sc to find engine */
			sc5 = ibp->sl_to_sc[qp->remote_ah_attr.sl];
			engine = qp_to_sdma_engine(qp, sc5);
			sdma_engine_progress_schedule(engine);
		}
	}

	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);

	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop re-sending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_cur = qp->s_acked;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * May be called at interrupt level, with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	u32 to;

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include an ACK'ed request(s).
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		    diff == 0) {
			ret = 1;
			goto bail_stop;
		}
		/*
		 * If this request is a RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK a RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					atomic_inc(&qp->refcount);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail_stop;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				hfi1_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}

	switch (aeth >> 29) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * mod the retry timer.
			 */
			hfi1_mod_retry_timer(qp);
			/*
			 * We can stop re-sending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (cmp_psn(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			hfi1_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
		hfi1_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		hfi1_stop_rc_timers(qp);
		to =
			ib_hfi1_rnr_table[(aeth >> HFI1_AETH_CREDIT_SHIFT) &
					  HFI1_AETH_CREDIT_MASK];
		hfi1_add_rnr_timer(qp, to);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> HFI1_AETH_CREDIT_SHIFT) &
			HFI1_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				hfi1_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached */
bail_stop:
	hfi1_stop_rc_timers(qp);
	return ret;
}

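/*
 * AETH decoding used above, for reference: bits [31:29] give the ACK
 * type (0 = ACK, 1 = RNR NAK, 3 = NAK, 2 reserved), the 5-bit field at
 * HFI1_AETH_CREDIT_SHIFT holds the credit count, RNR timer index, or
 * NAK code depending on that type, and the low 24 bits carry the MSN.
 */
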
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	hfi1_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_ibport *ibp,
			struct hfi1_other_headers *ohdr,
			void *data, u32 tlen, struct rvt_qp *qp,
			u32 opcode, u32 psn, u32 hdrsize, u32 pmtu,
			struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;

	spin_lock_irqsave(&qp->s_lock, flags);

	trace_hfi1_rc_ack(qp, psn);

	/* Ignore invalid responses. */
	smp_read_barrier_depends(); /* see post_one_send */
	if (cmp_psn(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> 29) == 0)
				hfi1_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			__be32 *p = ohdr->u.at.atomic_ack_eth;

			val = ((u64) be32_to_cpu(p[0]) << 32) |
				be32_to_cpu(p[1]);
		} else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		qp->s_flags |= RVT_S_TIMER;
		mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0, 0);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 4)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 4;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0, 0);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		hfi1_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
				  struct rvt_qp *qp)
{
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		atomic_inc(&qp->refcount);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct hfi1_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;

	trace_hfi1_rc_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);

	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = HFI1_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (cmp_psn(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    cmp_psn(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

void hfi1_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next;

	next = n + 1;
	if (next > HFI1_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}

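/*
 * The s_ack_queue is a ring of HFI1_MAX_RDMA_ATOMIC + 1 entries:
 * r_head_ack_queue is the receive side's producer index and
 * s_tail_ack_queue the send side's consumer index, with one slot left
 * empty so a full ring can be told apart from an empty one.
 */
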
static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);

	ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
	ppd->threshold_event_counter++;

	cc_event = &ppd->cc_events[ppd->cc_log_idx++];
	if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
		ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
	/* keep timestamp in units of 1.024 usec */
	cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;

	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

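/*
 * The 1.024 usec timestamp unit above is deliberate: dividing
 * ktime_to_ns() by 1024 is a cheap shift and matches the units the
 * OPA congestion log expects, at the cost of a 2.4% deviation from
 * true microseconds.
 */
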
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cca_timer = &ppd->cca_timer[sl];

	cc_state = get_cc_state(ppd);

	if (cc_state == NULL)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

	spin_lock_irqsave(&ppd->cca_timer_lock, flags);

	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	ccti = cca_timer->ccti;

	if (!hrtimer_active(&cca_timer->hrtimer)) {
		/* ccti_timer is in units of 1.024 usec */
		unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL);
	}

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}

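/*
 * Example of the CCA reaction above, with illustrative values: given
 * ccti_increase = 1, each BECN nudges the congestion control table
 * index for the SL up by one (capped at ccti_limit), set_link_ipg()
 * widens the inter-packet gap accordingly, and the hrtimer (firing
 * every ccti_timer * 1.024 usec) decays the index again elsewhere in
 * the driver.
 */
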
/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: the packet to process, including header and payload
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_other_headers *ohdr = packet->ohdr;
	u32 bth0, opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	u32 bth1;
	int ret, is_fecn = 0;
	int copy_last = 0;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, rcv_flags & HFI1_HAS_GRH, qp, bth0))
		return;

	bth1 = be32_to_cpu(ohdr->bth[1]);
	if (unlikely(bth1 & (HFI1_BECN_SMASK | HFI1_FECN_SMASK))) {
		if (bth1 & HFI1_BECN_SMASK) {
			u16 rlid = qp->remote_ah_attr.dlid;
			u32 lqpn, rqpn;

			lqpn = qp->ibqp.qp_num;
			rqpn = qp->remote_qpn;
			process_becn(
				ppd,
				qp->remote_ah_attr.sl,
				rlid, lqpn, rqpn,
				IB_CC_SVCTYPE_RC);
		}
		is_fecn = bth1 & HFI1_FECN_SMASK;
	}

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
			    hdrsize, pmtu, rcd);
		if (is_fecn)
			goto send_ack;
		return;
	}

	/* Compute 24 bits worth of difference. */
	diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		qp_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, 1, 0);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = hfi1_rvt_get_rwqe(qp, 0);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(RDMA_WRITE_LAST):
		copy_last = ibpd_to_rvtpd(qp->ibqp.pd)->user;
		/* fall through */
	case OP(SEND_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (bth0 >> 20) & 3;
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		hfi1_copy_sge(&qp->r_sge, data, tlen, 1, copy_last);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     (bth0 & IB_BTH_SOLICITED) != 0);
		break;

	case OP(RDMA_WRITE_ONLY):
		copy_last = 1;
		/* fall through */
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		/* consume RWQE */
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = hfi1_rvt_get_rwqe(qp, 1);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > HFI1_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
				      sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = psn;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		hfi1_schedule_send(qp);

		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (is_fecn)
			goto send_ack;
		return;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK) {
		struct hfi1_qp_priv *priv = qp->priv;

		if (packet->numpkt == 0) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (priv->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		if (unlikely(is_fecn)) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		priv->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	hfi1_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(rcd, qp, is_fecn);
}

void hfi1_rc_hdrerr(
	struct hfi1_ctxtdata *rcd,
	struct hfi1_ib_header *hdr,
	u32 rcv_flags,
	struct rvt_qp *qp)
{
	int has_grh = rcv_flags & HFI1_HAS_GRH;
	struct hfi1_other_headers *ohdr;
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	int diff;
	u32 opcode;
	u32 psn, bth0;

	/* Check for GRH */
	ohdr = &hdr->u.oth;
	if (has_grh)
		ohdr = &hdr->u.l.oth;

	bth0 = be32_to_cpu(ohdr->bth[0]);
	if (hfi1_ruc_check_hdr(ibp, hdr, has_grh, qp, bth0))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode = (bth0 >> 24) & 0xff;

	/* Only deal with RDMA Writes for now */
	if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
		diff = delta_psn(psn, qp->r_psn);
		if (!qp->r_nak_state && diff >= 0) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}