2 * Copyright(c) 2015 - 2018 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #include <rdma/rdma_vt.h>
50 #include <rdma/rdmavt_qp.h>
55 #include "verbs_txreq.h"
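/*
 * find_prev_entry - responder-side helper for duplicate-request handling.
 * In essence it walks the s_ack_queue backward from r_head_ack_queue,
 * holding the QP s_lock, until it finds the entry whose PSN range covers
 * @psn; the surrounding queue indices are reported through @prev and
 * @prev_ack, and @scheduled indicates whether responses for that entry
 * are still scheduled to go out.
 */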
58 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
59 u8 *prev_ack, bool *scheduled)
60 __must_hold(&qp->s_lock)
62 struct rvt_ack_entry *e = NULL;
66 for (i = qp->r_head_ack_queue; ; i = p) {
67 if (i == qp->s_tail_ack_queue)
72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
73 if (p == qp->r_head_ack_queue) {
77 e = &qp->s_ack_queue[p];
82 if (cmp_psn(psn, e->psn) >= 0) {
83 if (p == qp->s_tail_ack_queue &&
84 cmp_psn(psn, e->lpsn) <= 0)
99 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
100 * @dev: the device for this QP
101 * @qp: a pointer to the QP
102 * @ohdr: a pointer to the IB header being constructed
103 * @ps: the xmit packet state
105 * Return 1 if constructed; otherwise, return 0.
* Note that we are on the responder side of the QP context.
107 * Note the QP s_lock must be held.
109 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
110 struct ib_other_headers *ohdr,
111 struct hfi1_pkt_state *ps)
113 struct rvt_ack_entry *e;
116 u32 bth0 = 0, bth2 = 0;
117 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
120 struct hfi1_qp_priv *qpriv = qp->priv;
123 u8 next = qp->s_tail_ack_queue;
124 struct tid_rdma_request *req;
126 trace_hfi1_rsp_make_rc_ack(qp, 0);
127 lockdep_assert_held(&qp->s_lock);
128 /* Don't send an ACK if we aren't supposed to. */
129 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
132 if (qpriv->hdr_type == HFI1_PKT_TYPE_9B)
133 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
136 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
139 switch (qp->s_ack_state) {
140 case OP(RDMA_READ_RESPONSE_LAST):
141 case OP(RDMA_READ_RESPONSE_ONLY):
142 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
143 release_rdma_sge_mr(e);
145 case OP(ATOMIC_ACKNOWLEDGE):
* We can increment the tail pointer now that the last
* response has been sent instead of only being
* constructed.
151 if (++next > rvt_size_atomic(&dev->rdi))
154 * Only advance the s_acked_ack_queue pointer if there
155 * have been no TID RDMA requests.
157 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
158 if (e->opcode != TID_OP(WRITE_REQ) &&
159 qp->s_acked_ack_queue == qp->s_tail_ack_queue)
160 qp->s_acked_ack_queue = next;
161 qp->s_tail_ack_queue = next;
162 trace_hfi1_rsp_make_rc_ack(qp, e->psn);
165 case OP(ACKNOWLEDGE):
166 /* Check for no next entry in the queue. */
167 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
168 if (qp->s_flags & RVT_S_ACK_PENDING)
173 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
174 /* Check for tid write fence */
175 if ((qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK) ||
176 hfi1_tid_rdma_ack_interlock(qp, e)) {
177 iowait_set_flag(&qpriv->s_iowait, IOWAIT_PENDING_IB);
180 if (e->opcode == OP(RDMA_READ_REQUEST)) {
182 * If a RDMA read response is being resent and
183 * we haven't seen the duplicate request yet,
184 * then stop sending the remaining responses the
185 * responder has seen until the requester re-sends it.
187 len = e->rdma_sge.sge_length;
188 if (len && !e->rdma_sge.mr) {
189 if (qp->s_acked_ack_queue ==
190 qp->s_tail_ack_queue)
191 qp->s_acked_ack_queue =
192 qp->r_head_ack_queue;
193 qp->s_tail_ack_queue = qp->r_head_ack_queue;
196 /* Copy SGE state in case we need to resend */
197 ps->s_txreq->mr = e->rdma_sge.mr;
199 rvt_get_mr(ps->s_txreq->mr);
200 qp->s_ack_rdma_sge.sge = e->rdma_sge;
201 qp->s_ack_rdma_sge.num_sge = 1;
202 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
205 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
207 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
210 ohdr->u.aeth = rvt_compute_aeth(qp);
212 qp->s_ack_rdma_psn = e->psn;
213 bth2 = mask_psn(qp->s_ack_rdma_psn++);
214 } else if (e->opcode == TID_OP(WRITE_REQ)) {
216 * If a TID RDMA WRITE RESP is being resent, we have to
217 * wait for the actual request. All requests that are to
218 * be resent will have their state set to
219 * TID_REQUEST_RESEND. When the new request arrives, the
220 * state will be changed to TID_REQUEST_RESEND_ACTIVE.
222 req = ack_to_tid_req(e);
223 if (req->state == TID_REQUEST_RESEND ||
224 req->state == TID_REQUEST_INIT_RESEND)
226 qp->s_ack_state = TID_OP(WRITE_RESP);
227 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
229 } else if (e->opcode == TID_OP(READ_REQ)) {
231 * If a TID RDMA read response is being resent and
232 * we haven't seen the duplicate request yet,
233 * then stop sending the remaining responses the
234 * responder has seen until the requester re-sends it.
236 len = e->rdma_sge.sge_length;
237 if (len && !e->rdma_sge.mr) {
238 if (qp->s_acked_ack_queue ==
239 qp->s_tail_ack_queue)
240 qp->s_acked_ack_queue =
241 qp->r_head_ack_queue;
242 qp->s_tail_ack_queue = qp->r_head_ack_queue;
245 /* Copy SGE state in case we need to resend */
246 ps->s_txreq->mr = e->rdma_sge.mr;
248 rvt_get_mr(ps->s_txreq->mr);
249 qp->s_ack_rdma_sge.sge = e->rdma_sge;
250 qp->s_ack_rdma_sge.num_sge = 1;
251 qp->s_ack_state = TID_OP(READ_RESP);
254 /* COMPARE_SWAP or FETCH_ADD */
255 ps->s_txreq->ss = NULL;
257 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
258 ohdr->u.at.aeth = rvt_compute_aeth(qp);
259 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
260 hwords += sizeof(ohdr->u.at) / sizeof(u32);
261 bth2 = mask_psn(e->psn);
264 trace_hfi1_tid_write_rsp_make_rc_ack(qp);
265 bth0 = qp->s_ack_state << 24;
268 case OP(RDMA_READ_RESPONSE_FIRST):
269 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
271 case OP(RDMA_READ_RESPONSE_MIDDLE):
272 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
273 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
275 rvt_get_mr(ps->s_txreq->mr);
276 len = qp->s_ack_rdma_sge.sge.sge_length;
279 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
281 ohdr->u.aeth = rvt_compute_aeth(qp);
283 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
284 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
287 bth0 = qp->s_ack_state << 24;
288 bth2 = mask_psn(qp->s_ack_rdma_psn++);
291 case TID_OP(WRITE_RESP):
* 1. Check if RVT_S_ACK_PENDING is set. If yes,
*    goto normal.
296 * 2. Attempt to allocate TID resources.
297 * 3. Remove RVT_S_RESP_PENDING flags from s_flags
298 * 4. If resources not available:
299 * 4.1 Set RVT_S_WAIT_TID_SPACE
300 * 4.2 Queue QP on RCD TID queue
301 * 4.3 Put QP on iowait list.
302 * 4.4 Build IB RNR NAK with appropriate timeout value
303 * 4.5 Return indication progress made.
304 * 5. If resources are available:
305 * 5.1 Program HW flow CSRs
306 * 5.2 Build TID RDMA WRITE RESP packet
307 * 5.3 If more resources needed, do 2.1 - 2.3.
308 * 5.4 Wake up next QP on RCD TID queue.
309 * 5.5 Return indication progress made.
312 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
313 req = ack_to_tid_req(e);
* Send scheduled RNR NAKs. RNR NAKs need to be sent at
317 * segment boundaries, not at request boundaries. Don't change
318 * s_ack_state because we are still in the middle of a request
320 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND &&
321 qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
322 req->cur_seg == req->alloc_seg) {
323 qpriv->rnr_nak_state = TID_RNR_NAK_SENT;
324 goto normal_no_state;
327 bth2 = mask_psn(qp->s_ack_rdma_psn);
328 hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
335 bth0 = qp->s_ack_state << 24;
336 qp->s_ack_rdma_psn++;
337 trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
339 if (req->cur_seg != req->total_segs)
343 /* Do not free e->rdma_sge until all data are received */
344 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
347 case TID_OP(READ_RESP):
349 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
350 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
351 delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
360 * Increment qp->s_tail_ack_queue through s_ack_state
363 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
366 case TID_OP(READ_REQ):
372 * Send a regular ACK.
373 * Set the s_ack_state so we wait until after sending
374 * the ACK before setting s_ack_state to ACKNOWLEDGE
377 qp->s_ack_state = OP(SEND_ONLY);
381 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
383 IB_AETH_CREDIT_SHIFT));
385 ohdr->u.aeth = rvt_compute_aeth(qp);
388 bth0 = OP(ACKNOWLEDGE) << 24;
389 bth2 = mask_psn(qp->s_ack_psn);
390 qp->s_flags &= ~RVT_S_ACK_PENDING;
391 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
392 ps->s_txreq->ss = NULL;
394 qp->s_rdma_ack_cnt++;
395 ps->s_txreq->sde = qpriv->s_sde;
396 ps->s_txreq->s_cur_size = len;
397 ps->s_txreq->hdr_dwords = hwords;
398 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
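/*
 * At this point the BTH words follow the usual IB layout seen throughout
 * this file: bth0 carries the opcode in its top byte, bth1 the remote QPN
 * (plus the FECN/BECN and OPFN extended bits), and bth2 the masked 24-bit
 * PSN plus the ACK-request bit.
 */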
401 spin_unlock_irqrestore(&qp->s_lock, ps->flags);
402 spin_lock_irqsave(&qp->r_lock, ps->flags);
403 spin_lock(&qp->s_lock);
404 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
405 spin_unlock(&qp->s_lock);
406 spin_unlock_irqrestore(&qp->r_lock, ps->flags);
407 spin_lock_irqsave(&qp->s_lock, ps->flags);
409 qp->s_ack_state = OP(ACKNOWLEDGE);
411 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
415 qp->s_flags &= ~(RVT_S_RESP_PENDING
422 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
423 * @qp: a pointer to the QP
425 * Assumes s_lock is held.
427 * Return 1 if constructed; otherwise, return 0.
429 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
431 struct hfi1_qp_priv *priv = qp->priv;
432 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
433 struct ib_other_headers *ohdr;
434 struct rvt_sge_state *ss = NULL;
435 struct rvt_swqe *wqe;
436 struct hfi1_swqe_priv *wpriv;
437 struct tid_rdma_request *req = NULL;
438 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
441 u32 bth0 = 0, bth2 = 0;
442 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
447 struct tid_rdma_flow *flow = NULL;
448 struct tid_rdma_params *remote;
450 trace_hfi1_sender_make_rc_req(qp);
451 lockdep_assert_held(&qp->s_lock);
452 ps->s_txreq = get_txreq(ps->dev, qp);
456 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
457 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
459 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
460 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
462 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
464 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
466 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
467 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
468 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
470 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
/* Sending responses takes priority over sending requests. */
474 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
475 make_rc_ack(dev, qp, ohdr, ps))
478 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
479 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
481 /* We are in the error state, flush the work request. */
482 if (qp->s_last == READ_ONCE(qp->s_head))
484 /* If DMAs are in progress, we can't flush immediately. */
485 if (iowait_sdma_pending(&priv->s_iowait)) {
486 qp->s_flags |= RVT_S_WAIT_DMA;
490 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
491 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
492 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
493 /* will get called again */
497 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
500 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
501 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
502 qp->s_flags |= RVT_S_WAIT_PSN;
505 qp->s_sending_psn = qp->s_psn;
506 qp->s_sending_hpsn = qp->s_psn - 1;
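/*
 * s_sending_psn..s_sending_hpsn tracks the PSN range handed to the send
 * engine whose completions have not yet been processed; setting
 * s_sending_hpsn to s_psn - 1 above marks that window as empty before any
 * new packets are built.
 */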
509 /* Send a request. */
510 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
512 switch (qp->s_state) {
514 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
517 * Resend an old request or start a new one.
519 * We keep track of the current SWQE so that
520 * we don't reset the "furthest progress" state
521 * if we need to back up.
524 if (qp->s_cur == qp->s_tail) {
525 /* Check if send work queue is empty. */
526 if (qp->s_tail == READ_ONCE(qp->s_head)) {
531 * If a fence is requested, wait for previous
532 * RDMA read and atomic operations to finish.
533 * However, there is no need to guard against
534 * TID RDMA READ after TID RDMA READ.
536 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
537 qp->s_num_rd_atomic &&
538 (wqe->wr.opcode != IB_WR_TID_RDMA_READ ||
539 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
540 qp->s_flags |= RVT_S_WAIT_FENCE;
544 * Local operations are processed immediately
545 * after all prior requests have completed
547 if (wqe->wr.opcode == IB_WR_REG_MR ||
548 wqe->wr.opcode == IB_WR_LOCAL_INV) {
552 if (qp->s_last != qp->s_cur)
554 if (++qp->s_cur == qp->s_size)
556 if (++qp->s_tail == qp->s_size)
558 if (!(wqe->wr.send_flags &
559 RVT_SEND_COMPLETION_ONLY)) {
560 err = rvt_invalidate_rkey(
562 wqe->wr.ex.invalidate_rkey);
565 rvt_send_complete(qp, wqe,
566 err ? IB_WC_LOC_PROT_ERR
569 atomic_dec(&qp->local_ops_pending);
574 qp->s_psn = wqe->psn;
577 * Note that we have to be careful not to modify the
578 * original work request since we may need to resend
583 bth2 = mask_psn(qp->s_psn);
586 * Interlock between various IB requests and TID RDMA
589 if ((priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) ||
590 hfi1_tid_rdma_wqe_interlock(qp, wqe))
593 switch (wqe->wr.opcode) {
595 case IB_WR_SEND_WITH_IMM:
596 case IB_WR_SEND_WITH_INV:
597 /* If no credit, return. */
598 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
599 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
600 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
604 qp->s_state = OP(SEND_FIRST);
608 if (wqe->wr.opcode == IB_WR_SEND) {
609 qp->s_state = OP(SEND_ONLY);
610 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
611 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
612 /* Immediate data comes after the BTH */
613 ohdr->u.imm_data = wqe->wr.ex.imm_data;
616 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
617 /* Invalidate rkey comes after the BTH */
618 ohdr->u.ieth = cpu_to_be32(
619 wqe->wr.ex.invalidate_rkey);
622 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
623 bth0 |= IB_BTH_SOLICITED;
624 bth2 |= IB_BTH_REQ_ACK;
625 if (++qp->s_cur == qp->s_size)
629 case IB_WR_RDMA_WRITE:
630 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
632 goto no_flow_control;
633 case IB_WR_RDMA_WRITE_WITH_IMM:
634 /* If no credit, return. */
635 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
636 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
637 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
642 wqe->rdma_wr.remote_addr,
644 ohdr->u.rc.reth.rkey =
645 cpu_to_be32(wqe->rdma_wr.rkey);
646 ohdr->u.rc.reth.length = cpu_to_be32(len);
647 hwords += sizeof(struct ib_reth) / sizeof(u32);
649 qp->s_state = OP(RDMA_WRITE_FIRST);
653 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
654 qp->s_state = OP(RDMA_WRITE_ONLY);
657 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
658 /* Immediate data comes after RETH */
659 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
661 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
662 bth0 |= IB_BTH_SOLICITED;
664 bth2 |= IB_BTH_REQ_ACK;
665 if (++qp->s_cur == qp->s_size)
669 case IB_WR_TID_RDMA_WRITE:
672 * Limit the number of TID RDMA WRITE requests.
674 if (atomic_read(&priv->n_tid_requests) >=
675 HFI1_TID_RDMA_WRITE_CNT)
678 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
682 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
686 if (priv->s_tid_cur == HFI1_QP_WQE_INVALID) {
687 priv->s_tid_cur = qp->s_cur;
688 if (priv->s_tid_tail == HFI1_QP_WQE_INVALID) {
689 priv->s_tid_tail = qp->s_cur;
690 priv->s_state = TID_OP(WRITE_RESP);
692 } else if (priv->s_tid_cur == priv->s_tid_head) {
693 struct rvt_swqe *__w;
694 struct tid_rdma_request *__r;
696 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
697 __r = wqe_to_tid_req(__w);
* The s_tid_cur pointer is advanced to s_cur if
* any of the following conditions about the WQE
* to which s_tid_cur currently points are
* satisfied:
704 * 1. The request is not a TID RDMA WRITE
706 * 2. The request is in the INACTIVE or
707 * COMPLETE states (TID RDMA READ requests
708 * stay at INACTIVE and TID RDMA WRITE
709 * transition to COMPLETE when done),
710 * 3. The request is in the ACTIVE or SYNC
711 * state and the number of completed
712 * segments is equal to the total segment
714 * (If ACTIVE, the request is waiting for
715 * ACKs. If SYNC, the request has not
716 * received any responses because it's
717 * waiting on a sync point.)
719 if (__w->wr.opcode != IB_WR_TID_RDMA_WRITE ||
720 __r->state == TID_REQUEST_INACTIVE ||
721 __r->state == TID_REQUEST_COMPLETE ||
722 ((__r->state == TID_REQUEST_ACTIVE ||
723 __r->state == TID_REQUEST_SYNC) &&
724 __r->comp_seg == __r->total_segs)) {
725 if (priv->s_tid_tail ==
728 TID_OP(WRITE_DATA_LAST)) {
729 priv->s_tid_tail = qp->s_cur;
733 priv->s_tid_cur = qp->s_cur;
736 * A corner case: when the last TID RDMA WRITE
737 * request was completed, s_tid_head,
738 * s_tid_cur, and s_tid_tail all point to the
739 * same location. Other requests are posted and
740 * s_cur wraps around to the same location,
741 * where a new TID RDMA WRITE is posted. In
742 * this case, none of the indices need to be
* updated. However, priv->s_state should still be updated.
745 if (priv->s_tid_tail == qp->s_cur &&
746 priv->s_state == TID_OP(WRITE_DATA_LAST))
747 priv->s_state = TID_OP(WRITE_RESP);
749 req = wqe_to_tid_req(wqe);
751 priv->s_tid_head = qp->s_cur;
752 priv->pending_tid_w_resp += req->total_segs;
753 atomic_inc(&priv->n_tid_requests);
754 atomic_dec(&priv->n_requests);
756 req->state = TID_REQUEST_RESEND;
757 req->comp_seg = delta_psn(bth2, wqe->psn);
759 * Pull back any segments since we are going
760 * to re-receive them.
762 req->setup_head = req->clear_tail;
763 priv->pending_tid_w_resp +=
764 delta_psn(wqe->lpsn, bth2) + 1;
767 trace_hfi1_tid_write_sender_make_req(qp, newreq);
768 trace_hfi1_tid_req_make_req_write(qp, newreq,
772 if (++qp->s_cur == qp->s_size)
776 case IB_WR_RDMA_READ:
778 * Don't allow more operations to be started
779 * than the QP limits allow.
781 if (qp->s_num_rd_atomic >=
782 qp->s_max_rd_atomic) {
783 qp->s_flags |= RVT_S_WAIT_RDMAR;
786 qp->s_num_rd_atomic++;
787 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
790 wqe->rdma_wr.remote_addr,
792 ohdr->u.rc.reth.rkey =
793 cpu_to_be32(wqe->rdma_wr.rkey);
794 ohdr->u.rc.reth.length = cpu_to_be32(len);
795 qp->s_state = OP(RDMA_READ_REQUEST);
796 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
799 bth2 |= IB_BTH_REQ_ACK;
800 if (++qp->s_cur == qp->s_size)
804 case IB_WR_TID_RDMA_READ:
805 trace_hfi1_tid_read_sender_make_req(qp, newreq);
807 req = wqe_to_tid_req(wqe);
808 trace_hfi1_tid_req_make_req_read(qp, newreq,
812 delta = cmp_psn(qp->s_psn, wqe->psn);
815 * Don't allow more operations to be started
816 * than the QP limits allow. We could get here under
* three conditions: (1) It's a new request; (2) We are
818 * sending the second or later segment of a request,
819 * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
820 * when the last segment of a previous request is
* received just before this; (3) We are re-sending a
* request.
824 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
825 qp->s_flags |= RVT_S_WAIT_RDMAR;
829 struct tid_rdma_flow *flow =
830 &req->flows[req->setup_head];
833 * Set up s_sge as it is needed for TID
834 * allocation. However, if the pages have been
835 * walked and mapped, skip it. An earlier try
836 * has failed to allocate the TID entries.
838 if (!flow->npagesets) {
839 qp->s_sge.sge = wqe->sg_list[0];
840 qp->s_sge.sg_list = wqe->sg_list + 1;
841 qp->s_sge.num_sge = wqe->wr.num_sge;
842 qp->s_sge.total_len = wqe->length;
843 qp->s_len = wqe->length;
845 req->clear_tail = req->setup_head;
846 req->flow_idx = req->setup_head;
847 req->state = TID_REQUEST_ACTIVE;
849 } else if (delta == 0) {
850 /* Re-send a request */
853 req->ack_pending = 0;
854 req->flow_idx = req->clear_tail;
855 req->state = TID_REQUEST_RESEND;
857 req->s_next_psn = qp->s_psn;
858 /* Read one segment at a time */
859 len = min_t(u32, req->seg_len,
860 wqe->length - req->seg_len * req->cur_seg);
861 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
865 /* Wait for TID space */
868 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
872 /* Check if this is the last segment */
873 if (req->cur_seg >= req->total_segs &&
874 ++qp->s_cur == qp->s_size)
878 case IB_WR_ATOMIC_CMP_AND_SWP:
879 case IB_WR_ATOMIC_FETCH_AND_ADD:
881 * Don't allow more operations to be started
882 * than the QP limits allow.
884 if (qp->s_num_rd_atomic >=
885 qp->s_max_rd_atomic) {
886 qp->s_flags |= RVT_S_WAIT_RDMAR;
889 qp->s_num_rd_atomic++;
893 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
895 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
896 wqe->wr.opcode == IB_WR_OPFN) {
897 qp->s_state = OP(COMPARE_SWAP);
898 put_ib_ateth_swap(wqe->atomic_wr.swap,
899 &ohdr->u.atomic_eth);
900 put_ib_ateth_compare(wqe->atomic_wr.compare_add,
901 &ohdr->u.atomic_eth);
903 qp->s_state = OP(FETCH_ADD);
904 put_ib_ateth_swap(wqe->atomic_wr.compare_add,
905 &ohdr->u.atomic_eth);
906 put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
908 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
909 &ohdr->u.atomic_eth);
910 ohdr->u.atomic_eth.rkey = cpu_to_be32(
911 wqe->atomic_wr.rkey);
912 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
915 bth2 |= IB_BTH_REQ_ACK;
916 if (++qp->s_cur == qp->s_size)
923 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) {
924 qp->s_sge.sge = wqe->sg_list[0];
925 qp->s_sge.sg_list = wqe->sg_list + 1;
926 qp->s_sge.num_sge = wqe->wr.num_sge;
927 qp->s_sge.total_len = wqe->length;
928 qp->s_len = wqe->length;
932 if (qp->s_tail >= qp->s_size)
935 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
936 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
937 qp->s_psn = wqe->lpsn + 1;
938 else if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
939 qp->s_psn = req->s_next_psn;
944 case OP(RDMA_READ_RESPONSE_FIRST):
946 * qp->s_state is normally set to the opcode of the
947 * last packet constructed for new requests and therefore
948 * is never set to RDMA read response.
949 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
950 * thread to indicate a SEND needs to be restarted from an
951 * earlier PSN without interfering with the sending thread.
954 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
957 qp->s_state = OP(SEND_MIDDLE);
959 case OP(SEND_MIDDLE):
960 bth2 = mask_psn(qp->s_psn++);
965 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
968 if (wqe->wr.opcode == IB_WR_SEND) {
969 qp->s_state = OP(SEND_LAST);
970 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
971 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
972 /* Immediate data comes after the BTH */
973 ohdr->u.imm_data = wqe->wr.ex.imm_data;
976 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
977 /* invalidate data comes after the BTH */
978 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
981 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
982 bth0 |= IB_BTH_SOLICITED;
983 bth2 |= IB_BTH_REQ_ACK;
985 if (qp->s_cur >= qp->s_size)
989 case OP(RDMA_READ_RESPONSE_LAST):
991 * qp->s_state is normally set to the opcode of the
992 * last packet constructed for new requests and therefore
993 * is never set to RDMA read response.
994 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
995 * thread to indicate a RDMA write needs to be restarted from
996 * an earlier PSN without interfering with the sending thread.
999 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
1001 case OP(RDMA_WRITE_FIRST):
1002 qp->s_state = OP(RDMA_WRITE_MIDDLE);
1004 case OP(RDMA_WRITE_MIDDLE):
1005 bth2 = mask_psn(qp->s_psn++);
1010 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
1013 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
1014 qp->s_state = OP(RDMA_WRITE_LAST);
1016 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
1017 /* Immediate data comes after the BTH */
1018 ohdr->u.imm_data = wqe->wr.ex.imm_data;
1020 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
1021 bth0 |= IB_BTH_SOLICITED;
1023 bth2 |= IB_BTH_REQ_ACK;
1025 if (qp->s_cur >= qp->s_size)
1029 case OP(RDMA_READ_RESPONSE_MIDDLE):
1031 * qp->s_state is normally set to the opcode of the
1032 * last packet constructed for new requests and therefore
1033 * is never set to RDMA read response.
1034 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
1035 * thread to indicate a RDMA read needs to be restarted from
1036 * an earlier PSN without interfering with the sending thread.
1039 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
1041 wqe->rdma_wr.remote_addr + len,
1043 ohdr->u.rc.reth.rkey =
1044 cpu_to_be32(wqe->rdma_wr.rkey);
1045 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
1046 qp->s_state = OP(RDMA_READ_REQUEST);
1047 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
1048 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
1049 qp->s_psn = wqe->lpsn + 1;
1053 if (qp->s_cur == qp->s_size)
1057 case TID_OP(WRITE_RESP):
1059 * This value for s_state is used for restarting a TID RDMA
* WRITE request. See comment in OP(RDMA_READ_RESPONSE_MIDDLE)
1063 req = wqe_to_tid_req(wqe);
1064 req->state = TID_REQUEST_RESEND;
1066 remote = rcu_dereference(priv->tid_rdma.remote);
1067 req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
1068 len = wqe->length - (req->comp_seg * remote->max_len);
1071 bth2 = mask_psn(qp->s_psn);
1072 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
1074 qp->s_psn = wqe->lpsn + 1;
1076 qp->s_state = TID_OP(WRITE_REQ);
1077 priv->pending_tid_w_resp += delta_psn(wqe->lpsn, bth2) + 1;
1078 priv->s_tid_cur = qp->s_cur;
1079 if (++qp->s_cur == qp->s_size)
1081 trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
1082 wqe->psn, wqe->lpsn, req);
1085 case TID_OP(READ_RESP):
1086 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
1088 /* This is used to restart a TID read request */
1089 req = wqe_to_tid_req(wqe);
1092 * Back down. The field qp->s_psn has been set to the psn with
* which the request should be restarted. It's OK to use division
1094 * as this is on the retry path.
1096 req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
* The following function needs to be redefined to return the
1100 * status to make sure that we find the flow. At the same
1101 * time, we can use the req->state change to check if the
1102 * call succeeds or not.
1104 req->state = TID_REQUEST_RESEND;
1105 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
1106 if (req->state != TID_REQUEST_ACTIVE) {
* Failed to find the flow. Release all allocated tid
* resources.
1111 hfi1_kern_exp_rcv_clear_all(req);
1112 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1114 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
1117 req->state = TID_REQUEST_RESEND;
1118 len = min_t(u32, req->seg_len,
1119 wqe->length - req->seg_len * req->cur_seg);
1120 flow = &req->flows[req->flow_idx];
1122 req->s_next_psn = flow->flow_state.ib_lpsn + 1;
1123 delta = hfi1_build_tid_rdma_read_packet(wqe, ohdr, &bth1,
1126 /* Wait for TID space */
1131 /* Check if this is the last segment */
1132 if (req->cur_seg >= req->total_segs &&
1133 ++qp->s_cur == qp->s_size)
1135 qp->s_psn = req->s_next_psn;
1136 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1137 wqe->psn, wqe->lpsn, req);
1139 case TID_OP(READ_REQ):
1140 req = wqe_to_tid_req(wqe);
1141 delta = cmp_psn(qp->s_psn, wqe->psn);
1143 * If the current WR is not TID RDMA READ, or this is the start
1144 * of a new request, we need to change the qp->s_state so that
1145 * the request can be set up properly.
1147 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ || delta == 0 ||
1148 qp->s_cur == qp->s_tail) {
1149 qp->s_state = OP(RDMA_READ_REQUEST);
1150 if (delta == 0 || qp->s_cur == qp->s_tail)
1157 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
1158 qp->s_flags |= RVT_S_WAIT_RDMAR;
1163 /* Read one segment at a time */
1164 len = min_t(u32, req->seg_len,
1165 wqe->length - req->seg_len * req->cur_seg);
1166 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
1169 /* Wait for TID space */
1174 /* Check if this is the last segment */
1175 if (req->cur_seg >= req->total_segs &&
1176 ++qp->s_cur == qp->s_size)
1178 qp->s_psn = req->s_next_psn;
1179 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1180 wqe->psn, wqe->lpsn, req);
1183 qp->s_sending_hpsn = bth2;
1184 delta = delta_psn(bth2, wqe->psn);
1185 if (delta && delta % HFI1_PSN_CREDIT == 0 &&
1186 wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1187 bth2 |= IB_BTH_REQ_ACK;
1188 if (qp->s_flags & RVT_S_SEND_ONE) {
1189 qp->s_flags &= ~RVT_S_SEND_ONE;
1190 qp->s_flags |= RVT_S_WAIT_ACK;
1191 bth2 |= IB_BTH_REQ_ACK;
1194 ps->s_txreq->hdr_dwords = hwords;
1195 ps->s_txreq->sde = priv->s_sde;
1196 ps->s_txreq->ss = ss;
1197 ps->s_txreq->s_cur_size = len;
1198 hfi1_make_ruc_header(
1201 bth0 | (qp->s_state << 24),
1209 hfi1_put_txreq(ps->s_txreq);
1214 hfi1_put_txreq(ps->s_txreq);
1218 qp->s_flags &= ~RVT_S_BUSY;
1220 * If we didn't get a txreq, the QP will be woken up later to try
1221 * again. Set the flags to indicate which work item to wake
1224 iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
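/*
 * hfi1_make_bth_aeth() below fills in the AETH word, either a NAK built
 * from r_nak_state/r_msn or a normal credit/MSN value from
 * rvt_compute_aeth(), and then writes the three BTH dwords (opcode/flags,
 * remote QPN, and the masked r_ack_psn).
 */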
1228 static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
1229 struct ib_other_headers *ohdr,
1232 if (qp->r_nak_state)
1233 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
1235 IB_AETH_CREDIT_SHIFT));
1237 ohdr->u.aeth = rvt_compute_aeth(qp);
1239 ohdr->bth[0] = cpu_to_be32(bth0);
1240 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
1241 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
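/*
 * hfi1_queue_rc_ack() defers an ACK to the verbs send engine: it sets
 * RVT_S_ACK_PENDING and RVT_S_RESP_PENDING, captures the NAK state and
 * ACK PSN from the receive side, and schedules the QP so that
 * make_rc_ack() builds the packet later.
 */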
1244 static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1246 struct rvt_qp *qp = packet->qp;
1247 struct hfi1_ibport *ibp;
1248 unsigned long flags;
1250 spin_lock_irqsave(&qp->s_lock, flags);
1251 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1253 ibp = rcd_to_iport(packet->rcd);
1254 this_cpu_inc(*ibp->rvp.rc_qacks);
1255 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
1256 qp->s_nak_state = qp->r_nak_state;
1257 qp->s_ack_psn = qp->r_ack_psn;
1259 qp->s_flags |= RVT_S_ECN;
1261 /* Schedule the send tasklet. */
1262 hfi1_schedule_send(qp);
1264 spin_unlock_irqrestore(&qp->s_lock, flags);
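/*
 * hfi1_make_rc_ack_9B() builds the classic IB (9B LRH) form of a
 * stand-alone ACK: optional GRH, LRH, BTH and AETH.  The 16B variant
 * further down differs mainly in LRH format, pad/ICRC accounting, and in
 * carrying the BECN indication in the 16B header itself.
 */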
1267 static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
1268 struct hfi1_opa_header *opa_hdr,
1269 u8 sc5, bool is_fecn,
1270 u64 *pbc_flags, u32 *hwords,
1273 struct rvt_qp *qp = packet->qp;
1274 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1275 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1276 struct ib_header *hdr = &opa_hdr->ibh;
1277 struct ib_other_headers *ohdr;
1278 u16 lrh0 = HFI1_LRH_BTH;
1282 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
1284 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
1287 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
1288 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1289 rdma_ah_read_grh(&qp->remote_ah_attr),
1290 *hwords - 2, SIZE_OF_CRC);
1291 ohdr = &hdr->u.l.oth;
1292 lrh0 = HFI1_LRH_GRH;
1294 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
1295 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
/* read pkey_index w/o lock (it's atomic) */
1298 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1300 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
1301 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
1304 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
1305 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
1306 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
1308 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1309 if (qp->s_mig_state == IB_MIG_MIGRATED)
1310 bth0 |= IB_BTH_MIG_REQ;
1311 bth1 = (!!is_fecn) << IB_BECN_SHIFT;
1313 * Inline ACKs go out without the use of the Verbs send engine, so
1314 * we need to set the STL Verbs Extended bit here
1316 bth1 |= HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT;
1317 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1320 static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
1321 struct hfi1_opa_header *opa_hdr,
1322 u8 sc5, bool is_fecn,
1323 u64 *pbc_flags, u32 *hwords,
1326 struct rvt_qp *qp = packet->qp;
1327 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
1328 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1329 struct hfi1_16b_header *hdr = &opa_hdr->opah;
1330 struct ib_other_headers *ohdr;
1333 bool becn = is_fecn;
1334 u8 l4 = OPA_16B_L4_IB_LOCAL;
1337 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
1339 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
1341 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
1342 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
1344 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
1345 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
1346 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
1347 rdma_ah_read_grh(&qp->remote_ah_attr),
1348 *hwords - 4, *nwords);
1349 ohdr = &hdr->u.l.oth;
1350 l4 = OPA_16B_L4_IB_GLOBAL;
1352 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
/* read pkey_index w/o lock (it's atomic) */
1355 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1357 /* Convert dwords to flits */
1358 len = (*hwords + *nwords) >> 1;
1360 hfi1_make_16b_hdr(hdr, ppd->lid |
1361 (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
1362 ((1 << ppd->lmc) - 1)),
1363 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
1364 16B), len, pkey, becn, 0, l4, sc5);
1366 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
1367 bth0 |= extra_bytes << 20;
1368 if (qp->s_mig_state == IB_MIG_MIGRATED)
1369 bth1 = OPA_BTH_MIG_REQ;
1370 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1373 typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
1374 struct hfi1_opa_header *opa_hdr,
1375 u8 sc5, bool is_fecn,
1376 u64 *pbc_flags, u32 *hwords,
1379 /* We support only two types - 9B and 16B for now */
1380 static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
1381 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
1382 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
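/* hfi1_send_rc_ack() below indexes this table by priv->hdr_type. */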
1386 * hfi1_send_rc_ack - Construct an ACK packet and send it
* @packet: information about the received packet
* @is_fecn: whether the FECN bit was set in the incoming packet
1389 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
1390 * Note that RDMA reads and atomics are handled in the
1391 * send side QP state and send engine.
1393 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
1395 struct hfi1_ctxtdata *rcd = packet->rcd;
1396 struct rvt_qp *qp = packet->qp;
1397 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1398 struct hfi1_qp_priv *priv = qp->priv;
1399 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1400 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
1401 u64 pbc, pbc_flags = 0;
1405 struct pio_buf *pbuf;
1406 struct hfi1_opa_header opa_hdr;
1408 /* clear the defer count */
1411 /* Don't send ACK or NAK if a RDMA read or atomic is pending. */
1412 if (qp->s_flags & RVT_S_RESP_PENDING) {
1413 hfi1_queue_rc_ack(packet, is_fecn);
1417 /* Ensure s_rdma_ack_cnt changes are committed */
1418 if (qp->s_rdma_ack_cnt) {
1419 hfi1_queue_rc_ack(packet, is_fecn);
1423 /* Don't try to send ACKs if the link isn't ACTIVE */
1424 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
1427 /* Make the appropriate header */
1428 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
1429 &pbc_flags, &hwords, &nwords);
1431 plen = 2 /* PBC */ + hwords + nwords;
1432 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
1433 sc_to_vlt(ppd->dd, sc5), plen);
1434 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
1437 * We have no room to send at the moment. Pass
1438 * responsibility for sending the ACK to the send engine
1439 * so that when enough buffer space becomes available,
1440 * the ACK is sent ahead of other outgoing packets.
1442 hfi1_queue_rc_ack(packet, is_fecn);
1445 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
1446 &opa_hdr, ib_is_sc5(sc5));
1448 /* write the pbc and data */
1449 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
1450 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
1451 (void *)&opa_hdr.ibh :
1452 (void *)&opa_hdr.opah), hwords);
1457 * update_num_rd_atomic - update the qp->s_num_rd_atomic
1459 * @psn: the packet sequence number to restart at
1462 * This is called from reset_psn() to update qp->s_num_rd_atomic
1463 * for the current wqe.
1464 * Called at interrupt level with the QP s_lock held.
1466 static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
1467 struct rvt_swqe *wqe)
1469 u32 opcode = wqe->wr.opcode;
1471 if (opcode == IB_WR_RDMA_READ ||
1472 opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1473 opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1474 qp->s_num_rd_atomic++;
1475 } else if (opcode == IB_WR_TID_RDMA_READ) {
1476 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1477 struct hfi1_qp_priv *priv = qp->priv;
1479 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1482 cur_seg = (psn - wqe->psn) / priv->pkts_ps;
1483 req->ack_pending = cur_seg - req->comp_seg;
1484 priv->pending_tid_r_segs += req->ack_pending;
1485 qp->s_num_rd_atomic += req->ack_pending;
1487 priv->pending_tid_r_segs += req->total_segs;
1488 qp->s_num_rd_atomic += req->total_segs;
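/*
 * Illustrative numbers (not from the source): with pkts_ps = 8 packets
 * per segment and a restart PSN 16 packets past wqe->psn, cur_seg above
 * becomes 16 / 8 = 2, so with comp_seg = 0 two segments are re-charged to
 * pending_tid_r_segs and s_num_rd_atomic; a restart PSN beyond wqe->lpsn
 * instead charges all total_segs segments, as the two lines above do.
 */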
1494 * reset_psn - reset the QP state to send starting from PSN
1496 * @psn: the packet sequence number to restart at
1498 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
1500 * Called at interrupt level with the QP s_lock held.
1502 static void reset_psn(struct rvt_qp *qp, u32 psn)
1504 u32 n = qp->s_acked;
1505 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
1507 struct hfi1_qp_priv *priv = qp->priv;
1509 lockdep_assert_held(&qp->s_lock);
1511 priv->pending_tid_r_segs = 0;
1512 priv->pending_tid_w_resp = 0;
1513 qp->s_num_rd_atomic = 0;
1516 * If we are starting the request from the beginning,
1517 * let the normal send code handle initialization.
1519 if (cmp_psn(psn, wqe->psn) <= 0) {
1520 qp->s_state = OP(SEND_LAST);
1523 update_num_rd_atomic(qp, psn, wqe);
1525 /* Find the work request opcode corresponding to the given PSN. */
1529 if (++n == qp->s_size)
1531 if (n == qp->s_tail)
1533 wqe = rvt_get_swqe_ptr(qp, n);
1534 diff = cmp_psn(psn, wqe->psn);
/* Point wqe back to the previous one */
1537 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1542 * If we are starting the request from the beginning,
1543 * let the normal send code handle initialization.
1546 qp->s_state = OP(SEND_LAST);
1550 update_num_rd_atomic(qp, psn, wqe);
1552 opcode = wqe->wr.opcode;
1555 * Set the state to restart in the middle of a request.
1556 * Don't change the s_sge, s_cur_sge, or s_cur_size.
1557 * See hfi1_make_rc_req().
1561 case IB_WR_SEND_WITH_IMM:
1562 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
1565 case IB_WR_RDMA_WRITE:
1566 case IB_WR_RDMA_WRITE_WITH_IMM:
1567 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
1570 case IB_WR_TID_RDMA_WRITE:
1571 qp->s_state = TID_OP(WRITE_RESP);
1574 case IB_WR_RDMA_READ:
1575 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
1578 case IB_WR_TID_RDMA_READ:
1579 qp->s_state = TID_OP(READ_RESP);
* This case shouldn't happen since it's only
1587 qp->s_state = OP(SEND_LAST);
1590 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1593 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
1594 * asynchronously before the send engine can get scheduled.
1595 * Doing it in hfi1_make_rc_req() is too late.
1597 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
1598 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
1599 qp->s_flags |= RVT_S_WAIT_PSN;
1600 qp->s_flags &= ~HFI1_S_AHG_VALID;
1601 trace_hfi1_sender_reset_psn(qp);
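/*
 * The PSN helpers used above operate on 24-bit circular sequence numbers:
 * mask_psn() truncates to 24 bits, delta_psn() returns the signed circular
 * distance, and cmp_psn() compares modulo 2^24, so for example
 * cmp_psn(0x000001, 0xfffffe) is positive because the wrapped distance
 * is +3.
 */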
1605 * Back up requester to resend the last un-ACKed request.
1606 * The QP r_lock and s_lock should be held and interrupts disabled.
1608 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
1610 struct hfi1_qp_priv *priv = qp->priv;
1611 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1612 struct hfi1_ibport *ibp;
1614 lockdep_assert_held(&qp->r_lock);
1615 lockdep_assert_held(&qp->s_lock);
1616 trace_hfi1_sender_restart_rc(qp);
1617 if (qp->s_retry == 0) {
1618 if (qp->s_mig_state == IB_MIG_ARMED) {
1619 hfi1_migrate_qp(qp);
1620 qp->s_retry = qp->s_retry_cnt;
1621 } else if (qp->s_last == qp->s_acked) {
1623 * We need special handling for the OPFN request WQEs as
1624 * they are not allowed to generate real user errors
1626 if (wqe->wr.opcode == IB_WR_OPFN) {
1627 struct hfi1_ibport *ibp =
1628 to_iport(qp->ibqp.device, qp->port_num);
1630 * Call opfn_conn_reply() with capcode and
1631 * remaining data as 0 to close out the
1634 opfn_conn_reply(qp, priv->opfn.curr);
1635 wqe = do_rc_completion(qp, wqe, ibp);
1636 qp->s_flags &= ~RVT_S_WAIT_ACK;
1638 trace_hfi1_tid_write_sender_restart_rc(qp, 0);
1639 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
1640 struct tid_rdma_request *req;
1642 req = wqe_to_tid_req(wqe);
1643 hfi1_kern_exp_rcv_clear_all(req);
1644 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1647 hfi1_trdma_send_complete(qp, wqe,
1648 IB_WC_RETRY_EXC_ERR);
1649 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1652 } else { /* need to handle delayed completion */
1659 ibp = to_iport(qp->ibqp.device, qp->port_num);
1660 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1661 wqe->wr.opcode == IB_WR_TID_RDMA_READ)
1662 ibp->rvp.n_rc_resends++;
1664 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1666 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
1667 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
1668 RVT_S_WAIT_ACK | HFI1_S_WAIT_TID_RESP);
1670 qp->s_flags |= RVT_S_SEND_ONE;
1675 * Set qp->s_sending_psn to the next PSN after the given one.
* This would be psn+1 except when RDMA reads or TID RDMA ops
* are complete.
1679 static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
1681 struct rvt_swqe *wqe;
1684 lockdep_assert_held(&qp->s_lock);
1685 /* Find the work request corresponding to the given PSN. */
1687 wqe = rvt_get_swqe_ptr(qp, n);
1688 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1689 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1690 wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
1691 wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
1692 qp->s_sending_psn = wqe->lpsn + 1;
1694 qp->s_sending_psn = psn + 1;
1697 if (++n == qp->s_size)
1699 if (n == qp->s_tail)
1705 * This should be called with the QP s_lock held and interrupts disabled.
1707 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1709 struct ib_other_headers *ohdr;
1710 struct hfi1_qp_priv *priv = qp->priv;
1711 struct rvt_swqe *wqe;
1712 struct ib_header *hdr = NULL;
1713 struct hfi1_16b_header *hdr_16b = NULL;
1714 u32 opcode, head, tail;
1716 struct tid_rdma_request *req;
1718 lockdep_assert_held(&qp->s_lock);
1719 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
1722 /* Find out where the BTH is */
1723 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
1725 if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
1728 ohdr = &hdr->u.l.oth;
1732 hdr_16b = &opah->opah;
1733 l4 = hfi1_16B_get_l4(hdr_16b);
1734 if (l4 == OPA_16B_L4_IB_LOCAL)
1735 ohdr = &hdr_16b->u.oth;
1737 ohdr = &hdr_16b->u.l.oth;
1740 opcode = ib_bth_get_opcode(ohdr);
1741 if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1742 opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
1743 opcode == TID_OP(READ_RESP) ||
1744 opcode == TID_OP(WRITE_RESP)) {
1745 WARN_ON(!qp->s_rdma_ack_cnt);
1746 qp->s_rdma_ack_cnt--;
1750 psn = ib_bth_get_psn(ohdr);
1752 * Don't attempt to reset the sending PSN for packets in the
1753 * KDETH PSN space since the PSN does not match anything.
1755 if (opcode != TID_OP(WRITE_DATA) &&
1756 opcode != TID_OP(WRITE_DATA_LAST) &&
1757 opcode != TID_OP(ACK) && opcode != TID_OP(RESYNC))
1758 reset_sending_psn(qp, psn);
1760 /* Handle TID RDMA WRITE packets differently */
1761 if (opcode >= TID_OP(WRITE_REQ) &&
1762 opcode <= TID_OP(WRITE_DATA_LAST)) {
1763 head = priv->s_tid_head;
1764 tail = priv->s_tid_cur;
* s_tid_cur is set to s_tid_head in the case where
1767 * a new TID RDMA request is being started and all
1768 * previous ones have been completed.
1769 * Therefore, we need to do a secondary check in order
1770 * to properly determine whether we should start the
1773 wqe = rvt_get_swqe_ptr(qp, tail);
1774 req = wqe_to_tid_req(wqe);
1775 if (head == tail && req->comp_seg < req->total_segs) {
1777 tail = qp->s_size - 1;
1787 * Start timer after a packet requesting an ACK has been sent and
1788 * there are still requests that haven't been acked.
1790 if ((psn & IB_BTH_REQ_ACK) && tail != head &&
1791 opcode != TID_OP(WRITE_DATA) && opcode != TID_OP(WRITE_DATA_LAST) &&
1792 opcode != TID_OP(RESYNC) &&
1794 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
1795 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1796 if (opcode == TID_OP(READ_REQ))
1797 rvt_add_retry_timer_ext(qp, priv->timeout_shift);
1799 rvt_add_retry_timer(qp);
1802 /* Start TID RDMA ACK timer */
1803 if ((opcode == TID_OP(WRITE_DATA) ||
1804 opcode == TID_OP(WRITE_DATA_LAST) ||
1805 opcode == TID_OP(RESYNC)) &&
1806 (psn & IB_BTH_REQ_ACK) &&
1807 !(priv->s_flags & HFI1_S_TID_RETRY_TIMER) &&
1808 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1810 * The TID RDMA ACK packet could be received before this
1811 * function is called. Therefore, add the timer only if TID
1812 * RDMA ACK packets are actually pending.
1814 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1815 req = wqe_to_tid_req(wqe);
1816 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
1817 req->ack_seg < req->cur_seg)
1818 hfi1_add_tid_retry_timer(qp);
1821 while (qp->s_last != qp->s_acked) {
1824 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1825 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1826 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1828 trdma_clean_swqe(qp, wqe);
1829 rvt_qp_wqe_unreserve(qp, wqe);
1830 s_last = qp->s_last;
1831 trace_hfi1_qp_send_completion(qp, wqe, s_last);
1832 if (++s_last >= qp->s_size)
1834 qp->s_last = s_last;
1835 /* see post_send() */
1838 rvt_qp_swqe_complete(qp,
1840 ib_hfi1_wc_opcode[wqe->wr.opcode],
1844 * If we were waiting for sends to complete before re-sending,
1845 * and they are now complete, restart sending.
1847 trace_hfi1_sendcomplete(qp, psn);
1848 if (qp->s_flags & RVT_S_WAIT_PSN &&
1849 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1850 qp->s_flags &= ~RVT_S_WAIT_PSN;
1851 qp->s_sending_psn = qp->s_psn;
1852 qp->s_sending_hpsn = qp->s_psn - 1;
1853 hfi1_schedule_send(qp);
1857 static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1859 qp->s_last_psn = psn;
1863 * Generate a SWQE completion.
1864 * This is similar to hfi1_send_complete but has to check to be sure
1865 * that the SGEs are not being referenced if the SWQE is being resent.
1867 struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1868 struct rvt_swqe *wqe,
1869 struct hfi1_ibport *ibp)
1871 struct hfi1_qp_priv *priv = qp->priv;
1873 lockdep_assert_held(&qp->s_lock);
1875 * Don't decrement refcount and don't generate a
1876 * completion if the SWQE is being resent until the send
1879 trace_hfi1_rc_completion(qp, wqe->lpsn);
1880 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1881 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1884 trdma_clean_swqe(qp, wqe);
1886 rvt_qp_wqe_unreserve(qp, wqe);
1887 s_last = qp->s_last;
1888 trace_hfi1_qp_send_completion(qp, wqe, s_last);
1889 if (++s_last >= qp->s_size)
1891 qp->s_last = s_last;
1892 /* see post_send() */
1894 rvt_qp_swqe_complete(qp,
1896 ib_hfi1_wc_opcode[wqe->wr.opcode],
1899 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1901 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
* If send progress is not running, attempt to progress
1906 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1907 struct sdma_engine *engine;
1908 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1911 /* For now use sc to find engine */
1912 sc5 = ibp->sl_to_sc[sl];
1913 engine = qp_to_sdma_engine(qp, sc5);
1914 sdma_engine_progress_schedule(engine);
1918 qp->s_retry = qp->s_retry_cnt;
1920 * Don't update the last PSN if the request being completed is
1921 * a TID RDMA WRITE request.
* Completion of TID RDMA WRITE requests is done by the
1923 * TID RDMA ACKs and as such could be for a request that has
1924 * already been ACKed as far as the IB state machine is
1927 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
1928 update_last_psn(qp, wqe->lpsn);
1931 * If we are completing a request which is in the process of
1932 * being resent, we can stop re-sending it since we know the
1933 * responder has already seen it.
1935 if (qp->s_acked == qp->s_cur) {
1936 if (++qp->s_cur >= qp->s_size)
1938 qp->s_acked = qp->s_cur;
1939 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1940 if (qp->s_acked != qp->s_tail) {
1941 qp->s_state = OP(SEND_LAST);
1942 qp->s_psn = wqe->psn;
1945 if (++qp->s_acked >= qp->s_size)
1947 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1949 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1951 if (priv->s_flags & HFI1_S_TID_WAIT_INTERLCK) {
1952 priv->s_flags &= ~HFI1_S_TID_WAIT_INTERLCK;
1953 hfi1_schedule_send(qp);
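/*
 * set_restart_qp() below handles an out-of-sequence response: it marks
 * the QP with RVT_R_RDMAR_SEQ, restarts transmission from s_last_psn + 1,
 * and queues the QP on the receive context's wait list so the restart is
 * driven from the receive path.
 */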
1958 static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
1960 /* Retry this request. */
1961 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1962 qp->r_flags |= RVT_R_RDMAR_SEQ;
1963 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1964 if (list_empty(&qp->rspwait)) {
1965 qp->r_flags |= RVT_R_RSP_SEND;
1967 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1973 * update_qp_retry_state - Update qp retry state.
1975 * @psn: the packet sequence number of the TID RDMA WRITE RESP.
1976 * @spsn: The start psn for the given TID RDMA WRITE swqe.
1977 * @lpsn: The last psn for the given TID RDMA WRITE swqe.
1979 * This function is called to update the qp retry state upon
1980 * receiving a TID WRITE RESP after the qp is scheduled to retry
1983 static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
1986 struct hfi1_qp_priv *qpriv = qp->priv;
1988 qp->s_psn = psn + 1;
1990 * If this is the first TID RDMA WRITE RESP packet for the current
1991 * request, change the s_state so that the retry will be processed
1992 * correctly. Similarly, if this is the last TID RDMA WRITE RESP
1993 * packet, change the s_state and advance the s_cur.
1995 if (cmp_psn(psn, lpsn) >= 0) {
1996 qp->s_cur = qpriv->s_tid_cur + 1;
1997 if (qp->s_cur >= qp->s_size)
1999 qp->s_state = TID_OP(WRITE_REQ);
2000 } else if (!cmp_psn(psn, spsn)) {
2001 qp->s_cur = qpriv->s_tid_cur;
2002 qp->s_state = TID_OP(WRITE_RESP);
2007 * do_rc_ack - process an incoming RC ACK
2008 * @qp: the QP the ACK came in on
2009 * @psn: the packet sequence number of the ACK
2010 * @opcode: the opcode of the request that resulted in the ACK
2012 * This is called from rc_rcv_resp() to process an incoming RC ACK
2014 * May be called at interrupt level, with the QP s_lock held.
2015 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
2017 int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
2018 u64 val, struct hfi1_ctxtdata *rcd)
2020 struct hfi1_ibport *ibp;
2021 enum ib_wc_status status;
2022 struct hfi1_qp_priv *qpriv = qp->priv;
2023 struct rvt_swqe *wqe;
2027 struct rvt_dev_info *rdi;
2029 lockdep_assert_held(&qp->s_lock);
2031 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2032 * requests and implicitly NAK RDMA read and atomic requests issued
2033 * before the NAK'ed request. The MSN won't include the NAK'ed
* request but will include any ACK'ed request(s).
2037 if (aeth >> IB_AETH_NAK_SHIFT)
2039 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2040 ibp = rcd_to_iport(rcd);
2043 * The MSN might be for a later WQE than the PSN indicates so
2044 * only complete WQEs that the PSN finishes.
2046 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
2048 * RDMA_READ_RESPONSE_ONLY is a special case since
2049 * we want to generate completion events for everything
2050 * before the RDMA read, copy the data, then generate
2051 * the completion for the read.
2053 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
2054 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
2060 * If this request is a RDMA read or atomic, and the ACK is
2061 * for a later operation, this ACK NAKs the RDMA read or
2062 * atomic. In other words, only a RDMA_READ_LAST or ONLY
2063 * can ACK a RDMA read and likewise for atomic ops. Note
2064 * that the NAK case can only happen if relaxed ordering is
2065 * used and requests are sent after an RDMA read or atomic
2066 * is sent but before the response is received.
2068 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
2069 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
2070 (wqe->wr.opcode == IB_WR_TID_RDMA_READ &&
2071 (opcode != TID_OP(READ_RESP) || diff != 0)) ||
2072 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2073 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
2074 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0)) ||
2075 (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2076 (delta_psn(psn, qp->s_last_psn) != 1))) {
2077 set_restart_qp(qp, rcd);
2079 * No need to process the ACK/NAK since we are
2080 * restarting an earlier request.
2084 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2085 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2086 u64 *vaddr = wqe->sg_list[0].vaddr;
2089 if (wqe->wr.opcode == IB_WR_OPFN)
2090 opfn_conn_reply(qp, val);
2092 if (qp->s_num_rd_atomic &&
2093 (wqe->wr.opcode == IB_WR_RDMA_READ ||
2094 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2095 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
2096 qp->s_num_rd_atomic--;
2097 /* Restart sending task if fence is complete */
2098 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2099 !qp->s_num_rd_atomic) {
2100 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2102 hfi1_schedule_send(qp);
2103 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2104 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
2106 hfi1_schedule_send(qp);
2111 * TID RDMA WRITE requests will be completed by the TID RDMA
2112 * ACK packet handler (see tid_rdma.c).
2114 if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE)
2117 wqe = do_rc_completion(qp, wqe, ibp);
2118 if (qp->s_acked == qp->s_tail)
2122 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
2123 trace_hfi1_sender_do_rc_ack(qp);
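/*
 * The AETH decoded below follows the standard IB layout: bits 31..29
 * select ACK (0), RNR NAK (1) or NAK (3), with 2 reserved; bits 28..24
 * carry the credit count or NAK code (IB_AETH_CREDIT_SHIFT/MASK); and
 * bits 23..0 carry the MSN (IB_MSN_MASK).
 */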
2124 switch (aeth >> IB_AETH_NAK_SHIFT) {
2126 this_cpu_inc(*ibp->rvp.rc_acks);
2127 if (wqe->wr.opcode == IB_WR_TID_RDMA_READ) {
2128 if (wqe_to_tid_req(wqe)->ack_pending)
2129 rvt_mod_retry_timer_ext(qp,
2130 qpriv->timeout_shift);
2132 rvt_stop_rc_timers(qp);
2133 } else if (qp->s_acked != qp->s_tail) {
2134 struct rvt_swqe *__w = NULL;
2136 if (qpriv->s_tid_cur != HFI1_QP_WQE_INVALID)
2137 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
2140 * Stop timers if we've received all of the TID RDMA
* WRITE responses.
2143 if (__w && __w->wr.opcode == IB_WR_TID_RDMA_WRITE &&
2144 opcode == TID_OP(WRITE_RESP)) {
2146 * Normally, the loop above would correctly
2147 * process all WQEs from s_acked onward and
2148 * either complete them or check for correct
2150 * However, for TID RDMA, due to pipelining,
2151 * the response may not be for the request at
* s_acked so the above loop would just be
2153 * skipped. This does not allow for checking
2154 * the PSN sequencing. It has to be done
2157 if (cmp_psn(psn, qp->s_last_psn + 1)) {
2158 set_restart_qp(qp, rcd);
				/*
				 * If the psn is being resent, stop the
				 * resending.
				 */
				if (qp->s_cur != qp->s_tail &&
				    cmp_psn(qp->s_psn, psn) <= 0)
					update_qp_retry_state(qp, psn,
							      wqe->psn,
							      wqe->lpsn);
				else if (--qpriv->pending_tid_w_resp)
					rvt_mod_retry_timer(qp);
				else
					rvt_stop_rc_timers(qp);
			} else {
				/*
				 * We are expecting more ACKs so
				 * mod the retry timer.
				 */
				rvt_mod_retry_timer(qp);
				/*
				 * We can stop re-sending the earlier packets
				 * and continue with the next packet the
				 * receiver wants.
				 */
				if (cmp_psn(qp->s_psn, psn) <= 0)
					reset_psn(qp, psn + 1);
			}
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (cmp_psn(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}
2200 rvt_get_credit(qp, aeth);
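		/* A successful ACK restores the full retry and RNR retry budgets. */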
2201 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2202 qp->s_retry = qp->s_retry_cnt;
		/*
		 * If the current request is a TID RDMA WRITE request and the
		 * response is not a TID RDMA WRITE RESP packet, s_last_psn
		 * can't be advanced.
		 */
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE &&
		    opcode != TID_OP(WRITE_RESP) &&
		    cmp_psn(psn, wqe->psn) >= 0)
			return 1;
		update_last_psn(qp, psn);
		return 1;

	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail_stop;
		rdi = ib_to_rvt(qp->ibqp.device);
		if (qp->s_rnr_retry == 0 &&
		    !((rdi->post_parms[wqe->wr.opcode].flags &
		       RVT_OPERATION_IGN_RNR_CNT) &&
		      qp->s_rnr_retry_cnt == 0)) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
			qp->s_rnr_retry--;

		/*
		 * The last valid PSN is the previous PSN. For a TID RDMA WRITE
		 * request, s_last_psn should be incremented only when a TID
		 * RDMA WRITE RESP is received to avoid skipping lost TID RDMA
		 * WRITE RESP packets.
		 */
		if (wqe->wr.opcode == IB_WR_TID_RDMA_WRITE) {
			reset_psn(qp, qp->s_last_psn + 1);
		} else {
			update_last_psn(qp, psn - 1);
			reset_psn(qp, psn);
		}

2245 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
2246 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
2247 rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;

	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail_stop;
		/* The last valid PSN is the previous PSN. */
2255 update_last_psn(qp, psn - 1);
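		/*
		 * The NAK syndrome lives in the AETH credit field; the cases
		 * below map it to a completion status or a restart.
		 */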
2256 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
2257 IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			hfi1_restart_rc(qp, psn, 0);
			hfi1_schedule_send(qp);
			break;

		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				if (wqe->wr.opcode == IB_WR_TID_RDMA_READ)
					hfi1_kern_read_tid_flow_free(qp);
				hfi1_trdma_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail_stop;

	default:                /* 2: reserved */
reserved:
		/* Ignore reserved NAK codes. */
		goto bail_stop;
	}
	/* cannot be reached */
bail_stop:
	rvt_stop_rc_timers(qp);
	return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
			 struct hfi1_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	lockdep_assert_held(&qp->s_lock);
2322 /* Remove QP from retry timer */
2323 rvt_stop_rc_timers(qp);
2325 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	while (cmp_psn(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_TID_RDMA_WRITE ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}

2337 ibp->rvp.n_rdma_seq++;
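	/*
	 * Mark the restart so that rc_rcv_resp() ignores everything except
	 * the PSN expected for the restarted RDMA read or atomic op.
	 */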
2338 qp->r_flags |= RVT_R_RDMAR_SEQ;
2339 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}

/**
 * rc_rcv_resp - process an incoming RC response packet
 * @packet: data packet information
 *
 * This is called from hfi1_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void rc_rcv_resp(struct hfi1_packet *packet)
{
2357 struct hfi1_ctxtdata *rcd = packet->rcd;
2358 void *data = packet->payload;
2359 u32 tlen = packet->tlen;
2360 struct rvt_qp *qp = packet->qp;
2361 struct hfi1_ibport *ibp;
2362 struct ib_other_headers *ohdr = packet->ohdr;
2363 struct rvt_swqe *wqe;
2364 enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u64 val;
	u32 aeth;
	u32 psn = ib_bth_get_psn(packet->ohdr);
2370 u32 pmtu = qp->pmtu;
2371 u16 hdrsize = packet->hlen;
2372 u8 opcode = packet->opcode;
2373 u8 pad = packet->pad;
2374 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
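	/*
	 * Presumably extra_bytes accounts for the 4-byte ICRC (SIZE_OF_CRC is
	 * in 32-bit words), any payload padding, and the additional trailing
	 * byte reported for 16B packets in packet->extra_byte.
	 */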
2376 spin_lock_irqsave(&qp->s_lock, flags);
2377 trace_hfi1_ack(qp, psn);
2379 /* Ignore invalid responses. */
	if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = cmp_psn(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;

	switch (opcode) {
2411 case OP(ACKNOWLEDGE):
2412 case OP(ATOMIC_ACKNOWLEDGE):
2413 case OP(RDMA_READ_RESPONSE_FIRST):
2414 aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;
		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			hfi1_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, pmtu, false, false);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + extra_bytes)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + extra_bytes;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void)do_rc_ack(qp, aeth, psn,
				OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}

ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_seq_err:
	ibp = rcd_to_iport(rcd);
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;

ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		rvt_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

static inline void rc_cancel_ack(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	if (list_empty(&qp->rspwait))
		return;
	list_del_init(&qp->rspwait);
	qp->r_flags &= ~RVT_R_RSP_NAK;
	rvt_put_qp(qp);
}

/**
 * rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the receive context
 *
 * This is called from hfi1_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
				 struct rvt_qp *qp, u32 opcode, u32 psn,
				 int diff, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 prev;
	u8 mra; /* most recent ACK */
	bool old_req;

	trace_hfi1_rcv_error(qp, psn);
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
2613 ibp->rvp.n_rc_dupreq++;
2615 spin_lock_irqsave(&qp->s_lock, flags);

	e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = delta_psn(psn, e->psn) * qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		release_rdma_sge_mr(e);
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = get_ib_reth_vaddr(reth);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->psn = psn;
		if (old_req)
			goto unlock_done;
		if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
			qp->s_acked_ack_queue = prev;
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send engine is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8)opcode || old_req)
			goto unlock_done;
		if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
			qp->s_acked_ack_queue = prev;
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (mra == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}

		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
			qp->s_acked_ack_queue = mra;
		qp->s_tail_ack_queue = mra;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	hfi1_schedule_send(qp);

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
			  u32 lqpn, u32 rqpn, u8 svc_type)
{
	struct opa_hfi1_cong_log_event_internal *cc_event;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	spin_lock_irqsave(&ppd->cc_log_lock, flags);
2735 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
2736 ppd->threshold_event_counter++;
2738 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
2739 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
2740 ppd->cc_log_idx = 0;
	cc_event->lqpn = lqpn & RVT_QPN_MASK;
	cc_event->rqpn = rqpn & RVT_QPN_MASK;
	cc_event->sl = sl;
	cc_event->svc_type = svc_type;
	cc_event->rlid = rlid;
2746 /* keep timestamp in units of 1.024 usec */
2747 cc_event->timestamp = ktime_get_ns() / 1024;
	spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
}

void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type)
{
	struct cca_timer *cca_timer;
	u16 ccti, ccti_incr, ccti_timer, ccti_limit;
	u8 trigger_threshold;
	struct cc_state *cc_state;
	unsigned long flags;

	if (sl >= OPA_MAX_SLS)
		return;

	cc_state = get_cc_state(ppd);
	if (!cc_state)
		return;

	/*
	 * 1) increase CCTI (for this SL)
	 * 2) select IPG (i.e., call set_link_ipg())
	 * 3) start timer
	 */
	ccti_limit = cc_state->cct.ccti_limit;
	ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
	ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
	trigger_threshold =
		cc_state->cong_setting.entries[sl].trigger_threshold;

2780 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
2782 cca_timer = &ppd->cca_timer[sl];
	if (cca_timer->ccti < ccti_limit) {
		if (cca_timer->ccti + ccti_incr <= ccti_limit)
			cca_timer->ccti += ccti_incr;
		else
			cca_timer->ccti = ccti_limit;
		set_link_ipg(ppd);
	}

	ccti = cca_timer->ccti;

2793 if (!hrtimer_active(&cca_timer->hrtimer)) {
2794 /* ccti_timer is in units of 1.024 usec */
2795 unsigned long nsec = 1024 * ccti_timer;

		hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
			      HRTIMER_MODE_REL_PINNED);
	}

	spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);

	if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
		log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
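
/*
 * A note on the congestion-control path above: receiving a BECN increases the
 * CCTI for the SL, selects a new inter-packet gap via set_link_ipg(), and arms
 * the per-SL hrtimer; the timer handler (not shown here) is assumed to decay
 * the CCTI again over time. Events at or above the trigger threshold are
 * logged for the fabric manager through log_cca_event().
 */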

/**
 * hfi1_rc_rcv - process an incoming RC packet
 * @packet: data packet information
 *
 * This is called from qp_rcv() to process an incoming RC packet
 * for the given QP.
 * May be called at interrupt level.
 */
void hfi1_rc_rcv(struct hfi1_packet *packet)
{
2817 struct hfi1_ctxtdata *rcd = packet->rcd;
2818 void *data = packet->payload;
2819 u32 tlen = packet->tlen;
2820 struct rvt_qp *qp = packet->qp;
2821 struct hfi1_qp_priv *qpriv = qp->priv;
2822 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2823 struct ib_other_headers *ohdr = packet->ohdr;
2824 u32 opcode = packet->opcode;
2825 u32 hdrsize = packet->hlen;
2826 u32 psn = ib_bth_get_psn(packet->ohdr);
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;
	bool copy_last = false, fecn;
	u32 rkey;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2838 lockdep_assert_held(&qp->r_lock);

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	fecn = process_ecn(qp, packet);
	opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		rc_rcv_resp(packet);
		return;
	}

2858 /* Compute 24 bits worth of difference. */
2859 diff = delta_psn(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}

2866 /* Check for opcode sequence errors. */
2867 switch (qp->r_state) {
2868 case OP(SEND_FIRST):
2869 case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(SEND_LAST_WITH_INVALIDATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
2886 if (opcode == OP(SEND_MIDDLE) ||
2887 opcode == OP(SEND_LAST) ||
2888 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2889 opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
2890 opcode == OP(RDMA_WRITE_MIDDLE) ||
2891 opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
2907 case OP(SEND_FIRST):
2908 ret = rvt_get_rwqe(qp, false);
2915 case OP(SEND_MIDDLE):
2916 case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		/*
		 * There will be no padding for 9B packets, but 16B packets
		 * will come in with some padding since we always add
		 * CRC and LT bytes, which will need to be flit aligned.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto nack_inv;
2926 qp->r_rcv_len += pmtu;
2927 if (unlikely(qp->r_rcv_len > qp->r_len))
2929 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
2932 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2934 ret = rvt_get_rwqe(qp, true);
2942 case OP(SEND_ONLY_WITH_IMMEDIATE):
2943 case OP(SEND_ONLY_WITH_INVALIDATE):
2944 ret = rvt_get_rwqe(qp, false);
2950 if (opcode == OP(SEND_ONLY))
2951 goto no_immediate_data;
2952 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
2954 /* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
2955 case OP(SEND_LAST_WITH_IMMEDIATE):
2957 wc.ex.imm_data = ohdr->u.imm_data;
2958 wc.wc_flags = IB_WC_WITH_IMM;
2960 case OP(SEND_LAST_WITH_INVALIDATE):
2962 rkey = be32_to_cpu(ohdr->u.ieth);
2963 if (rvt_invalidate_rkey(qp, rkey))
2964 goto no_immediate_data;
2965 wc.ex.invalidate_rkey = rkey;
2966 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2968 case OP(RDMA_WRITE_LAST):
2969 copy_last = rvt_is_user_qp(qp);
2976 /* Check for invalid length. */
2977 /* LAST len should be >= 1 */
2978 if (unlikely(tlen < (hdrsize + extra_bytes)))
2980 /* Don't count the CRC(and padding and LT byte for 16B). */
2981 tlen -= (hdrsize + extra_bytes);
2982 wc.byte_len = tlen + qp->r_rcv_len;
2983 if (unlikely(wc.byte_len > qp->r_len))
2985 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
2986 rvt_put_ss(&qp->r_sge);
2988 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2990 wc.wr_id = qp->r_wr_id;
2991 wc.status = IB_WC_SUCCESS;
2992 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2993 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2994 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2996 wc.opcode = IB_WC_RECV;
2998 wc.src_qp = qp->remote_qpn;
2999 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     ib_bth_is_solicited(ohdr));
		break;

3022 case OP(RDMA_WRITE_ONLY):
3023 copy_last = rvt_is_user_qp(qp);
3025 case OP(RDMA_WRITE_FIRST):
3026 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
3027 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3030 reth = &ohdr->u.rc.reth;
3031 qp->r_len = be32_to_cpu(reth->length);
3033 qp->r_sge.sg_list = NULL;
3034 if (qp->r_len != 0) {
3035 u32 rkey = be32_to_cpu(reth->rkey);
3036 u64 vaddr = get_ib_reth_vaddr(reth);
3039 /* Check rkey & NAK */
3040 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
3041 rkey, IB_ACCESS_REMOTE_WRITE);
3044 qp->r_sge.num_sge = 1;
3046 qp->r_sge.num_sge = 0;
3047 qp->r_sge.sge.mr = NULL;
3048 qp->r_sge.sge.vaddr = NULL;
3049 qp->r_sge.sge.length = 0;
3050 qp->r_sge.sge.sge_length = 0;
3052 if (opcode == OP(RDMA_WRITE_FIRST))
3054 else if (opcode == OP(RDMA_WRITE_ONLY))
3055 goto no_immediate_data;
3056 ret = rvt_get_rwqe(qp, true);
3060 /* peer will send again */
3061 rvt_put_ss(&qp->r_sge);
3064 wc.ex.imm_data = ohdr->u.rc.imm_data;
3065 wc.wc_flags = IB_WC_WITH_IMM;
3068 case OP(RDMA_READ_REQUEST): {
3069 struct rvt_ack_entry *e;
3073 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3075 next = qp->r_head_ack_queue + 1;
3076 /* s_ack_queue is size rvt_size_atomic()+1 so use > not >= */
3077 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3079 spin_lock_irqsave(&qp->s_lock, flags);
3080 if (unlikely(next == qp->s_acked_ack_queue)) {
3081 if (!qp->s_ack_queue[next].sent)
3082 goto nack_inv_unlck;
3083 update_ack_queue(qp, next);
3085 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3086 release_rdma_sge_mr(e);
3087 reth = &ohdr->u.rc.reth;
3088 len = be32_to_cpu(reth->length);
3090 u32 rkey = be32_to_cpu(reth->rkey);
3091 u64 vaddr = get_ib_reth_vaddr(reth);
3094 /* Check rkey & NAK */
3095 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
3096 rkey, IB_ACCESS_REMOTE_READ);
3098 goto nack_acc_unlck;
3100 * Update the next expected PSN. We add 1 later
3101 * below, so only add the remainder here.
3103 qp->r_psn += rvt_div_mtu(qp, len - 1);
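			/*
			 * rvt_div_mtu(qp, len - 1) is the number of response
			 * packets beyond the first that this read will take.
			 */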
3105 e->rdma_sge.mr = NULL;
3106 e->rdma_sge.vaddr = NULL;
3107 e->rdma_sge.length = 0;
3108 e->rdma_sge.sge_length = 0;
3113 e->lpsn = qp->r_psn;
3115 * We need to increment the MSN here instead of when we
3116 * finish sending the result since a duplicate request would
3117 * increment it more than once.
3121 qp->r_state = opcode;
3122 qp->r_nak_state = 0;
3123 qp->r_head_ack_queue = next;
3124 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3126 /* Schedule the send engine. */
3127 qp->s_flags |= RVT_S_RESP_PENDING;
3129 qp->s_flags |= RVT_S_ECN;
3130 hfi1_schedule_send(qp);
3132 spin_unlock_irqrestore(&qp->s_lock, flags);
3136 case OP(COMPARE_SWAP):
3137 case OP(FETCH_ADD): {
3138 struct ib_atomic_eth *ateth = &ohdr->u.atomic_eth;
3139 u64 vaddr = get_ib_ateth_vaddr(ateth);
3140 bool opfn = opcode == OP(COMPARE_SWAP) &&
3141 vaddr == HFI1_VERBS_E_ATOMIC_VADDR;
3142 struct rvt_ack_entry *e;
3148 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
3151 next = qp->r_head_ack_queue + 1;
3152 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3154 spin_lock_irqsave(&qp->s_lock, flags);
3155 if (unlikely(next == qp->s_acked_ack_queue)) {
3156 if (!qp->s_ack_queue[next].sent)
3157 goto nack_inv_unlck;
3158 update_ack_queue(qp, next);
3160 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3161 release_rdma_sge_mr(e);
3162 /* Process OPFN special virtual address */
3164 opfn_conn_response(qp, e, ateth);
3167 if (unlikely(vaddr & (sizeof(u64) - 1)))
3168 goto nack_inv_unlck;
3169 rkey = be32_to_cpu(ateth->rkey);
3170 /* Check rkey & NAK */
3171 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3173 IB_ACCESS_REMOTE_ATOMIC)))
3174 goto nack_acc_unlck;
3175 /* Perform atomic OP and save result. */
3176 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3177 sdata = get_ib_ateth_swap(ateth);
3178 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
3179 (u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     get_ib_ateth_compare(ateth),
				     sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
3184 qp->r_sge.num_sge = 0;
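		/*
		 * The result computed above is kept in e->atomic_data so the
		 * responder can return it in the ATOMIC ACKNOWLEDGE built by
		 * make_rc_ack().
		 */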
3192 qp->r_state = opcode;
3193 qp->r_nak_state = 0;
3194 qp->r_head_ack_queue = next;
3195 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3197 /* Schedule the send engine. */
3198 qp->s_flags |= RVT_S_RESP_PENDING;
3200 qp->s_flags |= RVT_S_ECN;
3201 hfi1_schedule_send(qp);
3203 spin_unlock_irqrestore(&qp->s_lock, flags);
3208 /* NAK unknown opcodes. */
3212 qp->r_state = opcode;
3213 qp->r_ack_psn = psn;
3214 qp->r_nak_state = 0;
3215 /* Send an ACK if requested or required. */
	if (psn & IB_BTH_REQ_ACK || fecn) {
		if (packet->numpkt == 0 || fecn ||
		    qp->r_adefered >= HFI1_PSN_CREDIT) {
			rc_cancel_ack(qp);
			goto send_ack;
		}
		qp->r_adefered++;
		rc_defered_ack(rcd, qp);
	}
	return;

rnr_nak:
	qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
3229 qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3236 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
3237 qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3246 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3247 qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	rc_defered_ack(rcd, qp);
	return;

nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3256 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	hfi1_send_rc_ack(packet, fecn);
}

void hfi1_rc_hdrerr(
3263 struct hfi1_ctxtdata *rcd,
	struct hfi1_packet *packet,
	struct rvt_qp *qp)
{
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	int diff;
	u32 opcode;
	u32 psn;

	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

3275 psn = ib_bth_get_psn(packet->ohdr);
3276 opcode = ib_bth_get_opcode(packet->ohdr);
3278 /* Only deal with RDMA Writes for now */
3279 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
3280 diff = delta_psn(psn, qp->r_psn);
3281 if (!qp->r_nak_state && diff >= 0) {
3282 ibp->rvp.n_rc_seqnak++;
3283 qp->r_nak_state = IB_NAK_PSN_ERROR;
3284 /* Use the expected PSN. */
3285 qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence
			 * NAK until all packets
			 * in the receive queue have
			 * been processed.
			 * Otherwise, we end up
			 * propagating congestion.
			 */
			rc_defered_ack(rcd, qp);
		} /* Out of sequence NAK */
	} /* QP Request NAKs */
}