/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
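
/*
 * For example, OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST, the
 * UC opcode constant defined in <rdma/ib_pack.h>.
 */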

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @flags: unused
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;
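
	/*
	 * With a GRH the BTH and payload follow the 40-byte global route
	 * header, so the "long" (u.l) variant of the header union is used.
	 */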
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] &
		    RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == READ_ONCE(qp->s_head))
			goto bail;
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;
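
	/*
	 * The remaining states continue a message that is already in
	 * progress: each call emits one more PMTU-sized MIDDLE packet
	 * until what is left fits in a LAST packet.
	 */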
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		fallthrough;
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		fallthrough;
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
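	/*
	 * The opcode occupies the top byte of the first BTH word, so the
	 * chosen send state (a UC opcode) is shifted into bits 31:24 of
	 * bth0 in the call below.
	 */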
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_psn++ & QIB_PSN_MASK);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;
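
	/*
	 * PSNs are 24 bits wide; qib_cmp24() compares them modulo 2^24 so
	 * that wrap-around is handled correctly.
	 */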
	/* Compare the PSN versus the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			rvt_put_ss(&qp->r_sge);
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}
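
	/*
	 * UC has no acknowledgements or retransmission: a packet that does
	 * not fit the expected opcode sequence simply restarts or drops the
	 * current message via the inv: path above.
	 */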

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = rvt_get_rwqe(qp, false);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		fallthrough;
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
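		/*
		 * The pad count lives in bits 21:20 of the first BTH word;
		 * the extra 4 bytes accounted for below are the ICRC.
		 */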
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     ib_bth_is_solicited(ohdr));
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		fallthrough;
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
			rvt_put_ss(&qp->s_rdma_read_sge);
		else {
			ret = rvt_get_rwqe(qp, true);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	return;
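
	/*
	 * Error exits: "rewind" forgets the partially filled SGE state so
	 * the posted receive can be reused for the next message, "drop"
	 * only counts the packet, and "op_err" moves the QP into the error
	 * state with a local QP operation error.
	 */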
rewind:
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}