// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

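/*
 * Note: the parameter is read-only at runtime (S_IRUGO), so the size of the
 * QP lookup table handed to rdmavt can only be chosen at load time, e.g.
 * "modprobe hfi1 qp_table_size=1024" or "hfi1.qp_table_size=1024" on the
 * kernel command line.
 */
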
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

[IB_WR_OPFN] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_USE_RESERVE,
},

[IB_WR_TID_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_IGN_RNR_CNT,
},

};

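/*
 * Note: rdmavt's post-send path (rvt_post_one_wr()) indexes this table by
 * work request opcode to validate the WR size and the QP types allowed to
 * carry it; the flags request special handling such as atomics, local-only
 * operations, use of the reserved send queue slot, or ignoring the RNR
 * retry count.
 */
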
static void flush_list_head(struct list_head *l)
{
	while (!list_empty(l)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			l,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
	flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}

/*
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = (enum ib_mtu)OPA_MTU_8192;
	return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}

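/*
 * Example for verbs_mtu_enum_to_int(): a PathRecord advertising
 * OPA_MTU_10240 is clamped above, so the driver ends up using 8192 bytes;
 * standard IB values such as IB_MTU_4096 pass through unchanged.
 */
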
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;
		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;
		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

/*
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended. Only applicable for RC and UC
 * QPs. UD QPs determine this on the fly from the ah in the wqe
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Update ah_attr to account for extended LIDs */
	hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);
	/* Create 32 bit LIDs */
	hfi1_make_opa_lid(&qp->remote_ah_attr);

	if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
		return;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= HFI1_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	opfn_qp_init(qp, attr, attr_mask);
}

/**
 * hfi1_setup_wqe - set up the wqe
 * @qp - The qp
 * @wqe - The built wqe
 * @call_send - Determine if the send should be posted or scheduled.
 *
 * Perform setup of the wqe.  This is called
 * prior to inserting the wqe into the ring but after
 * the wqe has been set up by RDMAVT.  This function
 * allows the driver the opportunity to perform
 * validation and additional setup of the wqe.
 *
 * Returns 0 on success, -EINVAL on failure
 */
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
		hfi1_setup_tid_rdma_wqe(qp, wqe);
		fallthrough;
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI:
		/*
		 * SM packets should exclusively use VL15 and their SL is
		 * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
		 * is created, SL is 0 in most cases and as a result some
		 * fields (vl and pmtu) in ah may not be set correctly,
		 * depending on the SL2SC and SC2VL tables at the time.
		 */
		ppd = ppd_from_ibp(ibp);
		dd = dd_from_ppd(ppd);
		if (wqe->length > dd->vld[15].mtu)
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = rvt_get_swqe_ah(wqe);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
		break;
	default:
		break;
	}

	/*
	 * System latency between send and schedule is large enough that
	 * forcing call_send to true for piothreshold packets is necessary.
	 */
	if (wqe->length <= piothreshold)
		*call_send = true;
	return 0;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp - the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
bool _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			       priv->s_sde ?
			       priv->s_sde->cpu :
			       cpumask_first(cpumask_of_node(dd->node)));
}

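/*
 * qp_pio_drain() below waits for the QP's outstanding PIO sends to
 * complete: each pass arms the send context's buffer-available interrupt,
 * blocks until the QP's pending PIO count drops, then disarms the interrupt
 * and rechecks.
 */
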
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&priv->s_sendcontext->waitlock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&priv->s_sendcontext->waitlock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&priv->s_sendcontext->waitlock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp - the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 * @return true if the first leg is scheduled;
 * false if the first leg is not scheduled.
 */
bool hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp)) {
		_hfi1_schedule_send(qp);
		return true;
	}
	if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
		iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
				IOWAIT_PENDING_IB);
	return false;
}

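/*
 * hfi1_qp_schedule() handles the driver's two send "legs":
 * IOWAIT_PENDING_IB covers ordinary verbs sends and IOWAIT_PENDING_TID
 * covers TID RDMA traffic.  A pending flag is cleared only once its leg has
 * actually been scheduled.
 */
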
static void hfi1_qp_schedule(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	bool ret;

	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
		ret = hfi1_schedule_send(qp);
		if (ret)
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
	}
	if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_TID)) {
		ret = hfi1_schedule_tid_send(qp);
		if (ret)
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
	}
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_qp_schedule(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_set_work_flag(wait) == IOWAIT_IB_SE) {
		qp->s_flags &= ~RVT_S_BUSY;
		/*
		 * If we are sending a first-leg packet from the second leg,
		 * we need to clear the busy flag from priv->s_flags to
		 * avoid a race condition when the qp wakes up before
		 * the call to hfi1_verbs_send() returns to the second
		 * leg. In that case, the second leg will terminate without
		 * being re-scheduled, resulting in failure to send TID RDMA
		 * WRITE DATA and TID RDMA ACK packets.
		 */
		if (priv->s_flags & HFI1_S_TID_BUSY_SET) {
			priv->s_flags &= ~(HFI1_S_TID_BUSY_SET |
					   RVT_S_BUSY);
			iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
		}
	} else {
		priv->s_flags &= ~RVT_S_BUSY;
	}
}

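/*
 * iowait_sleep() below is the SDMA "no descriptors" callback.  Returning
 * -EBUSY means the txreq was parked on wait->tx_head and the QP will be
 * rescheduled from iowait_wakeup() when descriptors free up; returning
 * -EAGAIN tells the SDMA engine it made progress in the meantime and the
 * submission should simply be retried.
 */
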
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait_work *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&sde->waitlock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			iowait_get_priority(&priv->s_iowait);
			iowait_queue(pkts_sent, &priv->s_iowait,
				     &sde->dmawait);
			priv->s_iowait.lock = &sde->waitlock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&sde->waitlock);
		hfi1_qp_unbusy(qp, wait);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&sde->waitlock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

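/*
 * hfi1_init_priority() below gives a QP with an ACK outstanding (on either
 * leg) a small bump in iowait queueing priority, so responder work is less
 * likely to sit behind new requester work.
 */
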
static void hfi1_init_priority(struct iowait *w)
{
	struct rvt_qp *qp = iowait_to_qp(w);
	struct hfi1_qp_priv *priv = qp->priv;

	if (qp->s_flags & RVT_S_ACK_PENDING)
		w->priority++;
	if (priv->s_flags & RVT_S_ACK_PENDING)
		w->priority++;
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/**
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;
	struct rvt_ack_entry *e = NULL;
	struct rvt_srq *srq = qp->ibqp.srq ?
		ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	if (qp->s_ack_queue)
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   /* ack_queue ring pointers, size */
		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
		   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
		   /* remote QP info */
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ib_cq_head(qp->ibqp.send_cq),
		   ib_cq_tail(qp->ibqp.send_cq),
		   qp->pid,
		   qp->s_state,
		   qp->s_ack_state,
		   /* ack queue information */
		   e ? e->opcode : 0,
		   e ? e->psn : 0,
		   e ? e->lpsn : 0,
		   qp->r_min_rnr_timer,
		   srq ? "SRQ" : "RQ",
		   srq ? srq->rq.size : qp->r_rq.size
		);
}

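/*
 * The line emitted by qp_iter_print() is what shows up, one QP per line, in
 * the driver's qp_stats debugfs file; the single-letter tags (N, R, f=,
 * SPSN, ...) correspond positionally to the seq_printf() arguments above.
 */
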
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(&priv->s_iowait, 1, _hfi1_do_send, _hfi1_do_tid_send,
		    iowait_sleep, iowait_wakeup, iowait_sdma_drained,
		    hfi1_init_priority);
	/* Init to a value to start the running average correctly */
	priv->s_running_pkt_size = piothreshold / 2;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	hfi1_qp_priv_tid_free(rdi, qp);
	kfree(priv->s_ahg);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata, verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
	hfi1_tid_rdma_flush_wait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_cancel_work(&priv->s_iowait);
	if (cancel_work_sync(&priv->tid_rdma.trigger_work))
		rvt_put_qp(qp);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	hfi1_del_tid_reap_timer(qp);
	hfi1_del_tid_retry_timer(qp);
	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

void notify_qp_reset(struct rvt_qp *qp)
{
	hfi1_qp_kern_exp_rcv_clear_all(qp);
	qp->r_adefered = 0;
	clear_ahg(qp);

	/* Clear any OPFN state */
	if (qp->ibqp.qp_type == IB_QPT_RC)
		opfn_conn_error(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= HFI1_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	qp_set_16b(qp);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

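/*
 * mtu_from_qp() below returns the MTU rdmavt should use for the QP: the
 * smaller of the negotiated path MTU and the MTU configured for the VL
 * that the QP's SL currently maps to.
 */
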
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata, verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata, verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY) &&
		    !(priv->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
			iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_TID);
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY) && !(priv->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp - the qp
 * @v - the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
	int lastwqe;
	struct ib_event ev;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sl = (u8)v;

	if (qp->port_num != ppd->port ||
	    (qp->ibqp.qp_type != IB_QPT_UC &&
	     qp->ibqp.qp_type != IB_QPT_RC) ||
	    rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
	    !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
		return;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

	rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}