2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <rdma/ib_mad.h>
36 #include <rdma/ib_user_verbs.h>
38 #include <linux/module.h>
39 #include <linux/utsname.h>
40 #include <linux/rculist.h>
42 #include <linux/random.h>
43 #include <linux/vmalloc.h>
44 #include <rdma/rdma_vt.h>
47 #include "qib_common.h"
49 static unsigned int ib_qib_qp_table_size = 256;
50 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(qp_table_size, "QP table size");
53 static unsigned int qib_lkey_table_size = 16;
54 module_param_named(lkey_table_size, qib_lkey_table_size, uint,
56 MODULE_PARM_DESC(lkey_table_size,
57 "LKEY table size in bits (2^n, 1 <= n <= 23)");
59 static unsigned int ib_qib_max_pds = 0xFFFF;
60 module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
61 MODULE_PARM_DESC(max_pds,
62 "Maximum number of protection domains to support");
64 static unsigned int ib_qib_max_ahs = 0xFFFF;
65 module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
68 unsigned int ib_qib_max_cqes = 0x2FFFF;
69 module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_cqes,
71 "Maximum number of completion queue entries to support");
73 unsigned int ib_qib_max_cqs = 0x1FFFF;
74 module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
75 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
77 unsigned int ib_qib_max_qp_wrs = 0x3FFF;
78 module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
81 unsigned int ib_qib_max_qps = 16384;
82 module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
83 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
85 unsigned int ib_qib_max_sges = 0x60;
86 module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
87 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
89 unsigned int ib_qib_max_mcast_grps = 16384;
90 module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
91 MODULE_PARM_DESC(max_mcast_grps,
92 "Maximum number of multicast groups to support");
94 unsigned int ib_qib_max_mcast_qp_attached = 16;
95 module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
97 MODULE_PARM_DESC(max_mcast_qp_attached,
98 "Maximum number of attached QPs to support");
100 unsigned int ib_qib_max_srqs = 1024;
101 module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
102 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
104 unsigned int ib_qib_max_srq_sges = 128;
105 module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
106 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
108 unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
109 module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
110 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
112 static unsigned int ib_qib_disable_sma;
113 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
114 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
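/*
 * When disable_sma is set, incoming SMI/GSI management packets are dropped
 * in qib_qp_rcv() (see the ib_qib_disable_sma check below) instead of being
 * delivered.
 */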
117 * Note that it is OK to post send work requests in the SQE and ERR
118 * states; qib_do_send() will process them and generate error
119 * completions as per IB 1.2 C10-96.
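 *
 * For example, qib_post_one_send() tests
 * (ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK) before queuing a WQE,
 * and qib_qp_rcv() uses QIB_PROCESS_RECV_OK to decide whether an incoming
 * packet may be handed to the QP.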
121 const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
123 [IB_QPS_INIT] = QIB_POST_RECV_OK,
124 [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
125 [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
126 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
127 QIB_PROCESS_NEXT_SEND_OK,
128 [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
129 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
130 [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
131 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
132 [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
133 QIB_POST_SEND_OK | QIB_FLUSH_SEND,
136 struct qib_ucontext {
137 struct ib_ucontext ibucontext;
140 static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
143 return container_of(ibucontext, struct qib_ucontext, ibucontext);
147 * Translate ib_wr_opcode into ib_wc_opcode.
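 * The index is the opcode of the completed send WQE; e.g. a WQE posted
 * with IB_WR_SEND_WITH_IMM is reported as IB_WC_SEND in its completion.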
149 const enum ib_wc_opcode ib_qib_wc_opcode[] = {
150 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
151 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
152 [IB_WR_SEND] = IB_WC_SEND,
153 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
154 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
155 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
156 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
162 __be64 ib_qib_sys_image_guid;
165 * qib_copy_sge - copy data to SGE memory
167 * @data: the data to copy
168 * @length: the length of the data
170 void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
172 struct rvt_sge *sge = &ss->sge;
175 u32 len = sge->length;
179 if (len > sge->sge_length)
180 len = sge->sge_length;
182 memcpy(sge->vaddr, data, len);
185 sge->sge_length -= len;
186 if (sge->sge_length == 0) {
190 *sge = *ss->sg_list++;
191 } else if (sge->length == 0 && sge->mr->lkey) {
192 if (++sge->n >= RVT_SEGSZ) {
193 if (++sge->m >= sge->mr->mapsz)
198 sge->mr->map[sge->m]->segs[sge->n].vaddr;
200 sge->mr->map[sge->m]->segs[sge->n].length;
208 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
210 * @length: the number of bytes to skip
212 void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
214 struct rvt_sge *sge = &ss->sge;
217 u32 len = sge->length;
221 if (len > sge->sge_length)
222 len = sge->sge_length;
226 sge->sge_length -= len;
227 if (sge->sge_length == 0) {
231 *sge = *ss->sg_list++;
232 } else if (sge->length == 0 && sge->mr->lkey) {
233 if (++sge->n >= RVT_SEGSZ) {
234 if (++sge->m >= sge->mr->mapsz)
239 sge->mr->map[sge->m]->segs[sge->n].vaddr;
241 sge->mr->map[sge->m]->segs[sge->n].length;
248 * Count the number of DMA descriptors needed to send length bytes of data.
249 * Don't modify the qib_sge_state to get the count.
250 * Return zero if any of the segments is not aligned.
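 * A segment counts as unaligned when its address, or its length for any
 * chunk other than the final one, is not a multiple of sizeof(u32).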
252 static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
254 struct rvt_sge *sg_list = ss->sg_list;
255 struct rvt_sge sge = ss->sge;
256 u8 num_sge = ss->num_sge;
257 u32 ndesc = 1; /* count the header */
260 u32 len = sge.length;
264 if (len > sge.sge_length)
265 len = sge.sge_length;
267 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
268 (len != length && (len & (sizeof(u32) - 1)))) {
275 sge.sge_length -= len;
276 if (sge.sge_length == 0) {
279 } else if (sge.length == 0 && sge.mr->lkey) {
280 if (++sge.n >= RVT_SEGSZ) {
281 if (++sge.m >= sge.mr->mapsz)
286 sge.mr->map[sge.m]->segs[sge.n].vaddr;
288 sge.mr->map[sge.m]->segs[sge.n].length;
296 * Copy from the SGEs to the data buffer.
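 * This is the gather-side counterpart of qib_copy_sge(): it walks the same
 * SGE state but copies bytes out of the SGEs into the flat buffer at @data.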
298 static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
300 struct rvt_sge *sge = &ss->sge;
303 u32 len = sge->length;
307 if (len > sge->sge_length)
308 len = sge->sge_length;
310 memcpy(data, sge->vaddr, len);
313 sge->sge_length -= len;
314 if (sge->sge_length == 0) {
316 *sge = *ss->sg_list++;
317 } else if (sge->length == 0 && sge->mr->lkey) {
318 if (++sge->n >= RVT_SEGSZ) {
319 if (++sge->m >= sge->mr->mapsz)
324 sge->mr->map[sge->m]->segs[sge->n].vaddr;
326 sge->mr->map[sge->m]->segs[sge->n].length;
334 * qib_post_one_send - post one RC, UC, or UD send work request
335 * @qp: the QP to post on
336 * @wr: the work request to send
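 *
 * Returns 0 on success, otherwise a negative errno.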
338 static int qib_post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr,
341 struct rvt_swqe *wqe;
348 struct rvt_lkey_table *rkt;
350 int avoid_schedule = 0;
352 spin_lock_irqsave(&qp->s_lock, flags);
354 /* Check that state is OK to post send. */
355 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
358 /* IB spec says that num_sge == 0 is OK. */
359 if (wr->num_sge > qp->s_max_sge)
363 * Don't allow RDMA reads or atomic operations on UC QPs, or
364 * undefined operations.
365 * Make sure buffer is large enough to hold the result for atomics.
367 if (qp->ibqp.qp_type == IB_QPT_UC) {
368 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
370 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
371 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
372 if (wr->opcode != IB_WR_SEND &&
373 wr->opcode != IB_WR_SEND_WITH_IMM)
375 /* Check UD destination address PD */
376 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
378 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
380 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
382 wr->sg_list[0].length < sizeof(u64) ||
383 wr->sg_list[0].addr & (sizeof(u64) - 1)))
385 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
388 next = qp->s_head + 1;
389 if (next >= qp->s_size)
391 if (next == qp->s_last) {
396 rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
397 pd = ibpd_to_rvtpd(qp->ibqp.pd);
398 wqe = get_swqe_ptr(qp, qp->s_head);
400 if (qp->ibqp.qp_type != IB_QPT_UC &&
401 qp->ibqp.qp_type != IB_QPT_RC)
402 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
403 else if (wr->opcode == IB_WR_REG_MR)
404 memcpy(&wqe->reg_wr, reg_wr(wr),
405 sizeof(wqe->reg_wr));
406 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
407 wr->opcode == IB_WR_RDMA_WRITE ||
408 wr->opcode == IB_WR_RDMA_READ)
409 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
410 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
411 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
412 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
414 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
419 acc = wr->opcode >= IB_WR_RDMA_READ ?
420 IB_ACCESS_LOCAL_WRITE : 0;
421 for (i = 0; i < wr->num_sge; i++) {
422 u32 length = wr->sg_list[i].length;
427 ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
428 &wr->sg_list[i], acc);
430 goto bail_inval_free;
431 wqe->length += length;
436 if (qp->ibqp.qp_type == IB_QPT_UC ||
437 qp->ibqp.qp_type == IB_QPT_RC) {
438 if (wqe->length > 0x80000000U)
439 goto bail_inval_free;
440 if (wqe->length <= qp->pmtu)
442 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
443 qp->port_num - 1)->ibmtu) {
444 goto bail_inval_free;
446 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
449 wqe->ssn = qp->s_ssn++;
457 struct rvt_sge *sge = &wqe->sg_list[--j];
464 if (!ret && !wr->next && !avoid_schedule &&
466 dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
467 qib_schedule_send(qp);
470 spin_unlock_irqrestore(&qp->s_lock, flags);
475 * qib_post_send - post a send on a QP
476 * @ibqp: the QP to post the send on
477 * @wr: the list of work requests to post
478 * @bad_wr: the first bad WR is put here
480 * This may be called from interrupt context.
482 static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
483 struct ib_send_wr **bad_wr)
485 struct rvt_qp *qp = to_iqp(ibqp);
486 struct qib_qp_priv *priv = qp->priv;
490 for (; wr; wr = wr->next) {
491 err = qib_post_one_send(qp, wr, &scheduled);
498 /* Try to do the send work in the caller's context. */
500 qib_do_send(&priv->s_work);
507 * qib_post_receive - post a receive on a QP
508 * @ibqp: the QP to post the receive on
509 * @wr: the WR to post
510 * @bad_wr: the first bad WR is put here
512 * This may be called from interrupt context.
514 static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
515 struct ib_recv_wr **bad_wr)
517 struct rvt_qp *qp = to_iqp(ibqp);
518 struct rvt_rwq *wq = qp->r_rq.wq;
522 /* Check that state is OK to post receive. */
523 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
529 for (; wr; wr = wr->next) {
530 struct rvt_rwqe *wqe;
534 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
540 spin_lock_irqsave(&qp->r_rq.lock, flags);
542 if (next >= qp->r_rq.size)
544 if (next == wq->tail) {
545 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
551 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
552 wqe->wr_id = wr->wr_id;
553 wqe->num_sge = wr->num_sge;
554 for (i = 0; i < wr->num_sge; i++)
555 wqe->sg_list[i] = wr->sg_list[i];
556 /* Make sure queue entry is written before the head index. */
559 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
569 * qib_qp_rcv - process an incoming packet on a QP
569 * @rcd: the context pointer
570 * @hdr: the packet header
571 * @has_grh: true if the packet has a GRH
572 * @data: the packet data
573 * @tlen: the packet length
574 * @qp: the QP the packet came on
576 * This is called from qib_ib_rcv() to process an incoming packet
578 * Called at interrupt level.
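 * The QP's r_lock is taken below to serialize with receive state changes.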
580 static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
581 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
583 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
585 spin_lock(&qp->r_lock);
587 /* Check for valid receive state. */
588 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
589 ibp->rvp.n_pkt_drops++;
593 switch (qp->ibqp.qp_type) {
596 if (ib_qib_disable_sma)
600 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
604 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
608 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
616 spin_unlock(&qp->r_lock);
620 * qib_ib_rcv - process an incoming packet
621 * @rcd: the context pointer
622 * @rhdr: the header of the packet
623 * @data: the packet payload
624 * @tlen: the packet length
626 * This is called from qib_kreceive() to process an incoming packet at
627 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
629 void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
631 struct qib_pportdata *ppd = rcd->ppd;
632 struct qib_ibport *ibp = &ppd->ibport_data;
633 struct qib_ib_header *hdr = rhdr;
634 struct qib_other_headers *ohdr;
641 /* 24 == LRH+BTH+CRC */
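/* i.e. 8-byte LRH + 12-byte BTH + 4-byte ICRC, the smallest valid IB packet */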
642 if (unlikely(tlen < 24))
645 /* Check for a valid destination LID (see ch. 7.11.1). */
646 lid = be16_to_cpu(hdr->lrh[1]);
647 if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
648 lid &= ~((1 << ppd->lmc) - 1);
649 if (unlikely(lid != ppd->lid))
654 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
655 if (lnh == QIB_LRH_BTH)
657 else if (lnh == QIB_LRH_GRH) {
660 ohdr = &hdr->u.l.oth;
661 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
663 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
664 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
669 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
670 #ifdef CONFIG_DEBUG_FS
671 rcd->opstats->stats[opcode].n_bytes += tlen;
672 rcd->opstats->stats[opcode].n_packets++;
675 /* Get the destination QP number. */
676 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
677 if (qp_num == QIB_MULTICAST_QPN) {
678 struct qib_mcast *mcast;
679 struct qib_mcast_qp *p;
681 if (lnh != QIB_LRH_GRH)
683 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
686 this_cpu_inc(ibp->pmastats->n_multicast_rcv);
687 list_for_each_entry_rcu(p, &mcast->qp_list, list)
688 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
690 * Notify qib_multicast_detach() if it is waiting for us
693 if (atomic_dec_return(&mcast->refcount) <= 1)
694 wake_up(&mcast->wait);
696 if (rcd->lookaside_qp) {
697 if (rcd->lookaside_qpn != qp_num) {
698 if (atomic_dec_and_test(
699 &rcd->lookaside_qp->refcount))
701 &rcd->lookaside_qp->wait);
702 rcd->lookaside_qp = NULL;
705 if (!rcd->lookaside_qp) {
706 qp = qib_lookup_qpn(ibp, qp_num);
709 rcd->lookaside_qp = qp;
710 rcd->lookaside_qpn = qp_num;
712 qp = rcd->lookaside_qp;
713 this_cpu_inc(ibp->pmastats->n_unicast_rcv);
714 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
719 ibp->rvp.n_pkt_drops++;
723 * This is called from a timer to check for QPs
724 * which need kernel memory in order to send a packet.
726 static void mem_timer(unsigned long data)
728 struct qib_ibdev *dev = (struct qib_ibdev *) data;
729 struct list_head *list = &dev->memwait;
730 struct rvt_qp *qp = NULL;
731 struct qib_qp_priv *priv = NULL;
734 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
735 if (!list_empty(list)) {
736 priv = list_entry(list->next, struct qib_qp_priv, iowait);
738 list_del_init(&priv->iowait);
739 atomic_inc(&qp->refcount);
740 if (!list_empty(list))
741 mod_timer(&dev->mem_timer, jiffies + 1);
743 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
746 spin_lock_irqsave(&qp->s_lock, flags);
747 if (qp->s_flags & QIB_S_WAIT_KMEM) {
748 qp->s_flags &= ~QIB_S_WAIT_KMEM;
749 qib_schedule_send(qp);
751 spin_unlock_irqrestore(&qp->s_lock, flags);
752 if (atomic_dec_and_test(&qp->refcount))
757 static void update_sge(struct rvt_sge_state *ss, u32 length)
759 struct rvt_sge *sge = &ss->sge;
761 sge->vaddr += length;
762 sge->length -= length;
763 sge->sge_length -= length;
764 if (sge->sge_length == 0) {
766 *sge = *ss->sg_list++;
767 } else if (sge->length == 0 && sge->mr->lkey) {
768 if (++sge->n >= RVT_SEGSZ) {
769 if (++sge->m >= sge->mr->mapsz)
773 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
774 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
778 #ifdef __LITTLE_ENDIAN
779 static inline u32 get_upper_bits(u32 data, u32 shift)
781 return data >> shift;
784 static inline u32 set_upper_bits(u32 data, u32 shift)
786 return data << shift;
789 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
791 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
792 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
796 static inline u32 get_upper_bits(u32 data, u32 shift)
798 return data << shift;
801 static inline u32 set_upper_bits(u32 data, u32 shift)
803 return data >> shift;
806 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
808 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
809 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
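/*
 * Viewed in memory (byte) order, get_upper_bits() drops the first shift/8
 * bytes of a word, set_upper_bits() moves bytes toward higher byte offsets,
 * and clear_upper_bytes() keeps the first n bytes of a word placed at byte
 * offset off.  copy_io() uses them to repack unaligned source data into
 * aligned 32-bit PIO writes.
 */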
814 static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
815 u32 length, unsigned flush_wc)
822 u32 len = ss->sge.length;
827 if (len > ss->sge.sge_length)
828 len = ss->sge.sge_length;
830 /* If the source address is not aligned, try to align it. */
831 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
833 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
835 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
838 y = sizeof(u32) - off;
841 if (len + extra >= sizeof(u32)) {
842 data |= set_upper_bits(v, extra *
844 len = sizeof(u32) - extra;
849 __raw_writel(data, piobuf);
854 /* Clear unused upper bytes */
855 data |= clear_upper_bytes(v, len, extra);
863 /* Source address is aligned. */
864 u32 *addr = (u32 *) ss->sge.vaddr;
865 int shift = extra * BITS_PER_BYTE;
866 int ushift = 32 - shift;
869 while (l >= sizeof(u32)) {
872 data |= set_upper_bits(v, shift);
873 __raw_writel(data, piobuf);
874 data = get_upper_bits(v, ushift);
880 * We still have 'extra' number of bytes leftover.
885 if (l + extra >= sizeof(u32)) {
886 data |= set_upper_bits(v, shift);
887 len -= l + extra - sizeof(u32);
892 __raw_writel(data, piobuf);
897 /* Clear unused upper bytes */
898 data |= clear_upper_bytes(v, l, extra);
905 } else if (len == length) {
909 } else if (len == length) {
913 * Need to round up for the last dword in the
917 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
919 last = ((u32 *) ss->sge.vaddr)[w - 1];
924 qib_pio_copy(piobuf, ss->sge.vaddr, w);
927 extra = len & (sizeof(u32) - 1);
929 u32 v = ((u32 *) ss->sge.vaddr)[w];
931 /* Clear unused upper bytes */
932 data = clear_upper_bytes(v, extra, 0);
938 /* Update address before sending packet. */
939 update_sge(ss, length);
941 /* flush everything written so far before the trigger word */
943 __raw_writel(last, piobuf);
944 /* be sure trigger word is written */
947 __raw_writel(last, piobuf);
950 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
953 struct qib_qp_priv *priv = qp->priv;
954 struct qib_verbs_txreq *tx;
957 spin_lock_irqsave(&qp->s_lock, flags);
958 spin_lock(&dev->rdi.pending_lock);
960 if (!list_empty(&dev->txreq_free)) {
961 struct list_head *l = dev->txreq_free.next;
964 spin_unlock(&dev->rdi.pending_lock);
965 spin_unlock_irqrestore(&qp->s_lock, flags);
966 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
968 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
969 list_empty(&priv->iowait)) {
971 qp->s_flags |= QIB_S_WAIT_TX;
972 list_add_tail(&priv->iowait, &dev->txwait);
974 qp->s_flags &= ~QIB_S_BUSY;
975 spin_unlock(&dev->rdi.pending_lock);
976 spin_unlock_irqrestore(&qp->s_lock, flags);
977 tx = ERR_PTR(-EBUSY);
982 static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
985 struct qib_verbs_txreq *tx;
988 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
989 /* assume the free list is non-empty */
990 if (likely(!list_empty(&dev->txreq_free))) {
991 struct list_head *l = dev->txreq_free.next;
994 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
995 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
997 /* call slow path to get the extra lock */
998 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
999 tx = __get_txreq(dev, qp);
1004 void qib_put_txreq(struct qib_verbs_txreq *tx)
1006 struct qib_ibdev *dev;
1008 struct qib_qp_priv *priv;
1009 unsigned long flags;
1012 dev = to_idev(qp->ibqp.device);
1014 if (atomic_dec_and_test(&qp->refcount))
1020 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
1021 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
1022 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
1023 tx->txreq.addr, tx->hdr_dwords << 2,
1025 kfree(tx->align_buf);
1028 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1030 /* Put struct back on free list */
1031 list_add(&tx->txreq.list, &dev->txreq_free);
1033 if (!list_empty(&dev->txwait)) {
1034 /* Wake up first QP wanting a free struct */
1035 priv = list_entry(dev->txwait.next, struct qib_qp_priv,
1038 list_del_init(&priv->iowait);
1039 atomic_inc(&qp->refcount);
1040 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1042 spin_lock_irqsave(&qp->s_lock, flags);
1043 if (qp->s_flags & QIB_S_WAIT_TX) {
1044 qp->s_flags &= ~QIB_S_WAIT_TX;
1045 qib_schedule_send(qp);
1047 spin_unlock_irqrestore(&qp->s_lock, flags);
1049 if (atomic_dec_and_test(&qp->refcount))
1052 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1056 * This is called when there are send DMA descriptors that might be
1059 * This is called with ppd->sdma_lock held.
1061 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
1063 struct rvt_qp *qp, *nqp;
1064 struct qib_qp_priv *qpp, *nqpp;
1065 struct rvt_qp *qps[20];
1066 struct qib_ibdev *dev;
1070 dev = &ppd->dd->verbs_dev;
1071 spin_lock(&dev->rdi.pending_lock);
1073 /* Search wait list for first QP wanting DMA descriptors. */
1074 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
1077 if (qp->port_num != ppd->port)
1079 if (n == ARRAY_SIZE(qps))
1081 if (qpp->s_tx->txreq.sg_count > avail)
1083 avail -= qpp->s_tx->txreq.sg_count;
1084 list_del_init(&qpp->iowait);
1085 atomic_inc(&qp->refcount);
1089 spin_unlock(&dev->rdi.pending_lock);
1091 for (i = 0; i < n; i++) {
1093 spin_lock(&qp->s_lock);
1094 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
1095 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
1096 qib_schedule_send(qp);
1098 spin_unlock(&qp->s_lock);
1099 if (atomic_dec_and_test(&qp->refcount))
1105 * This is called with ppd->sdma_lock held.
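 * sdma_complete() is installed as the txreq callback by qib_verbs_send_dma()
 * and runs once the hardware is finished with the descriptor.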
1107 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1109 struct qib_verbs_txreq *tx =
1110 container_of(cookie, struct qib_verbs_txreq, txreq);
1111 struct rvt_qp *qp = tx->qp;
1112 struct qib_qp_priv *priv = qp->priv;
1114 spin_lock(&qp->s_lock);
1116 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1117 else if (qp->ibqp.qp_type == IB_QPT_RC) {
1118 struct qib_ib_header *hdr;
1120 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1121 hdr = &tx->align_buf->hdr;
1123 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1125 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1127 qib_rc_send_complete(qp, hdr);
1129 if (atomic_dec_and_test(&priv->s_dma_busy)) {
1130 if (qp->state == IB_QPS_RESET)
1131 wake_up(&priv->wait_dma);
1132 else if (qp->s_flags & QIB_S_WAIT_DMA) {
1133 qp->s_flags &= ~QIB_S_WAIT_DMA;
1134 qib_schedule_send(qp);
1137 spin_unlock(&qp->s_lock);
1142 static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
1144 struct qib_qp_priv *priv = qp->priv;
1145 unsigned long flags;
1148 spin_lock_irqsave(&qp->s_lock, flags);
1149 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1150 spin_lock(&dev->rdi.pending_lock);
1151 if (list_empty(&priv->iowait)) {
1152 if (list_empty(&dev->memwait))
1153 mod_timer(&dev->mem_timer, jiffies + 1);
1154 qp->s_flags |= QIB_S_WAIT_KMEM;
1155 list_add_tail(&priv->iowait, &dev->memwait);
1157 spin_unlock(&dev->rdi.pending_lock);
1158 qp->s_flags &= ~QIB_S_BUSY;
1161 spin_unlock_irqrestore(&qp->s_lock, flags);
1166 static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
1167 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
1168 u32 plen, u32 dwords)
1170 struct qib_qp_priv *priv = qp->priv;
1171 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1172 struct qib_devdata *dd = dd_from_dev(dev);
1173 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1174 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1175 struct qib_verbs_txreq *tx;
1176 struct qib_pio_header *phdr;
1184 /* resend previously constructed packet */
1185 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1189 tx = get_txreq(dev, qp);
1193 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1194 be16_to_cpu(hdr->lrh[0]) >> 12);
1196 atomic_inc(&qp->refcount);
1197 tx->wqe = qp->s_wqe;
1198 tx->mr = qp->s_rdma_mr;
1200 qp->s_rdma_mr = NULL;
1201 tx->txreq.callback = sdma_complete;
1202 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1203 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1205 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1206 if (plen + 1 > dd->piosize2kmax_dwords)
1207 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
1211 * Don't try to DMA if it takes more descriptors than
1214 ndesc = qib_count_sge(ss, len);
1215 if (ndesc >= ppd->sdma_descq_cnt)
1220 phdr = &dev->pio_hdrs[tx->hdr_inx];
1221 phdr->pbc[0] = cpu_to_le32(plen);
1222 phdr->pbc[1] = cpu_to_le32(control);
1223 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1224 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1225 tx->txreq.sg_count = ndesc;
1226 tx->txreq.addr = dev->pio_hdrs_phys +
1227 tx->hdr_inx * sizeof(struct qib_pio_header);
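/* the PBC (pbc[0]/pbc[1] above) occupies two dwords, hence the +2 below */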
1228 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1229 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1233 /* Allocate a buffer and copy the header and payload to it. */
1234 tx->hdr_dwords = plen + 1;
1235 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1238 phdr->pbc[0] = cpu_to_le32(plen);
1239 phdr->pbc[1] = cpu_to_le32(control);
1240 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1241 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1243 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1244 tx->hdr_dwords << 2, DMA_TO_DEVICE);
1245 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1247 tx->align_buf = phdr;
1248 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1249 tx->txreq.sg_count = 1;
1250 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1257 ret = wait_kmem(dev, qp);
1259 ibp->rvp.n_unaligned++;
1268 * If we are now in the error state, return zero to flush the
1269 * send work request.
1271 static int no_bufs_available(struct rvt_qp *qp)
1273 struct qib_qp_priv *priv = qp->priv;
1274 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1275 struct qib_devdata *dd;
1276 unsigned long flags;
1280 * Note that as soon as want_buffer() is called and
1281 * possibly before it returns, qib_ib_piobufavail()
1282 * could be called. Therefore, put QP on the I/O wait list before
1283 * enabling the PIO avail interrupt.
1285 spin_lock_irqsave(&qp->s_lock, flags);
1286 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1287 spin_lock(&dev->rdi.pending_lock);
1288 if (list_empty(&priv->iowait)) {
1290 qp->s_flags |= QIB_S_WAIT_PIO;
1291 list_add_tail(&priv->iowait, &dev->piowait);
1292 dd = dd_from_dev(dev);
1293 dd->f_wantpiobuf_intr(dd, 1);
1295 spin_unlock(&dev->rdi.pending_lock);
1296 qp->s_flags &= ~QIB_S_BUSY;
1299 spin_unlock_irqrestore(&qp->s_lock, flags);
1303 static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
1304 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
1305 u32 plen, u32 dwords)
1307 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1308 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1309 u32 *hdr = (u32 *) ibhdr;
1310 u32 __iomem *piobuf_orig;
1311 u32 __iomem *piobuf;
1313 unsigned long flags;
1318 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1319 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1320 pbc = ((u64) control << 32) | plen;
1321 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1322 if (unlikely(piobuf == NULL))
1323 return no_bufs_available(qp);
1327 * We have to flush after the PBC for correctness on some cpus
1328 * or the WC buffer can be written out of order.
1330 writeq(pbc, piobuf);
1331 piobuf_orig = piobuf;
1334 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1337 * If there is just the header portion, must flush before
1338 * writing last word of header for correctness, and after
1339 * the last header word (trigger word).
1343 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1345 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1348 qib_pio_copy(piobuf, hdr, hdrwords);
1354 qib_pio_copy(piobuf, hdr, hdrwords);
1357 /* The common case is aligned and contained in one segment. */
1358 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1359 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1360 u32 *addr = (u32 *) ss->sge.vaddr;
1362 /* Update address before sending packet. */
1363 update_sge(ss, len);
1365 qib_pio_copy(piobuf, addr, dwords - 1);
1366 /* flush everything written so far before the trigger word */
1368 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1369 /* be sure trigger word is written */
1372 qib_pio_copy(piobuf, addr, dwords);
1375 copy_io(piobuf, ss, len, flush_wc);
1377 if (dd->flags & QIB_USE_SPCL_TRIG) {
1378 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1381 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1383 qib_sendbuf_done(dd, pbufn);
1384 if (qp->s_rdma_mr) {
1385 rvt_put_mr(qp->s_rdma_mr);
1386 qp->s_rdma_mr = NULL;
1389 spin_lock_irqsave(&qp->s_lock, flags);
1390 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1391 spin_unlock_irqrestore(&qp->s_lock, flags);
1392 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1393 spin_lock_irqsave(&qp->s_lock, flags);
1394 qib_rc_send_complete(qp, ibhdr);
1395 spin_unlock_irqrestore(&qp->s_lock, flags);
1401 * qib_verbs_send - send a packet
1402 * @qp: the QP to send on
1403 * @hdr: the packet header
1404 * @hdrwords: the number of 32-bit words in the header
1405 * @ss: the SGE to send
1406 * @len: the length of the packet in bytes
1408 * Return zero if packet is sent or queued OK.
1409 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
1411 int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
1412 u32 hdrwords, struct rvt_sge_state *ss, u32 len)
1414 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1417 u32 dwords = (len + 3) >> 2;
1420 * Calculate the send buffer trigger address.
1421 * The +1 counts for the pbc control dword following the pbc length.
1423 plen = hdrwords + dwords + 1;
1426 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1427 * can defer SDMA restart until link goes ACTIVE without
1428 * worrying about just how we got there.
1430 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1431 !(dd->flags & QIB_HAS_SEND_DMA))
1432 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1435 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1441 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1442 u64 *rwords, u64 *spkts, u64 *rpkts,
1446 struct qib_devdata *dd = ppd->dd;
1448 if (!(dd->flags & QIB_PRESENT)) {
1449 /* no hardware, freeze, etc. */
1453 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1454 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1455 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1456 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1457 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1466 * qib_get_counters - get various chip counters
1467 * @ppd: the qlogic_ib port
1468 * @cntrs: counters are placed here
1470 * Return the counters needed by recv_pma_get_portcounters().
1472 int qib_get_counters(struct qib_pportdata *ppd,
1473 struct qib_verbs_counters *cntrs)
1477 if (!(ppd->dd->flags & QIB_PRESENT)) {
1478 /* no hardware, freeze, etc. */
1482 cntrs->symbol_error_counter =
1483 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1484 cntrs->link_error_recovery_counter =
1485 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1487 * The link downed counter counts when the other side downs the
1488 * connection. We add in the number of times we downed the link
1489 * due to local link integrity errors to compensate.
1491 cntrs->link_downed_counter =
1492 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1493 cntrs->port_rcv_errors =
1494 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1495 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1496 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1497 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1498 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1499 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1500 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1501 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1502 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1503 cntrs->port_rcv_errors +=
1504 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1505 cntrs->port_rcv_errors +=
1506 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1507 cntrs->port_rcv_remphys_errors =
1508 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1509 cntrs->port_xmit_discards =
1510 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1511 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1512 QIBPORTCNTR_WORDSEND);
1513 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1514 QIBPORTCNTR_WORDRCV);
1515 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1516 QIBPORTCNTR_PKTSEND);
1517 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1518 QIBPORTCNTR_PKTRCV);
1519 cntrs->local_link_integrity_errors =
1520 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1521 cntrs->excessive_buffer_overrun_errors =
1522 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1523 cntrs->vl15_dropped =
1524 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1533 * qib_ib_piobufavail - callback when a PIO buffer is available
1534 * @dd: the device pointer
1536 * This is called from qib_intr() at interrupt level when a PIO buffer is
1537 * available after qib_verbs_send() returned an error that no buffers were
1538 * available. Disable the interrupt if there are no more QPs waiting.
1540 void qib_ib_piobufavail(struct qib_devdata *dd)
1542 struct qib_ibdev *dev = &dd->verbs_dev;
1543 struct list_head *list;
1544 struct rvt_qp *qps[5];
1546 unsigned long flags;
1548 struct qib_qp_priv *priv;
1550 list = &dev->piowait;
1554 * Note: checking that the piowait list is empty and clearing
1555 * the buffer available interrupt needs to be atomic or we
1556 * could end up with QPs on the wait list with the interrupt
1559 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1560 while (!list_empty(list)) {
1561 if (n == ARRAY_SIZE(qps))
1563 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1565 list_del_init(&priv->iowait);
1566 atomic_inc(&qp->refcount);
1569 dd->f_wantpiobuf_intr(dd, 0);
1571 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1573 for (i = 0; i < n; i++) {
1576 spin_lock_irqsave(&qp->s_lock, flags);
1577 if (qp->s_flags & QIB_S_WAIT_PIO) {
1578 qp->s_flags &= ~QIB_S_WAIT_PIO;
1579 qib_schedule_send(qp);
1581 spin_unlock_irqrestore(&qp->s_lock, flags);
1583 /* Notify qib_destroy_qp() if it is waiting. */
1584 if (atomic_dec_and_test(&qp->refcount))
1589 static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
1590 struct ib_udata *uhw)
1592 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1593 struct qib_ibdev *dev = to_idev(ibdev);
1595 if (uhw->inlen || uhw->outlen)
1597 memset(props, 0, sizeof(*props));
1599 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1600 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1601 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1602 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1603 props->page_size_cap = PAGE_SIZE;
1605 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1606 props->vendor_part_id = dd->deviceid;
1607 props->hw_ver = dd->minrev;
1608 props->sys_image_guid = ib_qib_sys_image_guid;
1609 props->max_mr_size = ~0ULL;
1610 props->max_qp = ib_qib_max_qps;
1611 props->max_qp_wr = ib_qib_max_qp_wrs;
1612 props->max_sge = ib_qib_max_sges;
1613 props->max_sge_rd = ib_qib_max_sges;
1614 props->max_cq = ib_qib_max_cqs;
1615 props->max_ah = ib_qib_max_ahs;
1616 props->max_cqe = ib_qib_max_cqes;
1617 props->max_mr = dev->rdi.lkey_table.max;
1618 props->max_fmr = dev->rdi.lkey_table.max;
1619 props->max_map_per_fmr = 32767;
1620 props->max_pd = dev->rdi.dparms.props.max_pd;
1621 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1622 props->max_qp_init_rd_atom = 255;
1623 /* props->max_res_rd_atom */
1624 props->max_srq = ib_qib_max_srqs;
1625 props->max_srq_wr = ib_qib_max_srq_wrs;
1626 props->max_srq_sge = ib_qib_max_srq_sges;
1627 /* props->local_ca_ack_delay */
1628 props->atomic_cap = IB_ATOMIC_GLOB;
1629 props->max_pkeys = qib_get_npkeys(dd);
1630 props->max_mcast_grp = ib_qib_max_mcast_grps;
1631 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1632 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
1633 props->max_mcast_grp;
1638 static int qib_query_port(struct ib_device *ibdev, u8 port,
1639 struct ib_port_attr *props)
1641 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1642 struct qib_ibport *ibp = to_iport(ibdev, port);
1643 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1647 memset(props, 0, sizeof(*props));
1648 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1649 props->lmc = ppd->lmc;
1650 props->sm_lid = ibp->rvp.sm_lid;
1651 props->sm_sl = ibp->rvp.sm_sl;
1652 props->state = dd->f_iblink_state(ppd->lastibcstat);
1653 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1654 props->port_cap_flags = ibp->rvp.port_cap_flags;
1655 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1656 props->max_msg_sz = 0x80000000;
1657 props->pkey_tbl_len = qib_get_npkeys(dd);
1658 props->bad_pkey_cntr = ibp->rvp.pkey_violations;
1659 props->qkey_viol_cntr = ibp->rvp.qkey_violations;
1660 props->active_width = ppd->link_width_active;
1661 /* See rate_show() */
1662 props->active_speed = ppd->link_speed_active;
1663 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1664 props->init_type_reply = 0;
1666 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1667 switch (ppd->ibmtu) {
1686 props->active_mtu = mtu;
1687 props->subnet_timeout = ibp->rvp.subnet_timeout;
1692 static int qib_modify_device(struct ib_device *device,
1693 int device_modify_mask,
1694 struct ib_device_modify *device_modify)
1696 struct qib_devdata *dd = dd_from_ibdev(device);
1700 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1701 IB_DEVICE_MODIFY_NODE_DESC)) {
1706 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1707 memcpy(device->node_desc, device_modify->node_desc, 64);
1708 for (i = 0; i < dd->num_pports; i++) {
1709 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1711 qib_node_desc_chg(ibp);
1715 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1716 ib_qib_sys_image_guid =
1717 cpu_to_be64(device_modify->sys_image_guid);
1718 for (i = 0; i < dd->num_pports; i++) {
1719 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1721 qib_sys_guid_chg(ibp);
1731 static int qib_modify_port(struct ib_device *ibdev, u8 port,
1732 int port_modify_mask, struct ib_port_modify *props)
1734 struct qib_ibport *ibp = to_iport(ibdev, port);
1735 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1737 ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
1738 ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
1739 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1740 qib_cap_mask_chg(ibp);
1741 if (port_modify_mask & IB_PORT_SHUTDOWN)
1742 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1743 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1744 ibp->rvp.qkey_violations = 0;
1748 static int qib_query_gid(struct ib_device *ibdev, u8 port,
1749 int index, union ib_gid *gid)
1751 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1754 if (!port || port > dd->num_pports)
1757 struct qib_ibport *ibp = to_iport(ibdev, port);
1758 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1760 gid->global.subnet_prefix = ibp->rvp.gid_prefix;
1762 gid->global.interface_id = ppd->guid;
1763 else if (index < QIB_GUIDS_PER_PORT)
1764 gid->global.interface_id = ibp->guids[index - 1];
1772 int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
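/* the IB SL field is only 4 bits wide, so values above 15 cannot be valid */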
1774 if (ah_attr->sl > 15)
1780 static void qib_notify_new_ah(struct ib_device *ibdev,
1781 struct ib_ah_attr *ah_attr,
1784 struct qib_ibport *ibp;
1785 struct qib_pportdata *ppd;
1788 * Do not trust reading anything from rvt_ah at this point as it is not
1789 * done being set up. We can however modify things which we need to set.
1792 ibp = to_iport(ibdev, ah_attr->port_num);
1793 ppd = ppd_from_ibp(ibp);
1794 ah->vl = ibp->sl_to_vl[ah->attr.sl];
1795 ah->log_pmtu = ilog2(ppd->ibmtu);
1798 struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1800 struct ib_ah_attr attr;
1801 struct ib_ah *ah = ERR_PTR(-EINVAL);
1804 memset(&attr, 0, sizeof(attr));
1806 attr.port_num = ppd_from_ibp(ibp)->port;
1808 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1810 ah = ib_create_ah(qp0->ibqp.pd, &attr);
1816 * qib_get_npkeys - return the size of the PKEY table for context 0
1817 * @dd: the qlogic_ib device
1819 unsigned qib_get_npkeys(struct qib_devdata *dd)
1821 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1825 * Return the indexed PKEY from the port PKEY table.
1826 * No need to validate rcd[ctxt]; the port is set up if we are here.
1828 unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1830 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1831 struct qib_devdata *dd = ppd->dd;
1832 unsigned ctxt = ppd->hw_pidx;
1835 /* dd->rcd null if mini_init or some init failures */
1836 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1839 ret = dd->rcd[ctxt]->pkeys[index];
1844 static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1847 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1850 if (index >= qib_get_npkeys(dd)) {
1855 *pkey = qib_get_pkey(to_iport(ibdev, port), index);
1863 * qib_alloc_ucontext - allocate a ucontext
1864 * @ibdev: the infiniband device
1865 * @udata: not used by the QLogic_IB driver
1868 static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
1869 struct ib_udata *udata)
1871 struct qib_ucontext *context;
1872 struct ib_ucontext *ret;
1874 context = kmalloc(sizeof(*context), GFP_KERNEL);
1876 ret = ERR_PTR(-ENOMEM);
1880 ret = &context->ibucontext;
1886 static int qib_dealloc_ucontext(struct ib_ucontext *context)
1888 kfree(to_iucontext(context));
1892 static void init_ibport(struct qib_pportdata *ppd)
1894 struct qib_verbs_counters cntrs;
1895 struct qib_ibport *ibp = &ppd->ibport_data;
1897 spin_lock_init(&ibp->rvp.lock);
1898 /* Set the prefix to the default value (see ch. 4.1.1) */
1899 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1900 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1901 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1902 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1903 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1904 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1905 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1906 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1907 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1908 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1909 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1910 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1911 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1912 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1914 /* Snapshot current HW counters to "clear" them. */
1915 qib_get_counters(ppd, &cntrs);
1916 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1917 ibp->z_link_error_recovery_counter =
1918 cntrs.link_error_recovery_counter;
1919 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1920 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1921 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1922 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1923 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1924 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1925 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1926 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1927 ibp->z_local_link_integrity_errors =
1928 cntrs.local_link_integrity_errors;
1929 ibp->z_excessive_buffer_overrun_errors =
1930 cntrs.excessive_buffer_overrun_errors;
1931 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1932 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1933 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1936 static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
1937 struct ib_port_immutable *immutable)
1939 struct ib_port_attr attr;
1942 err = qib_query_port(ibdev, port_num, &attr);
1946 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1947 immutable->gid_tbl_len = attr.gid_tbl_len;
1948 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1949 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1955 * qib_register_ib_device - register our device with the infiniband core
1956 * @dd: the device data structure
1957 * Return 0 on success or a negative errno on failure.
1959 int qib_register_ib_device(struct qib_devdata *dd)
1961 struct qib_ibdev *dev = &dd->verbs_dev;
1962 struct ib_device *ibdev = &dev->rdi.ibdev;
1963 struct qib_pportdata *ppd = dd->pport;
1967 dev->qp_table_size = ib_qib_qp_table_size;
1968 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
1969 dev->qp_table = kmalloc_array(
1971 sizeof(*dev->qp_table),
1973 if (!dev->qp_table) {
1977 for (i = 0; i < dev->qp_table_size; i++)
1978 RCU_INIT_POINTER(dev->qp_table[i], NULL);
1980 for (i = 0; i < dd->num_pports; i++)
1981 init_ibport(ppd + i);
1983 /* Only need to initialize non-zero fields. */
1984 spin_lock_init(&dev->qpt_lock);
1985 spin_lock_init(&dev->n_cqs_lock);
1986 spin_lock_init(&dev->n_qps_lock);
1987 spin_lock_init(&dev->n_srqs_lock);
1988 spin_lock_init(&dev->n_mcast_grps_lock);
1989 init_timer(&dev->mem_timer);
1990 dev->mem_timer.function = mem_timer;
1991 dev->mem_timer.data = (unsigned long) dev;
1993 qib_init_qpn_table(dd, &dev->qpn_table);
1995 INIT_LIST_HEAD(&dev->piowait);
1996 INIT_LIST_HEAD(&dev->dmawait);
1997 INIT_LIST_HEAD(&dev->txwait);
1998 INIT_LIST_HEAD(&dev->memwait);
1999 INIT_LIST_HEAD(&dev->txreq_free);
2001 if (ppd->sdma_descq_cnt) {
2002 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
2003 ppd->sdma_descq_cnt *
2004 sizeof(struct qib_pio_header),
2005 &dev->pio_hdrs_phys,
2007 if (!dev->pio_hdrs) {
2013 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
2014 struct qib_verbs_txreq *tx;
2016 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
2022 list_add(&tx->txreq.list, &dev->txreq_free);
2026 * The system image GUID is supposed to be the same for all
2027 * IB HCAs in a single system but since there can be other
2028 * device types in the system, we can't be sure this is unique.
2030 if (!ib_qib_sys_image_guid)
2031 ib_qib_sys_image_guid = ppd->guid;
2033 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
2034 ibdev->owner = THIS_MODULE;
2035 ibdev->node_guid = ppd->guid;
2036 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
2037 ibdev->uverbs_cmd_mask =
2038 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2039 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2040 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2041 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2042 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2043 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2044 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
2045 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
2046 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2047 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2048 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2049 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2050 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2051 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2052 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2053 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2054 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2055 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2056 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2057 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2058 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2059 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2060 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2061 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2062 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2063 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2064 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2065 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2066 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2067 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2068 ibdev->node_type = RDMA_NODE_IB_CA;
2069 ibdev->phys_port_cnt = dd->num_pports;
2070 ibdev->num_comp_vectors = 1;
2071 ibdev->dma_device = &dd->pcidev->dev;
2072 ibdev->query_device = qib_query_device;
2073 ibdev->modify_device = qib_modify_device;
2074 ibdev->query_port = qib_query_port;
2075 ibdev->modify_port = qib_modify_port;
2076 ibdev->query_pkey = qib_query_pkey;
2077 ibdev->query_gid = qib_query_gid;
2078 ibdev->alloc_ucontext = qib_alloc_ucontext;
2079 ibdev->dealloc_ucontext = qib_dealloc_ucontext;
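/*
 * PD, AH, and MR operations are left NULL here; rvt_register_device()
 * below is expected to fill them in with the rdmavt implementations.
 */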
2080 ibdev->alloc_pd = NULL;
2081 ibdev->dealloc_pd = NULL;
2082 ibdev->create_ah = NULL;
2083 ibdev->destroy_ah = NULL;
2084 ibdev->modify_ah = NULL;
2085 ibdev->query_ah = NULL;
2086 ibdev->create_srq = qib_create_srq;
2087 ibdev->modify_srq = qib_modify_srq;
2088 ibdev->query_srq = qib_query_srq;
2089 ibdev->destroy_srq = qib_destroy_srq;
2090 ibdev->create_qp = qib_create_qp;
2091 ibdev->modify_qp = qib_modify_qp;
2092 ibdev->query_qp = qib_query_qp;
2093 ibdev->destroy_qp = qib_destroy_qp;
2094 ibdev->post_send = qib_post_send;
2095 ibdev->post_recv = qib_post_receive;
2096 ibdev->post_srq_recv = qib_post_srq_receive;
2097 ibdev->create_cq = qib_create_cq;
2098 ibdev->destroy_cq = qib_destroy_cq;
2099 ibdev->resize_cq = qib_resize_cq;
2100 ibdev->poll_cq = qib_poll_cq;
2101 ibdev->req_notify_cq = qib_req_notify_cq;
2102 ibdev->get_dma_mr = NULL;
2103 ibdev->reg_user_mr = NULL;
2104 ibdev->dereg_mr = NULL;
2105 ibdev->alloc_mr = NULL;
2106 ibdev->map_mr_sg = NULL;
2107 ibdev->alloc_fmr = NULL;
2108 ibdev->map_phys_fmr = NULL;
2109 ibdev->unmap_fmr = NULL;
2110 ibdev->dealloc_fmr = NULL;
2111 ibdev->attach_mcast = qib_multicast_attach;
2112 ibdev->detach_mcast = qib_multicast_detach;
2113 ibdev->process_mad = qib_process_mad;
2115 ibdev->dma_ops = NULL;
2116 ibdev->get_port_immutable = qib_port_immutable;
2118 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2119 "Intel Infiniband HCA %s", init_utsname()->nodename);
2122 * Fill in rvt info object.
2124 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
2125 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
2126 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
2127 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
2128 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
2129 dd->verbs_dev.rdi.dparms.props.max_pd = ib_qib_max_pds;
2130 dd->verbs_dev.rdi.dparms.props.max_ah = ib_qib_max_ahs;
2131 dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER |
2132 RVT_FLAG_CQ_INIT_DRIVER);
2133 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
2135 ret = rvt_register_device(&dd->verbs_dev.rdi);
2139 ret = qib_create_agents(dev);
2143 ret = qib_verbs_register_sysfs(dd);
2150 qib_free_agents(dev);
2152 rvt_unregister_device(&dd->verbs_dev.rdi);
2155 while (!list_empty(&dev->txreq_free)) {
2156 struct list_head *l = dev->txreq_free.next;
2157 struct qib_verbs_txreq *tx;
2160 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2163 if (ppd->sdma_descq_cnt)
2164 dma_free_coherent(&dd->pcidev->dev,
2165 ppd->sdma_descq_cnt *
2166 sizeof(struct qib_pio_header),
2167 dev->pio_hdrs, dev->pio_hdrs_phys);
2169 kfree(dev->qp_table);
2171 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2176 void qib_unregister_ib_device(struct qib_devdata *dd)
2178 struct qib_ibdev *dev = &dd->verbs_dev;
2181 qib_verbs_unregister_sysfs(dd);
2183 qib_free_agents(dev);
2185 rvt_unregister_device(&dd->verbs_dev.rdi);
2187 if (!list_empty(&dev->piowait))
2188 qib_dev_err(dd, "piowait list not empty!\n");
2189 if (!list_empty(&dev->dmawait))
2190 qib_dev_err(dd, "dmawait list not empty!\n");
2191 if (!list_empty(&dev->txwait))
2192 qib_dev_err(dd, "txwait list not empty!\n");
2193 if (!list_empty(&dev->memwait))
2194 qib_dev_err(dd, "memwait list not empty!\n");
2196 qps_inuse = qib_free_all_qps(dd);
2198 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2201 del_timer_sync(&dev->mem_timer);
2202 qib_free_qpn_table(&dev->qpn_table);
2203 while (!list_empty(&dev->txreq_free)) {
2204 struct list_head *l = dev->txreq_free.next;
2205 struct qib_verbs_txreq *tx;
2208 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2211 if (dd->pport->sdma_descq_cnt)
2212 dma_free_coherent(&dd->pcidev->dev,
2213 dd->pport->sdma_descq_cnt *
2214 sizeof(struct qib_pio_header),
2215 dev->pio_hdrs, dev->pio_hdrs_phys);
2216 kfree(dev->qp_table);
2220 * This must be called with s_lock held.
2222 void qib_schedule_send(struct rvt_qp *qp)
2224 struct qib_qp_priv *priv = qp->priv;
2225 if (qib_send_ok(qp)) {
2226 struct qib_ibport *ibp =
2227 to_iport(qp->ibqp.device, qp->port_num);
2228 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
2230 queue_work(ppd->qib_wq, &priv->s_work);