/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>

#include "qib.h"
#include "qib_common.h"
static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static unsigned int qib_lkey_table_size = 16;
module_param_named(lkey_table_size, qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");
64 static unsigned int ib_qib_max_ahs = 0xFFFF;
65 module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
68 unsigned int ib_qib_max_cqes = 0x2FFFF;
69 module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_cqes,
71 "Maximum number of completion queue entries to support");
73 unsigned int ib_qib_max_cqs = 0x1FFFF;
74 module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
75 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
77 unsigned int ib_qib_max_qp_wrs = 0x3FFF;
78 module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
81 unsigned int ib_qib_max_qps = 16384;
82 module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
83 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
85 unsigned int ib_qib_max_sges = 0x60;
86 module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
87 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
89 unsigned int ib_qib_max_mcast_grps = 16384;
90 module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
91 MODULE_PARM_DESC(max_mcast_grps,
92 "Maximum number of multicast groups to support");
unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");

static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};

/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;
/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean controlling whether the MR reference is dropped
 */
void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
{
162 struct rvt_sge *sge = &ss->sge;
165 u32 len = sge->length;
169 if (len > sge->sge_length)
170 len = sge->sge_length;
172 memcpy(sge->vaddr, data, len);
175 sge->sge_length -= len;
176 if (sge->sge_length == 0) {
180 *sge = *ss->sg_list++;
181 } else if (sge->length == 0 && sge->mr->lkey) {
182 if (++sge->n >= RVT_SEGSZ) {
183 if (++sge->m >= sge->mr->mapsz)
188 sge->mr->map[sge->m]->segs[sge->n].vaddr;
190 sge->mr->map[sge->m]->segs[sge->n].length;
/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
204 struct rvt_sge *sge = &ss->sge;
207 u32 len = sge->length;
211 if (len > sge->sge_length)
212 len = sge->sge_length;
216 sge->sge_length -= len;
217 if (sge->sge_length == 0) {
221 *sge = *ss->sg_list++;
222 } else if (sge->length == 0 && sge->mr->lkey) {
223 if (++sge->n >= RVT_SEGSZ) {
224 if (++sge->m >= sge->mr->mapsz)
229 sge->mr->map[sge->m]->segs[sge->n].vaddr;
231 sge->mr->map[sge->m]->segs[sge->n].length;
/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the rvt_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
242 static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
244 struct rvt_sge *sg_list = ss->sg_list;
245 struct rvt_sge sge = ss->sge;
246 u8 num_sge = ss->num_sge;
247 u32 ndesc = 1; /* count the header */
250 u32 len = sge.length;
254 if (len > sge.sge_length)
255 len = sge.sge_length;
257 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
258 (len != length && (len & (sizeof(u32) - 1)))) {
265 sge.sge_length -= len;
266 if (sge.sge_length == 0) {
269 } else if (sge.length == 0 && sge.mr->lkey) {
270 if (++sge.n >= RVT_SEGSZ) {
271 if (++sge.m >= sge.mr->mapsz)
276 sge.mr->map[sge.m]->segs[sge.n].vaddr;
278 sge.mr->map[sge.m]->segs[sge.n].length;
/*
 * Copy from the SGEs to the data buffer.
 */
288 static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
290 struct rvt_sge *sge = &ss->sge;
293 u32 len = sge->length;
297 if (len > sge->sge_length)
298 len = sge->sge_length;
300 memcpy(data, sge->vaddr, len);
303 sge->sge_length -= len;
304 if (sge->sge_length == 0) {
306 *sge = *ss->sg_list++;
307 } else if (sge->length == 0 && sge->mr->lkey) {
308 if (++sge->n >= RVT_SEGSZ) {
309 if (++sge->m >= sge->mr->mapsz)
314 sge->mr->map[sge->m]->segs[sge->n].vaddr;
316 sge->mr->map[sge->m]->segs[sge->n].length;
/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct rvt_qp *qp, struct ib_send_wr *wr,
			     int *scheduled)
{
331 struct rvt_swqe *wqe;
338 struct rvt_lkey_table *rkt;
340 int avoid_schedule = 0;
342 spin_lock_irqsave(&qp->s_lock, flags);
344 /* Check that state is OK to post send. */
345 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
348 /* IB spec says that num_sge == 0 is OK. */
349 if (wr->num_sge > qp->s_max_sge)
	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
357 if (qp->ibqp.qp_type == IB_QPT_UC) {
358 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
360 } else if (qp->ibqp.qp_type != IB_QPT_RC) {
361 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
362 if (wr->opcode != IB_WR_SEND &&
363 wr->opcode != IB_WR_SEND_WITH_IMM)
365 /* Check UD destination address PD */
366 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
368 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
370 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
372 wr->sg_list[0].length < sizeof(u64) ||
373 wr->sg_list[0].addr & (sizeof(u64) - 1)))
375 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
378 next = qp->s_head + 1;
379 if (next >= qp->s_size)
381 if (next == qp->s_last) {
386 rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table;
387 pd = ibpd_to_rvtpd(qp->ibqp.pd);
388 wqe = get_swqe_ptr(qp, qp->s_head);
390 if (qp->ibqp.qp_type != IB_QPT_UC &&
391 qp->ibqp.qp_type != IB_QPT_RC)
392 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
393 else if (wr->opcode == IB_WR_REG_MR)
394 memcpy(&wqe->reg_wr, reg_wr(wr),
395 sizeof(wqe->reg_wr));
396 else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
397 wr->opcode == IB_WR_RDMA_WRITE ||
398 wr->opcode == IB_WR_RDMA_READ)
399 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
400 else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
401 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
402 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
404 memcpy(&wqe->wr, wr, sizeof(wqe->wr));
409 acc = wr->opcode >= IB_WR_RDMA_READ ?
410 IB_ACCESS_LOCAL_WRITE : 0;
411 for (i = 0; i < wr->num_sge; i++) {
412 u32 length = wr->sg_list[i].length;
417 ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
418 &wr->sg_list[i], acc);
420 goto bail_inval_free;
421 wqe->length += length;
426 if (qp->ibqp.qp_type == IB_QPT_UC ||
427 qp->ibqp.qp_type == IB_QPT_RC) {
428 if (wqe->length > 0x80000000U)
429 goto bail_inval_free;
430 if (wqe->length <= qp->pmtu)
432 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
433 qp->port_num - 1)->ibmtu) {
434 goto bail_inval_free;
436 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
439 wqe->ssn = qp->s_ssn++;
447 struct rvt_sge *sge = &wqe->sg_list[--j];
454 if (!ret && !wr->next && !avoid_schedule &&
456 dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
457 qib_schedule_send(qp);
460 spin_unlock_irqrestore(&qp->s_lock, flags);
/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
472 static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
473 struct ib_send_wr **bad_wr)
475 struct rvt_qp *qp = to_iqp(ibqp);
476 struct qib_qp_priv *priv = qp->priv;
480 for (; wr; wr = wr->next) {
481 err = qib_post_one_send(qp, wr, &scheduled);
488 /* Try to do the send work in the caller's context. */
490 qib_do_send(&priv->s_work);
/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
504 static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
505 struct ib_recv_wr **bad_wr)
507 struct rvt_qp *qp = to_iqp(ibqp);
508 struct rvt_rwq *wq = qp->r_rq.wq;
512 /* Check that state is OK to post receive. */
513 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
519 for (; wr; wr = wr->next) {
520 struct rvt_rwqe *wqe;
524 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
530 spin_lock_irqsave(&qp->r_rq.lock, flags);
532 if (next >= qp->r_rq.size)
534 if (next == wq->tail) {
535 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
541 wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
542 wqe->wr_id = wr->wr_id;
543 wqe->num_sge = wr->num_sge;
544 for (i = 0; i < wr->num_sge; i++)
545 wqe->sg_list[i] = wr->sg_list[i];
546 /* Make sure queue entry is written before the head index. */
549 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
/**
 * qib_qp_rcv - processing an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
570 static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
571 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
573 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
575 spin_lock(&qp->r_lock);
577 /* Check for valid receive state. */
578 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
579 ibp->rvp.n_pkt_drops++;
583 switch (qp->ibqp.qp_type) {
586 if (ib_qib_disable_sma)
590 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
594 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
598 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
606 spin_unlock(&qp->r_lock);
/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
619 void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
621 struct qib_pportdata *ppd = rcd->ppd;
622 struct qib_ibport *ibp = &ppd->ibport_data;
623 struct qib_ib_header *hdr = rhdr;
624 struct qib_other_headers *ohdr;
631 /* 24 == LRH+BTH+CRC */
632 if (unlikely(tlen < 24))
635 /* Check for a valid destination LID (see ch. 7.11.1). */
636 lid = be16_to_cpu(hdr->lrh[1]);
637 if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
638 lid &= ~((1 << ppd->lmc) - 1);
639 if (unlikely(lid != ppd->lid))
644 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
645 if (lnh == QIB_LRH_BTH)
647 else if (lnh == QIB_LRH_GRH) {
650 ohdr = &hdr->u.l.oth;
651 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
653 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
654 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
659 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
660 #ifdef CONFIG_DEBUG_FS
661 rcd->opstats->stats[opcode].n_bytes += tlen;
662 rcd->opstats->stats[opcode].n_packets++;
665 /* Get the destination QP number. */
666 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
667 if (qp_num == QIB_MULTICAST_QPN) {
668 struct qib_mcast *mcast;
669 struct qib_mcast_qp *p;
671 if (lnh != QIB_LRH_GRH)
673 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
676 this_cpu_inc(ibp->pmastats->n_multicast_rcv);
677 list_for_each_entry_rcu(p, &mcast->qp_list, list)
678 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
683 if (atomic_dec_return(&mcast->refcount) <= 1)
684 wake_up(&mcast->wait);
686 if (rcd->lookaside_qp) {
687 if (rcd->lookaside_qpn != qp_num) {
688 if (atomic_dec_and_test(
689 &rcd->lookaside_qp->refcount))
691 &rcd->lookaside_qp->wait);
692 rcd->lookaside_qp = NULL;
695 if (!rcd->lookaside_qp) {
696 qp = qib_lookup_qpn(ibp, qp_num);
699 rcd->lookaside_qp = qp;
700 rcd->lookaside_qpn = qp_num;
702 qp = rcd->lookaside_qp;
703 this_cpu_inc(ibp->pmastats->n_unicast_rcv);
704 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
709 ibp->rvp.n_pkt_drops++;
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
716 static void mem_timer(unsigned long data)
718 struct qib_ibdev *dev = (struct qib_ibdev *) data;
719 struct list_head *list = &dev->memwait;
720 struct rvt_qp *qp = NULL;
721 struct qib_qp_priv *priv = NULL;
724 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
725 if (!list_empty(list)) {
726 priv = list_entry(list->next, struct qib_qp_priv, iowait);
728 list_del_init(&priv->iowait);
729 atomic_inc(&qp->refcount);
730 if (!list_empty(list))
731 mod_timer(&dev->mem_timer, jiffies + 1);
733 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
736 spin_lock_irqsave(&qp->s_lock, flags);
737 if (qp->s_flags & RVT_S_WAIT_KMEM) {
738 qp->s_flags &= ~RVT_S_WAIT_KMEM;
739 qib_schedule_send(qp);
741 spin_unlock_irqrestore(&qp->s_lock, flags);
742 if (atomic_dec_and_test(&qp->refcount))
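/*
 * update_sge - advance the SGE state by @length bytes
 *
 * Helper used after data has been copied to or from the current segment;
 * it walks to the next sg_list entry or MR map segment once the current
 * one is consumed.
 */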
747 static void update_sge(struct rvt_sge_state *ss, u32 length)
749 struct rvt_sge *sge = &ss->sge;
751 sge->vaddr += length;
752 sge->length -= length;
753 sge->sge_length -= length;
754 if (sge->sge_length == 0) {
756 *sge = *ss->sg_list++;
757 } else if (sge->length == 0 && sge->mr->lkey) {
758 if (++sge->n >= RVT_SEGSZ) {
759 if (++sge->m >= sge->mr->mapsz)
763 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
764 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
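/*
 * copy_io - copy an SGE list into a PIO send buffer
 *
 * The chip buffer must be written with aligned 32-bit stores, so the
 * endian helpers above assemble partial words regardless of host byte
 * order.  When @flush_wc is set, write-combining is flushed around the
 * last (trigger) word so the packet is only seen once fully written.
 */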
804 static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
805 u32 length, unsigned flush_wc)
812 u32 len = ss->sge.length;
817 if (len > ss->sge.sge_length)
818 len = ss->sge.sge_length;
820 /* If the source address is not aligned, try to align it. */
821 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
823 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
825 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
828 y = sizeof(u32) - off;
831 if (len + extra >= sizeof(u32)) {
832 data |= set_upper_bits(v, extra *
834 len = sizeof(u32) - extra;
839 __raw_writel(data, piobuf);
844 /* Clear unused upper bytes */
845 data |= clear_upper_bytes(v, len, extra);
853 /* Source address is aligned. */
854 u32 *addr = (u32 *) ss->sge.vaddr;
855 int shift = extra * BITS_PER_BYTE;
856 int ushift = 32 - shift;
859 while (l >= sizeof(u32)) {
862 data |= set_upper_bits(v, shift);
863 __raw_writel(data, piobuf);
864 data = get_upper_bits(v, ushift);
870 * We still have 'extra' number of bytes leftover.
875 if (l + extra >= sizeof(u32)) {
876 data |= set_upper_bits(v, shift);
877 len -= l + extra - sizeof(u32);
882 __raw_writel(data, piobuf);
887 /* Clear unused upper bytes */
888 data |= clear_upper_bytes(v, l, extra);
895 } else if (len == length) {
899 } else if (len == length) {
		/*
		 * Need to round up for the last dword in the
		 * packet.
		 */
907 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
909 last = ((u32 *) ss->sge.vaddr)[w - 1];
914 qib_pio_copy(piobuf, ss->sge.vaddr, w);
917 extra = len & (sizeof(u32) - 1);
919 u32 v = ((u32 *) ss->sge.vaddr)[w];
921 /* Clear unused upper bytes */
922 data = clear_upper_bytes(v, extra, 0);
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}
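/*
 * __get_txreq - slow-path allocation of a verbs tx request
 *
 * Retries the free list under qp->s_lock; if it is still empty, the QP is
 * queued on dev->txwait with RVT_S_WAIT_TX set and ERR_PTR(-EBUSY) is
 * returned so the send is retried later.
 */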
940 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
943 struct qib_qp_priv *priv = qp->priv;
944 struct qib_verbs_txreq *tx;
947 spin_lock_irqsave(&qp->s_lock, flags);
948 spin_lock(&dev->rdi.pending_lock);
950 if (!list_empty(&dev->txreq_free)) {
951 struct list_head *l = dev->txreq_free.next;
954 spin_unlock(&dev->rdi.pending_lock);
955 spin_unlock_irqrestore(&qp->s_lock, flags);
956 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
958 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
959 list_empty(&priv->iowait)) {
961 qp->s_flags |= RVT_S_WAIT_TX;
962 list_add_tail(&priv->iowait, &dev->txwait);
964 qp->s_flags &= ~RVT_S_BUSY;
965 spin_unlock(&dev->rdi.pending_lock);
966 spin_unlock_irqrestore(&qp->s_lock, flags);
967 tx = ERR_PTR(-EBUSY);
972 static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
975 struct qib_verbs_txreq *tx;
978 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
979 /* assume the list non empty */
980 if (likely(!list_empty(&dev->txreq_free))) {
981 struct list_head *l = dev->txreq_free.next;
984 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
985 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
987 /* call slow path to get the extra lock */
988 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
989 tx = __get_txreq(dev, qp);
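/*
 * qib_put_txreq - return a verbs tx request to the free list
 *
 * Drops the QP and MR references, releases any DMA mapping or bounce
 * buffer, and wakes the first QP waiting on dev->txwait, if any.
 */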
994 void qib_put_txreq(struct qib_verbs_txreq *tx)
996 struct qib_ibdev *dev;
998 struct qib_qp_priv *priv;
1002 dev = to_idev(qp->ibqp.device);
1004 if (atomic_dec_and_test(&qp->refcount))
1010 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
1011 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
1012 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
1013 tx->txreq.addr, tx->hdr_dwords << 2,
1015 kfree(tx->align_buf);
1018 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1020 /* Put struct back on free list */
1021 list_add(&tx->txreq.list, &dev->txreq_free);
1023 if (!list_empty(&dev->txwait)) {
1024 /* Wake up first QP wanting a free struct */
1025 priv = list_entry(dev->txwait.next, struct qib_qp_priv,
1028 list_del_init(&priv->iowait);
1029 atomic_inc(&qp->refcount);
1030 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1032 spin_lock_irqsave(&qp->s_lock, flags);
1033 if (qp->s_flags & RVT_S_WAIT_TX) {
1034 qp->s_flags &= ~RVT_S_WAIT_TX;
1035 qib_schedule_send(qp);
1037 spin_unlock_irqrestore(&qp->s_lock, flags);
1039 if (atomic_dec_and_test(&qp->refcount))
1042 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
1051 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
1053 struct rvt_qp *qp, *nqp;
1054 struct qib_qp_priv *qpp, *nqpp;
1055 struct rvt_qp *qps[20];
1056 struct qib_ibdev *dev;
1060 dev = &ppd->dd->verbs_dev;
1061 spin_lock(&dev->rdi.pending_lock);
1063 /* Search wait list for first QP wanting DMA descriptors. */
1064 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
1067 if (qp->port_num != ppd->port)
1069 if (n == ARRAY_SIZE(qps))
1071 if (qpp->s_tx->txreq.sg_count > avail)
1073 avail -= qpp->s_tx->txreq.sg_count;
1074 list_del_init(&qpp->iowait);
1075 atomic_inc(&qp->refcount);
1079 spin_unlock(&dev->rdi.pending_lock);
1081 for (i = 0; i < n; i++) {
1083 spin_lock(&qp->s_lock);
1084 if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
1085 qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
1086 qib_schedule_send(qp);
1088 spin_unlock(&qp->s_lock);
1089 if (atomic_dec_and_test(&qp->refcount))
/*
 * This is called with ppd->sdma_lock held.
 */
1097 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
1099 struct qib_verbs_txreq *tx =
1100 container_of(cookie, struct qib_verbs_txreq, txreq);
1101 struct rvt_qp *qp = tx->qp;
1102 struct qib_qp_priv *priv = qp->priv;
1104 spin_lock(&qp->s_lock);
1106 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
1107 else if (qp->ibqp.qp_type == IB_QPT_RC) {
1108 struct qib_ib_header *hdr;
1110 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
1111 hdr = &tx->align_buf->hdr;
1113 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1115 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
1117 qib_rc_send_complete(qp, hdr);
1119 if (atomic_dec_and_test(&priv->s_dma_busy)) {
1120 if (qp->state == IB_QPS_RESET)
1121 wake_up(&priv->wait_dma);
1122 else if (qp->s_flags & RVT_S_WAIT_DMA) {
1123 qp->s_flags &= ~RVT_S_WAIT_DMA;
1124 qib_schedule_send(qp);
1127 spin_unlock(&qp->s_lock);
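/*
 * wait_kmem - block the QP until kernel memory is available
 *
 * Queues the QP on dev->memwait with RVT_S_WAIT_KMEM set and arms
 * mem_timer(); returns -EBUSY so the caller leaves the work request
 * queued instead of completing it.
 */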
1132 static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
1134 struct qib_qp_priv *priv = qp->priv;
1135 unsigned long flags;
1138 spin_lock_irqsave(&qp->s_lock, flags);
1139 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1140 spin_lock(&dev->rdi.pending_lock);
1141 if (list_empty(&priv->iowait)) {
1142 if (list_empty(&dev->memwait))
1143 mod_timer(&dev->mem_timer, jiffies + 1);
1144 qp->s_flags |= RVT_S_WAIT_KMEM;
1145 list_add_tail(&priv->iowait, &dev->memwait);
1147 spin_unlock(&dev->rdi.pending_lock);
1148 qp->s_flags &= ~RVT_S_BUSY;
1151 spin_unlock_irqrestore(&qp->s_lock, flags);
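/*
 * qib_verbs_send_dma - send a packet using the send DMA engine
 *
 * The header is staged in a pre-mapped PIO header slot when the payload
 * SGEs are DMA friendly; otherwise header and payload are copied into a
 * temporary buffer that is mapped and freed when the descriptor completes.
 */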
1156 static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
1157 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
1158 u32 plen, u32 dwords)
1160 struct qib_qp_priv *priv = qp->priv;
1161 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1162 struct qib_devdata *dd = dd_from_dev(dev);
1163 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1164 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1165 struct qib_verbs_txreq *tx;
1166 struct qib_pio_header *phdr;
1174 /* resend previously constructed packet */
1175 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
1179 tx = get_txreq(dev, qp);
1183 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1184 be16_to_cpu(hdr->lrh[0]) >> 12);
1186 atomic_inc(&qp->refcount);
1187 tx->wqe = qp->s_wqe;
1188 tx->mr = qp->s_rdma_mr;
1190 qp->s_rdma_mr = NULL;
1191 tx->txreq.callback = sdma_complete;
1192 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
1193 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
1195 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
1196 if (plen + 1 > dd->piosize2kmax_dwords)
1197 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
	/*
	 * Don't try to DMA if it takes more descriptors than
	 * are available.
	 */
1204 ndesc = qib_count_sge(ss, len);
1205 if (ndesc >= ppd->sdma_descq_cnt)
1210 phdr = &dev->pio_hdrs[tx->hdr_inx];
1211 phdr->pbc[0] = cpu_to_le32(plen);
1212 phdr->pbc[1] = cpu_to_le32(control);
1213 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1214 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
1215 tx->txreq.sg_count = ndesc;
1216 tx->txreq.addr = dev->pio_hdrs_phys +
1217 tx->hdr_inx * sizeof(struct qib_pio_header);
1218 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
1219 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
1223 /* Allocate a buffer and copy the header and payload to it. */
1224 tx->hdr_dwords = plen + 1;
1225 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
1228 phdr->pbc[0] = cpu_to_le32(plen);
1229 phdr->pbc[1] = cpu_to_le32(control);
1230 memcpy(&phdr->hdr, hdr, hdrwords << 2);
1231 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
1233 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
1234 tx->hdr_dwords << 2, DMA_TO_DEVICE);
1235 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
1237 tx->align_buf = phdr;
1238 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
1239 tx->txreq.sg_count = 1;
1240 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
1247 ret = wait_kmem(dev, qp);
1249 ibp->rvp.n_unaligned++;
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct rvt_qp *qp)
{
1263 struct qib_qp_priv *priv = qp->priv;
1264 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1265 struct qib_devdata *dd;
1266 unsigned long flags;
	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
1275 spin_lock_irqsave(&qp->s_lock, flags);
1276 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
1277 spin_lock(&dev->rdi.pending_lock);
1278 if (list_empty(&priv->iowait)) {
1280 qp->s_flags |= RVT_S_WAIT_PIO;
1281 list_add_tail(&priv->iowait, &dev->piowait);
1282 dd = dd_from_dev(dev);
1283 dd->f_wantpiobuf_intr(dd, 1);
1285 spin_unlock(&dev->rdi.pending_lock);
1286 qp->s_flags &= ~RVT_S_BUSY;
1289 spin_unlock_irqrestore(&qp->s_lock, flags);
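/*
 * qib_verbs_send_pio - send a packet by programmed I/O
 *
 * Writes the PBC, header, and payload directly into a chip send buffer,
 * flushing write-combining around the trigger word where the hardware
 * requires it.  Returns the result of no_bufs_available() when no send
 * buffer can be allocated.
 */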
1293 static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
1294 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
1295 u32 plen, u32 dwords)
1297 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1298 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1299 u32 *hdr = (u32 *) ibhdr;
1300 u32 __iomem *piobuf_orig;
1301 u32 __iomem *piobuf;
1303 unsigned long flags;
1308 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1309 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1310 pbc = ((u64) control << 32) | plen;
1311 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1312 if (unlikely(piobuf == NULL))
1313 return no_bufs_available(qp);
	/*
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
1320 writeq(pbc, piobuf);
1321 piobuf_orig = piobuf;
1324 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	/*
	 * If there is just the header portion, must flush before
	 * writing last word of header for correctness, and after
	 * the last header word (trigger word).
	 */
1333 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1335 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1338 qib_pio_copy(piobuf, hdr, hdrwords);
1344 qib_pio_copy(piobuf, hdr, hdrwords);
1347 /* The common case is aligned and contained in one segment. */
1348 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1349 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1350 u32 *addr = (u32 *) ss->sge.vaddr;
1352 /* Update address before sending packet. */
1353 update_sge(ss, len);
1355 qib_pio_copy(piobuf, addr, dwords - 1);
1356 /* must flush early everything before trigger word */
1358 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1359 /* be sure trigger word is written */
1362 qib_pio_copy(piobuf, addr, dwords);
1365 copy_io(piobuf, ss, len, flush_wc);
1367 if (dd->flags & QIB_USE_SPCL_TRIG) {
1368 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1371 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1373 qib_sendbuf_done(dd, pbufn);
1374 if (qp->s_rdma_mr) {
1375 rvt_put_mr(qp->s_rdma_mr);
1376 qp->s_rdma_mr = NULL;
1379 spin_lock_irqsave(&qp->s_lock, flags);
1380 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1381 spin_unlock_irqrestore(&qp->s_lock, flags);
1382 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1383 spin_lock_irqsave(&qp->s_lock, flags);
1384 qib_rc_send_complete(qp, ibhdr);
1385 spin_unlock_irqrestore(&qp->s_lock, flags);
/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
1401 int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
1402 u32 hdrwords, struct rvt_sge_state *ss, u32 len)
1404 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1407 u32 dwords = (len + 3) >> 2;
	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
1413 plen = hdrwords + dwords + 1;
	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
1420 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1421 !(dd->flags & QIB_HAS_SEND_DMA))
1422 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1425 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1431 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1432 u64 *rwords, u64 *spkts, u64 *rpkts,
1436 struct qib_devdata *dd = ppd->dd;
1438 if (!(dd->flags & QIB_PRESENT)) {
1439 /* no hardware, freeze, etc. */
1443 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1444 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1445 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1446 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1447 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
/**
 * qib_get_counters - get various chip counters
 * @ppd: the qlogic_ib port
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
1462 int qib_get_counters(struct qib_pportdata *ppd,
1463 struct qib_verbs_counters *cntrs)
1467 if (!(ppd->dd->flags & QIB_PRESENT)) {
1468 /* no hardware, freeze, etc. */
1472 cntrs->symbol_error_counter =
1473 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1474 cntrs->link_error_recovery_counter =
1475 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection. We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
1481 cntrs->link_downed_counter =
1482 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1483 cntrs->port_rcv_errors =
1484 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1485 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1486 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1487 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1488 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1489 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1490 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1491 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1492 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1493 cntrs->port_rcv_errors +=
1494 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1495 cntrs->port_rcv_errors +=
1496 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1497 cntrs->port_rcv_remphys_errors =
1498 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1499 cntrs->port_xmit_discards =
1500 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1501 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1502 QIBPORTCNTR_WORDSEND);
1503 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1504 QIBPORTCNTR_WORDRCV);
1505 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1506 QIBPORTCNTR_PKTSEND);
1507 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1508 QIBPORTCNTR_PKTRCV);
1509 cntrs->local_link_integrity_errors =
1510 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1511 cntrs->excessive_buffer_overrun_errors =
1512 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1513 cntrs->vl15_dropped =
1514 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
1530 void qib_ib_piobufavail(struct qib_devdata *dd)
1532 struct qib_ibdev *dev = &dd->verbs_dev;
1533 struct list_head *list;
1534 struct rvt_qp *qps[5];
1536 unsigned long flags;
1538 struct qib_qp_priv *priv;
1540 list = &dev->piowait;
	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * enabled.
	 */
1549 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1550 while (!list_empty(list)) {
1551 if (n == ARRAY_SIZE(qps))
1553 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1555 list_del_init(&priv->iowait);
1556 atomic_inc(&qp->refcount);
1559 dd->f_wantpiobuf_intr(dd, 0);
1561 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1563 for (i = 0; i < n; i++) {
1566 spin_lock_irqsave(&qp->s_lock, flags);
1567 if (qp->s_flags & RVT_S_WAIT_PIO) {
1568 qp->s_flags &= ~RVT_S_WAIT_PIO;
1569 qib_schedule_send(qp);
1571 spin_unlock_irqrestore(&qp->s_lock, flags);
1573 /* Notify qib_destroy_qp() if it is waiting. */
1574 if (atomic_dec_and_test(&qp->refcount))
1579 static int qib_query_port(struct ib_device *ibdev, u8 port,
1580 struct ib_port_attr *props)
1582 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1583 struct qib_ibport *ibp = to_iport(ibdev, port);
1584 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1588 memset(props, 0, sizeof(*props));
1589 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1590 props->lmc = ppd->lmc;
1591 props->sm_lid = ibp->rvp.sm_lid;
1592 props->sm_sl = ibp->rvp.sm_sl;
1593 props->state = dd->f_iblink_state(ppd->lastibcstat);
1594 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1595 props->port_cap_flags = ibp->rvp.port_cap_flags;
1596 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1597 props->max_msg_sz = 0x80000000;
1598 props->pkey_tbl_len = qib_get_npkeys(dd);
1599 props->bad_pkey_cntr = ibp->rvp.pkey_violations;
1600 props->qkey_viol_cntr = ibp->rvp.qkey_violations;
1601 props->active_width = ppd->link_width_active;
1602 /* See rate_show() */
1603 props->active_speed = ppd->link_speed_active;
1604 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1605 props->init_type_reply = 0;
1607 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1608 switch (ppd->ibmtu) {
1627 props->active_mtu = mtu;
1628 props->subnet_timeout = ibp->rvp.subnet_timeout;
1633 static int qib_modify_device(struct ib_device *device,
1634 int device_modify_mask,
1635 struct ib_device_modify *device_modify)
1637 struct qib_devdata *dd = dd_from_ibdev(device);
1641 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1642 IB_DEVICE_MODIFY_NODE_DESC)) {
1647 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1648 memcpy(device->node_desc, device_modify->node_desc, 64);
1649 for (i = 0; i < dd->num_pports; i++) {
1650 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1652 qib_node_desc_chg(ibp);
1656 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1657 ib_qib_sys_image_guid =
1658 cpu_to_be64(device_modify->sys_image_guid);
1659 for (i = 0; i < dd->num_pports; i++) {
1660 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1662 qib_sys_guid_chg(ibp);
1672 static int qib_modify_port(struct ib_device *ibdev, u8 port,
1673 int port_modify_mask, struct ib_port_modify *props)
1675 struct qib_ibport *ibp = to_iport(ibdev, port);
1676 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1678 ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
1679 ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
1680 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1681 qib_cap_mask_chg(ibp);
1682 if (port_modify_mask & IB_PORT_SHUTDOWN)
1683 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1684 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1685 ibp->rvp.qkey_violations = 0;
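/*
 * qib_query_gid - return the GID at @index for the given port
 *
 * Index 0 reports the port GUID; the remaining entries come from the
 * per-port GUID table.
 */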
1689 static int qib_query_gid(struct ib_device *ibdev, u8 port,
1690 int index, union ib_gid *gid)
1692 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1695 if (!port || port > dd->num_pports)
1698 struct qib_ibport *ibp = to_iport(ibdev, port);
1699 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1701 gid->global.subnet_prefix = ibp->rvp.gid_prefix;
1703 gid->global.interface_id = ppd->guid;
1704 else if (index < QIB_GUIDS_PER_PORT)
1705 gid->global.interface_id = ibp->guids[index - 1];
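/*
 * qib_check_ah - rdmavt callback to validate address handle attributes
 *
 * A service level above 15 is rejected here.
 */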
1713 int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1715 if (ah_attr->sl > 15)
1721 static void qib_notify_new_ah(struct ib_device *ibdev,
1722 struct ib_ah_attr *ah_attr,
1725 struct qib_ibport *ibp;
1726 struct qib_pportdata *ppd;
	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */
1733 ibp = to_iport(ibdev, ah_attr->port_num);
1734 ppd = ppd_from_ibp(ibp);
1735 ah->vl = ibp->sl_to_vl[ah->attr.sl];
1736 ah->log_pmtu = ilog2(ppd->ibmtu);
1739 struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1741 struct ib_ah_attr attr;
1742 struct ib_ah *ah = ERR_PTR(-EINVAL);
1745 memset(&attr, 0, sizeof(attr));
1747 attr.port_num = ppd_from_ibp(ibp)->port;
1749 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1751 ah = ib_create_ah(qp0->ibqp.pd, &attr);
/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
1760 unsigned qib_get_npkeys(struct qib_devdata *dd)
1762 return ARRAY_SIZE(dd->rcd[0]->pkeys);
/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
1769 unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1771 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1772 struct qib_devdata *dd = ppd->dd;
1773 unsigned ctxt = ppd->hw_pidx;
1776 /* dd->rcd null if mini_init or some init failures */
1777 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1780 ret = dd->rcd[ctxt]->pkeys[index];
1785 static void init_ibport(struct qib_pportdata *ppd)
1787 struct qib_verbs_counters cntrs;
1788 struct qib_ibport *ibp = &ppd->ibport_data;
1790 spin_lock_init(&ibp->rvp.lock);
1791 /* Set the prefix to the default value (see ch. 4.1.1) */
1792 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1793 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1794 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1795 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1796 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1797 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1798 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1799 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1800 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1801 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1802 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1803 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1804 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1805 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1807 /* Snapshot current HW counters to "clear" them. */
1808 qib_get_counters(ppd, &cntrs);
1809 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1810 ibp->z_link_error_recovery_counter =
1811 cntrs.link_error_recovery_counter;
1812 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1813 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1814 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1815 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1816 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1817 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1818 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1819 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1820 ibp->z_local_link_integrity_errors =
1821 cntrs.local_link_integrity_errors;
1822 ibp->z_excessive_buffer_overrun_errors =
1823 cntrs.excessive_buffer_overrun_errors;
1824 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1825 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1826 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
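/*
 * qib_port_immutable - report the port attributes that never change
 *
 * Queries the port once and records the pkey/gid table sizes, the IB
 * core capability flags, and the maximum MAD size.
 */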
1829 static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
1830 struct ib_port_immutable *immutable)
1832 struct ib_port_attr attr;
1835 err = qib_query_port(ibdev, port_num, &attr);
1839 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1840 immutable->gid_tbl_len = attr.gid_tbl_len;
1841 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1842 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
/**
 * qib_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
1851 static void qib_fill_device_attr(struct qib_devdata *dd)
1853 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1855 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1857 rdi->dparms.props.max_pd = ib_qib_max_pds;
1858 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1859 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1860 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1861 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1862 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1863 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1864 rdi->dparms.props.vendor_id =
1865 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1866 rdi->dparms.props.vendor_part_id = dd->deviceid;
1867 rdi->dparms.props.hw_ver = dd->minrev;
1868 rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
1869 rdi->dparms.props.max_mr_size = ~0ULL;
1870 rdi->dparms.props.max_qp = ib_qib_max_qps;
1871 rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
1872 rdi->dparms.props.max_sge = ib_qib_max_sges;
1873 rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
1874 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1875 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1876 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1877 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1878 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1879 rdi->dparms.props.max_map_per_fmr = 32767;
1880 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1881 rdi->dparms.props.max_qp_init_rd_atom = 255;
1882 rdi->dparms.props.max_srq = ib_qib_max_srqs;
1883 rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
1884 rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
1885 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1886 rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
1887 rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
1888 rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1889 rdi->dparms.props.max_total_mcast_qp_attach =
1890 rdi->dparms.props.max_mcast_qp_attach *
1891 rdi->dparms.props.max_mcast_grp;
/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return the allocated qib_ibdev pointer or NULL on error.
 */
1899 int qib_register_ib_device(struct qib_devdata *dd)
1901 struct qib_ibdev *dev = &dd->verbs_dev;
1902 struct ib_device *ibdev = &dev->rdi.ibdev;
1903 struct qib_pportdata *ppd = dd->pport;
1907 /* allocate parent object */
1908 dev->rdi.qp_dev = kzalloc(sizeof(*dev->rdi.qp_dev), GFP_KERNEL);
1909 if (!dev->rdi.qp_dev)
1911 dev->rdi.qp_dev->qp_table_size = ib_qib_qp_table_size;
1912 dev->rdi.qp_dev->qp_table_bits = ilog2(ib_qib_qp_table_size);
1913 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
1914 dev->rdi.qp_dev->qp_table = kmalloc_array(
1915 dev->rdi.qp_dev->qp_table_size,
1916 sizeof(*dev->rdi.qp_dev->qp_table),
1918 if (!dev->rdi.qp_dev->qp_table) {
1922 for (i = 0; i < dev->rdi.qp_dev->qp_table_size; i++)
1923 RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[i], NULL);
1925 for (i = 0; i < dd->num_pports; i++)
1926 init_ibport(ppd + i);
1928 /* Only need to initialize non-zero fields. */
1929 spin_lock_init(&dev->rdi.qp_dev->qpt_lock);
1930 spin_lock_init(&dev->n_cqs_lock);
1931 spin_lock_init(&dev->n_qps_lock);
1932 spin_lock_init(&dev->n_srqs_lock);
1933 spin_lock_init(&dev->n_mcast_grps_lock);
1934 init_timer(&dev->mem_timer);
1935 dev->mem_timer.function = mem_timer;
1936 dev->mem_timer.data = (unsigned long) dev;
1938 qib_init_qpn_table(dd, &dev->rdi.qp_dev->qpn_table);
1940 INIT_LIST_HEAD(&dev->piowait);
1941 INIT_LIST_HEAD(&dev->dmawait);
1942 INIT_LIST_HEAD(&dev->txwait);
1943 INIT_LIST_HEAD(&dev->memwait);
1944 INIT_LIST_HEAD(&dev->txreq_free);
1946 if (ppd->sdma_descq_cnt) {
1947 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
1948 ppd->sdma_descq_cnt *
1949 sizeof(struct qib_pio_header),
1950 &dev->pio_hdrs_phys,
1952 if (!dev->pio_hdrs) {
1958 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
1959 struct qib_verbs_txreq *tx;
1961 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
1967 list_add(&tx->txreq.list, &dev->txreq_free);
	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
1975 if (!ib_qib_sys_image_guid)
1976 ib_qib_sys_image_guid = ppd->guid;
1978 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
1979 ibdev->owner = THIS_MODULE;
1980 ibdev->node_guid = ppd->guid;
1981 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
1982 ibdev->uverbs_cmd_mask =
1983 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1984 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1985 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1986 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1987 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1988 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
1989 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
1990 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
1991 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
1992 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1993 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1994 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1995 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1996 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
1997 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1998 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1999 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2000 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2001 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2002 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2003 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2004 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
2005 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2006 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2007 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2008 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2009 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2010 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2011 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2012 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
2013 ibdev->node_type = RDMA_NODE_IB_CA;
2014 ibdev->phys_port_cnt = dd->num_pports;
2015 ibdev->num_comp_vectors = 1;
2016 ibdev->dma_device = &dd->pcidev->dev;
2017 ibdev->query_device = NULL;
2018 ibdev->modify_device = qib_modify_device;
2019 ibdev->query_port = qib_query_port;
2020 ibdev->modify_port = qib_modify_port;
2021 ibdev->query_pkey = NULL;
2022 ibdev->query_gid = qib_query_gid;
2023 ibdev->alloc_ucontext = NULL;
2024 ibdev->dealloc_ucontext = NULL;
2025 ibdev->alloc_pd = NULL;
2026 ibdev->dealloc_pd = NULL;
2027 ibdev->create_ah = NULL;
2028 ibdev->destroy_ah = NULL;
2029 ibdev->modify_ah = NULL;
2030 ibdev->query_ah = NULL;
2031 ibdev->create_srq = qib_create_srq;
2032 ibdev->modify_srq = qib_modify_srq;
2033 ibdev->query_srq = qib_query_srq;
2034 ibdev->destroy_srq = qib_destroy_srq;
2035 ibdev->create_qp = qib_create_qp;
2036 ibdev->modify_qp = qib_modify_qp;
2037 ibdev->query_qp = qib_query_qp;
2038 ibdev->destroy_qp = qib_destroy_qp;
2039 ibdev->post_send = qib_post_send;
2040 ibdev->post_recv = qib_post_receive;
2041 ibdev->post_srq_recv = qib_post_srq_receive;
2042 ibdev->create_cq = qib_create_cq;
2043 ibdev->destroy_cq = qib_destroy_cq;
2044 ibdev->resize_cq = qib_resize_cq;
2045 ibdev->poll_cq = qib_poll_cq;
2046 ibdev->req_notify_cq = qib_req_notify_cq;
2047 ibdev->get_dma_mr = NULL;
2048 ibdev->reg_user_mr = NULL;
2049 ibdev->dereg_mr = NULL;
2050 ibdev->alloc_mr = NULL;
2051 ibdev->map_mr_sg = NULL;
2052 ibdev->alloc_fmr = NULL;
2053 ibdev->map_phys_fmr = NULL;
2054 ibdev->unmap_fmr = NULL;
2055 ibdev->dealloc_fmr = NULL;
2056 ibdev->attach_mcast = qib_multicast_attach;
2057 ibdev->detach_mcast = qib_multicast_detach;
2058 ibdev->process_mad = qib_process_mad;
2060 ibdev->dma_ops = NULL;
2061 ibdev->get_port_immutable = qib_port_immutable;
2063 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
2064 "Intel Infiniband HCA %s", init_utsname()->nodename);
	/*
	 * Fill in rvt info object.
	 */
2069 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
2070 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
2071 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
2072 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
2073 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
2074 dd->verbs_dev.rdi.flags = (RVT_FLAG_QP_INIT_DRIVER |
2075 RVT_FLAG_CQ_INIT_DRIVER);
2076 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
2077 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
2078 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
2080 qib_fill_device_attr(dd);
2083 for (i = 0; i < dd->num_pports; i++, ppd++) {
2084 ctxt = ppd->hw_pidx;
2085 rvt_init_port(&dd->verbs_dev.rdi,
2086 &ppd->ibport_data.rvp,
2088 dd->rcd[ctxt]->pkeys);
2091 ret = rvt_register_device(&dd->verbs_dev.rdi);
2095 ret = qib_create_agents(dev);
2099 ret = qib_verbs_register_sysfs(dd);
2106 qib_free_agents(dev);
2108 rvt_unregister_device(&dd->verbs_dev.rdi);
2111 while (!list_empty(&dev->txreq_free)) {
2112 struct list_head *l = dev->txreq_free.next;
2113 struct qib_verbs_txreq *tx;
2116 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2119 if (ppd->sdma_descq_cnt)
2120 dma_free_coherent(&dd->pcidev->dev,
2121 ppd->sdma_descq_cnt *
2122 sizeof(struct qib_pio_header),
2123 dev->pio_hdrs, dev->pio_hdrs_phys);
2125 kfree(dev->rdi.qp_dev->qp_table);
2127 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
2132 void qib_unregister_ib_device(struct qib_devdata *dd)
2134 struct qib_ibdev *dev = &dd->verbs_dev;
2137 qib_verbs_unregister_sysfs(dd);
2139 qib_free_agents(dev);
2141 rvt_unregister_device(&dd->verbs_dev.rdi);
2143 if (!list_empty(&dev->piowait))
2144 qib_dev_err(dd, "piowait list not empty!\n");
2145 if (!list_empty(&dev->dmawait))
2146 qib_dev_err(dd, "dmawait list not empty!\n");
2147 if (!list_empty(&dev->txwait))
2148 qib_dev_err(dd, "txwait list not empty!\n");
2149 if (!list_empty(&dev->memwait))
2150 qib_dev_err(dd, "memwait list not empty!\n");
2152 qps_inuse = qib_free_all_qps(dd);
2154 qib_dev_err(dd, "QP memory leak! %u still in use\n",
2157 del_timer_sync(&dev->mem_timer);
2158 qib_free_qpn_table(&dev->rdi.qp_dev->qpn_table);
2159 while (!list_empty(&dev->txreq_free)) {
2160 struct list_head *l = dev->txreq_free.next;
2161 struct qib_verbs_txreq *tx;
2164 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
2167 if (dd->pport->sdma_descq_cnt)
2168 dma_free_coherent(&dd->pcidev->dev,
2169 dd->pport->sdma_descq_cnt *
2170 sizeof(struct qib_pio_header),
2171 dev->pio_hdrs, dev->pio_hdrs_phys);
2172 kfree(dev->rdi.qp_dev->qp_table);
/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	if (qib_send_ok(qp)) {
		struct qib_ibport *ibp =
			to_iport(qp->ibqp.device, qp->port_num);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		queue_work(ppd->qib_wq, &priv->s_work);
	}
}