2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
35 #include <rdma/ib_mad.h>
36 #include <rdma/ib_user_verbs.h>
38 #include <linux/module.h>
39 #include <linux/utsname.h>
40 #include <linux/rculist.h>
42 #include <linux/random.h>
43 #include <linux/vmalloc.h>
44 #include <rdma/rdma_vt.h>
47 #include "qib_common.h"
49 static unsigned int ib_qib_qp_table_size = 256;
50 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(qp_table_size, "QP table size");
53 static unsigned int qib_lkey_table_size = 16;
54 module_param_named(lkey_table_size, qib_lkey_table_size, uint,
55 S_IRUGO);
56 MODULE_PARM_DESC(lkey_table_size,
57 "LKEY table size in bits (2^n, 1 <= n <= 23)");
59 static unsigned int ib_qib_max_pds = 0xFFFF;
60 module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
61 MODULE_PARM_DESC(max_pds,
62 "Maximum number of protection domains to support");
64 static unsigned int ib_qib_max_ahs = 0xFFFF;
65 module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
68 unsigned int ib_qib_max_cqes = 0x2FFFF;
69 module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
70 MODULE_PARM_DESC(max_cqes,
71 "Maximum number of completion queue entries to support");
73 unsigned int ib_qib_max_cqs = 0x1FFFF;
74 module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
75 MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
77 unsigned int ib_qib_max_qp_wrs = 0x3FFF;
78 module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
79 MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
81 unsigned int ib_qib_max_qps = 16384;
82 module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
83 MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
85 unsigned int ib_qib_max_sges = 0x60;
86 module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
87 MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
89 unsigned int ib_qib_max_mcast_grps = 16384;
90 module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
91 MODULE_PARM_DESC(max_mcast_grps,
92 "Maximum number of multicast groups to support");
94 unsigned int ib_qib_max_mcast_qp_attached = 16;
95 module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
96 uint, S_IRUGO);
97 MODULE_PARM_DESC(max_mcast_qp_attached,
98 "Maximum number of attached QPs to support");
100 unsigned int ib_qib_max_srqs = 1024;
101 module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
102 MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
104 unsigned int ib_qib_max_srq_sges = 128;
105 module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
106 MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
108 unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
109 module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
110 MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
112 static unsigned int ib_qib_disable_sma;
113 module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
114 MODULE_PARM_DESC(disable_sma, "Disable the SMA");
117 * Translate ib_wr_opcode into ib_wc_opcode.
119 const enum ib_wc_opcode ib_qib_wc_opcode[] = {
120 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
121 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
122 [IB_WR_SEND] = IB_WC_SEND,
123 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
124 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
125 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
126 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
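/*
 * Minimal usage sketch (not code from this file): when a send work
 * request completes, its WR opcode indexes this table to pick the
 * completion opcode reported in the work completion, e.g.
 *
 *	wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
 *
 * assuming "wc" is a struct ib_wc and "wqe" is the completed send WQE.
 */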
132 __be64 ib_qib_sys_image_guid;
135 * qib_copy_sge - copy data to SGE memory
136 * @ss: the SGE state
137 * @data: the data to copy
138 * @length: the length of the data
140 void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
142 struct rvt_sge *sge = &ss->sge;
145 u32 len = sge->length;
149 if (len > sge->sge_length)
150 len = sge->sge_length;
152 memcpy(sge->vaddr, data, len);
155 sge->sge_length -= len;
156 if (sge->sge_length == 0) {
160 *sge = *ss->sg_list++;
161 } else if (sge->length == 0 && sge->mr->lkey) {
162 if (++sge->n >= RVT_SEGSZ) {
163 if (++sge->m >= sge->mr->mapsz)
168 sge->mr->map[sge->m]->segs[sge->n].vaddr;
170 sge->mr->map[sge->m]->segs[sge->n].length;
178 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
179 * @ss: the SGE state
180 * @length: the number of bytes to skip
182 void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
184 struct rvt_sge *sge = &ss->sge;
187 u32 len = sge->length;
191 if (len > sge->sge_length)
192 len = sge->sge_length;
196 sge->sge_length -= len;
197 if (sge->sge_length == 0) {
201 *sge = *ss->sg_list++;
202 } else if (sge->length == 0 && sge->mr->lkey) {
203 if (++sge->n >= RVT_SEGSZ) {
204 if (++sge->m >= sge->mr->mapsz)
209 sge->mr->map[sge->m]->segs[sge->n].vaddr;
211 sge->mr->map[sge->m]->segs[sge->n].length;
218 * Count the number of DMA descriptors needed to send length bytes of data.
219 * Don't modify the rvt_sge_state to get the count.
220 * Return zero if any of the segments is not aligned.
222 static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
224 struct rvt_sge *sg_list = ss->sg_list;
225 struct rvt_sge sge = ss->sge;
226 u8 num_sge = ss->num_sge;
227 u32 ndesc = 1; /* count the header */
230 u32 len = sge.length;
234 if (len > sge.sge_length)
235 len = sge.sge_length;
237 if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
238 (len != length && (len & (sizeof(u32) - 1)))) {
245 sge.sge_length -= len;
246 if (sge.sge_length == 0) {
249 } else if (sge.length == 0 && sge.mr->lkey) {
250 if (++sge.n >= RVT_SEGSZ) {
251 if (++sge.m >= sge.mr->mapsz)
256 sge.mr->map[sge.m]->segs[sge.n].vaddr;
258 sge.mr->map[sge.m]->segs[sge.n].length;
266 * Copy from the SGEs to the data buffer.
268 static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
270 struct rvt_sge *sge = &ss->sge;
273 u32 len = sge->length;
277 if (len > sge->sge_length)
278 len = sge->sge_length;
280 memcpy(data, sge->vaddr, len);
283 sge->sge_length -= len;
284 if (sge->sge_length == 0) {
286 *sge = *ss->sg_list++;
287 } else if (sge->length == 0 && sge->mr->lkey) {
288 if (++sge->n >= RVT_SEGSZ) {
289 if (++sge->m >= sge->mr->mapsz)
294 sge->mr->map[sge->m]->segs[sge->n].vaddr;
296 sge->mr->map[sge->m]->segs[sge->n].length;
304 * qib_qp_rcv - processing an incoming packet on a QP
305 * @rcd: the context pointer
306 * @hdr: the packet header
307 * @has_grh: true if the packet has a GRH
308 * @data: the packet data
309 * @tlen: the packet length
310 * @qp: the QP the packet came on
312 * This is called from qib_ib_rcv() to process an incoming packet
314 * Called at interrupt level.
316 static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
317 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
319 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
321 spin_lock(&qp->r_lock);
323 /* Check for valid receive state. */
324 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
325 ibp->rvp.n_pkt_drops++;
329 switch (qp->ibqp.qp_type) {
332 if (ib_qib_disable_sma)
336 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
340 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
344 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
352 spin_unlock(&qp->r_lock);
356 * qib_ib_rcv - process an incoming packet
357 * @rcd: the context pointer
358 * @rhdr: the header of the packet
359 * @data: the packet payload
360 * @tlen: the packet length
362 * This is called from qib_kreceive() to process an incoming packet at
363 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
365 void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
367 struct qib_pportdata *ppd = rcd->ppd;
368 struct qib_ibport *ibp = &ppd->ibport_data;
369 struct qib_ib_header *hdr = rhdr;
370 struct qib_other_headers *ohdr;
377 /* 24 == LRH+BTH+CRC */
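/*
 * That is, an 8-byte LRH plus a 12-byte BTH plus a 4-byte ICRC: the
 * smallest packet that can carry a BTH, so anything shorter cannot be
 * a valid IB packet.
 */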
378 if (unlikely(tlen < 24))
381 /* Check for a valid destination LID (see ch. 7.11.1). */
382 lid = be16_to_cpu(hdr->lrh[1]);
383 if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
384 lid &= ~((1 << ppd->lmc) - 1);
385 if (unlikely(lid != ppd->lid))
390 lnh = be16_to_cpu(hdr->lrh[0]) & 3;
391 if (lnh == QIB_LRH_BTH)
393 else if (lnh == QIB_LRH_GRH) {
396 ohdr = &hdr->u.l.oth;
397 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
399 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
400 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
405 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
406 #ifdef CONFIG_DEBUG_FS
407 rcd->opstats->stats[opcode].n_bytes += tlen;
408 rcd->opstats->stats[opcode].n_packets++;
411 /* Get the destination QP number. */
412 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
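/*
 * BTH dword 1 carries the destination QP number in its low 24 bits;
 * the masked value either selects the multicast path below (for the
 * reserved multicast QPN) or is looked up in the QPN table.
 */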
413 if (qp_num == QIB_MULTICAST_QPN) {
414 struct rvt_mcast *mcast;
415 struct rvt_mcast_qp *p;
417 if (lnh != QIB_LRH_GRH)
419 mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
422 this_cpu_inc(ibp->pmastats->n_multicast_rcv);
423 list_for_each_entry_rcu(p, &mcast->qp_list, list)
424 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
426 * Notify rvt_multicast_detach() if it is waiting for us
427 * to finish.
429 if (atomic_dec_return(&mcast->refcount) <= 1)
430 wake_up(&mcast->wait);
432 if (rcd->lookaside_qp) {
433 if (rcd->lookaside_qpn != qp_num) {
434 if (atomic_dec_and_test(
435 &rcd->lookaside_qp->refcount))
437 &rcd->lookaside_qp->wait);
438 rcd->lookaside_qp = NULL;
441 if (!rcd->lookaside_qp) {
442 qp = qib_lookup_qpn(ibp, qp_num);
445 rcd->lookaside_qp = qp;
446 rcd->lookaside_qpn = qp_num;
448 qp = rcd->lookaside_qp;
449 this_cpu_inc(ibp->pmastats->n_unicast_rcv);
450 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
455 ibp->rvp.n_pkt_drops++;
459 * This is called from a timer to check for QPs
460 * which need kernel memory in order to send a packet.
462 static void mem_timer(unsigned long data)
464 struct qib_ibdev *dev = (struct qib_ibdev *) data;
465 struct list_head *list = &dev->memwait;
466 struct rvt_qp *qp = NULL;
467 struct qib_qp_priv *priv = NULL;
470 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
471 if (!list_empty(list)) {
472 priv = list_entry(list->next, struct qib_qp_priv, iowait);
474 list_del_init(&priv->iowait);
475 atomic_inc(&qp->refcount);
476 if (!list_empty(list))
477 mod_timer(&dev->mem_timer, jiffies + 1);
479 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
482 spin_lock_irqsave(&qp->s_lock, flags);
483 if (qp->s_flags & RVT_S_WAIT_KMEM) {
484 qp->s_flags &= ~RVT_S_WAIT_KMEM;
485 qib_schedule_send(qp);
487 spin_unlock_irqrestore(&qp->s_lock, flags);
488 if (atomic_dec_and_test(&qp->refcount))
493 static void update_sge(struct rvt_sge_state *ss, u32 length)
495 struct rvt_sge *sge = &ss->sge;
497 sge->vaddr += length;
498 sge->length -= length;
499 sge->sge_length -= length;
500 if (sge->sge_length == 0) {
502 *sge = *ss->sg_list++;
503 } else if (sge->length == 0 && sge->mr->lkey) {
504 if (++sge->n >= RVT_SEGSZ) {
505 if (++sge->m >= sge->mr->mapsz)
509 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
510 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
514 #ifdef __LITTLE_ENDIAN
515 static inline u32 get_upper_bits(u32 data, u32 shift)
517 return data >> shift;
520 static inline u32 set_upper_bits(u32 data, u32 shift)
522 return data << shift;
525 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
527 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
528 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
532 static inline u32 get_upper_bits(u32 data, u32 shift)
534 return data << shift;
537 static inline u32 set_upper_bits(u32 data, u32 shift)
539 return data >> shift;
542 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
544 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
545 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
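/*
 * These helpers assemble partially filled 32-bit words when the SGE
 * payload is not dword aligned.  As a little-endian sketch:
 * clear_upper_bytes(v, 2, 0) keeps only the two low-order bytes of v,
 * while clear_upper_bytes(v, 2, 1) also shifts them up by one byte so
 * they can be OR-ed behind a byte already staged in "data" by copy_io().
 */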
550 static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
551 u32 length, unsigned flush_wc)
558 u32 len = ss->sge.length;
563 if (len > ss->sge.sge_length)
564 len = ss->sge.sge_length;
566 /* If the source address is not aligned, try to align it. */
567 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
569 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
571 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
574 y = sizeof(u32) - off;
577 if (len + extra >= sizeof(u32)) {
578 data |= set_upper_bits(v, extra *
580 len = sizeof(u32) - extra;
585 __raw_writel(data, piobuf);
590 /* Clear unused upper bytes */
591 data |= clear_upper_bytes(v, len, extra);
599 /* Source address is aligned. */
600 u32 *addr = (u32 *) ss->sge.vaddr;
601 int shift = extra * BITS_PER_BYTE;
602 int ushift = 32 - shift;
605 while (l >= sizeof(u32)) {
608 data |= set_upper_bits(v, shift);
609 __raw_writel(data, piobuf);
610 data = get_upper_bits(v, ushift);
616 * We still have 'extra' bytes left over.
621 if (l + extra >= sizeof(u32)) {
622 data |= set_upper_bits(v, shift);
623 len -= l + extra - sizeof(u32);
628 __raw_writel(data, piobuf);
633 /* Clear unused upper bytes */
634 data |= clear_upper_bytes(v, l, extra);
641 } else if (len == length) {
645 } else if (len == length) {
649 * Need to round up for the last dword in the
650 * packet.
653 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
655 last = ((u32 *) ss->sge.vaddr)[w - 1];
660 qib_pio_copy(piobuf, ss->sge.vaddr, w);
663 extra = len & (sizeof(u32) - 1);
665 u32 v = ((u32 *) ss->sge.vaddr)[w];
667 /* Clear unused upper bytes */
668 data = clear_upper_bytes(v, extra, 0);
674 /* Update address before sending packet. */
675 update_sge(ss, length);
677 /* must flush everything early, before the trigger word */
679 __raw_writel(last, piobuf);
680 /* be sure trigger word is written */
683 __raw_writel(last, piobuf);
686 static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
689 struct qib_qp_priv *priv = qp->priv;
690 struct qib_verbs_txreq *tx;
693 spin_lock_irqsave(&qp->s_lock, flags);
694 spin_lock(&dev->rdi.pending_lock);
696 if (!list_empty(&dev->txreq_free)) {
697 struct list_head *l = dev->txreq_free.next;
700 spin_unlock(&dev->rdi.pending_lock);
701 spin_unlock_irqrestore(&qp->s_lock, flags);
702 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
704 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
705 list_empty(&priv->iowait)) {
707 qp->s_flags |= RVT_S_WAIT_TX;
708 list_add_tail(&priv->iowait, &dev->txwait);
710 qp->s_flags &= ~RVT_S_BUSY;
711 spin_unlock(&dev->rdi.pending_lock);
712 spin_unlock_irqrestore(&qp->s_lock, flags);
713 tx = ERR_PTR(-EBUSY);
718 static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
721 struct qib_verbs_txreq *tx;
724 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
725 /* assume the list is non-empty */
726 if (likely(!list_empty(&dev->txreq_free))) {
727 struct list_head *l = dev->txreq_free.next;
730 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
731 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
733 /* call slow path to get the extra lock */
734 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
735 tx = __get_txreq(dev, qp);
740 void qib_put_txreq(struct qib_verbs_txreq *tx)
742 struct qib_ibdev *dev;
744 struct qib_qp_priv *priv;
748 dev = to_idev(qp->ibqp.device);
750 if (atomic_dec_and_test(&qp->refcount))
756 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
757 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
758 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
759 tx->txreq.addr, tx->hdr_dwords << 2,
761 kfree(tx->align_buf);
764 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
766 /* Put struct back on free list */
767 list_add(&tx->txreq.list, &dev->txreq_free);
769 if (!list_empty(&dev->txwait)) {
770 /* Wake up first QP wanting a free struct */
771 priv = list_entry(dev->txwait.next, struct qib_qp_priv,
774 list_del_init(&priv->iowait);
775 atomic_inc(&qp->refcount);
776 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
778 spin_lock_irqsave(&qp->s_lock, flags);
779 if (qp->s_flags & RVT_S_WAIT_TX) {
780 qp->s_flags &= ~RVT_S_WAIT_TX;
781 qib_schedule_send(qp);
783 spin_unlock_irqrestore(&qp->s_lock, flags);
785 if (atomic_dec_and_test(&qp->refcount))
788 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
792 * This is called when there are send DMA descriptors that might be
793 * available.
795 * This is called with ppd->sdma_lock held.
797 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
799 struct rvt_qp *qp, *nqp;
800 struct qib_qp_priv *qpp, *nqpp;
801 struct rvt_qp *qps[20];
802 struct qib_ibdev *dev;
806 dev = &ppd->dd->verbs_dev;
807 spin_lock(&dev->rdi.pending_lock);
809 /* Search wait list for first QP wanting DMA descriptors. */
810 list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
813 if (qp->port_num != ppd->port)
815 if (n == ARRAY_SIZE(qps))
817 if (qpp->s_tx->txreq.sg_count > avail)
819 avail -= qpp->s_tx->txreq.sg_count;
820 list_del_init(&qpp->iowait);
821 atomic_inc(&qp->refcount);
825 spin_unlock(&dev->rdi.pending_lock);
827 for (i = 0; i < n; i++) {
829 spin_lock(&qp->s_lock);
830 if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
831 qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
832 qib_schedule_send(qp);
834 spin_unlock(&qp->s_lock);
835 if (atomic_dec_and_test(&qp->refcount))
841 * This is called with ppd->sdma_lock held.
843 static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
845 struct qib_verbs_txreq *tx =
846 container_of(cookie, struct qib_verbs_txreq, txreq);
847 struct rvt_qp *qp = tx->qp;
848 struct qib_qp_priv *priv = qp->priv;
850 spin_lock(&qp->s_lock);
852 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
853 else if (qp->ibqp.qp_type == IB_QPT_RC) {
854 struct qib_ib_header *hdr;
856 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
857 hdr = &tx->align_buf->hdr;
859 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
861 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
863 qib_rc_send_complete(qp, hdr);
865 if (atomic_dec_and_test(&priv->s_dma_busy)) {
866 if (qp->state == IB_QPS_RESET)
867 wake_up(&priv->wait_dma);
868 else if (qp->s_flags & RVT_S_WAIT_DMA) {
869 qp->s_flags &= ~RVT_S_WAIT_DMA;
870 qib_schedule_send(qp);
873 spin_unlock(&qp->s_lock);
878 static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
880 struct qib_qp_priv *priv = qp->priv;
884 spin_lock_irqsave(&qp->s_lock, flags);
885 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
886 spin_lock(&dev->rdi.pending_lock);
887 if (list_empty(&priv->iowait)) {
888 if (list_empty(&dev->memwait))
889 mod_timer(&dev->mem_timer, jiffies + 1);
890 qp->s_flags |= RVT_S_WAIT_KMEM;
891 list_add_tail(&priv->iowait, &dev->memwait);
893 spin_unlock(&dev->rdi.pending_lock);
894 qp->s_flags &= ~RVT_S_BUSY;
897 spin_unlock_irqrestore(&qp->s_lock, flags);
902 static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr,
903 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
904 u32 plen, u32 dwords)
906 struct qib_qp_priv *priv = qp->priv;
907 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
908 struct qib_devdata *dd = dd_from_dev(dev);
909 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
910 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
911 struct qib_verbs_txreq *tx;
912 struct qib_pio_header *phdr;
920 /* resend previously constructed packet */
921 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
925 tx = get_txreq(dev, qp);
929 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
930 be16_to_cpu(hdr->lrh[0]) >> 12);
932 atomic_inc(&qp->refcount);
934 tx->mr = qp->s_rdma_mr;
936 qp->s_rdma_mr = NULL;
937 tx->txreq.callback = sdma_complete;
938 if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
939 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
941 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
942 if (plen + 1 > dd->piosize2kmax_dwords)
943 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
947 * Don't try to DMA if it takes more descriptors than
948 * the queue holds.
950 ndesc = qib_count_sge(ss, len);
951 if (ndesc >= ppd->sdma_descq_cnt)
956 phdr = &dev->pio_hdrs[tx->hdr_inx];
957 phdr->pbc[0] = cpu_to_le32(plen);
958 phdr->pbc[1] = cpu_to_le32(control);
959 memcpy(&phdr->hdr, hdr, hdrwords << 2);
960 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
961 tx->txreq.sg_count = ndesc;
962 tx->txreq.addr = dev->pio_hdrs_phys +
963 tx->hdr_inx * sizeof(struct qib_pio_header);
964 tx->hdr_dwords = hdrwords + 2; /* add PBC length */
965 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
969 /* Allocate a buffer and copy the header and payload to it. */
970 tx->hdr_dwords = plen + 1;
971 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
974 phdr->pbc[0] = cpu_to_le32(plen);
975 phdr->pbc[1] = cpu_to_le32(control);
976 memcpy(&phdr->hdr, hdr, hdrwords << 2);
977 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
979 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
980 tx->hdr_dwords << 2, DMA_TO_DEVICE);
981 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
983 tx->align_buf = phdr;
984 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
985 tx->txreq.sg_count = 1;
986 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
993 ret = wait_kmem(dev, qp);
995 ibp->rvp.n_unaligned++;
1004 * If we are now in the error state, return zero to flush the
1005 * send work request.
1007 static int no_bufs_available(struct rvt_qp *qp)
1009 struct qib_qp_priv *priv = qp->priv;
1010 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
1011 struct qib_devdata *dd;
1012 unsigned long flags;
1016 * Note that as soon as want_buffer() is called and
1017 * possibly before it returns, qib_ib_piobufavail()
1018 * could be called. Therefore, put QP on the I/O wait list before
1019 * enabling the PIO avail interrupt.
1021 spin_lock_irqsave(&qp->s_lock, flags);
1022 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
1023 spin_lock(&dev->rdi.pending_lock);
1024 if (list_empty(&priv->iowait)) {
1026 qp->s_flags |= RVT_S_WAIT_PIO;
1027 list_add_tail(&priv->iowait, &dev->piowait);
1028 dd = dd_from_dev(dev);
1029 dd->f_wantpiobuf_intr(dd, 1);
1031 spin_unlock(&dev->rdi.pending_lock);
1032 qp->s_flags &= ~RVT_S_BUSY;
1035 spin_unlock_irqrestore(&qp->s_lock, flags);
1039 static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr,
1040 u32 hdrwords, struct rvt_sge_state *ss, u32 len,
1041 u32 plen, u32 dwords)
1043 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1044 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
1045 u32 *hdr = (u32 *) ibhdr;
1046 u32 __iomem *piobuf_orig;
1047 u32 __iomem *piobuf;
1049 unsigned long flags;
1054 control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
1055 be16_to_cpu(ibhdr->lrh[0]) >> 12);
1056 pbc = ((u64) control << 32) | plen;
1057 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
1058 if (unlikely(piobuf == NULL))
1059 return no_bufs_available(qp);
1063 * We have to flush after the PBC for correctness on some cpus
1064 * or the WC buffer can be written out of order.
1066 writeq(pbc, piobuf);
1067 piobuf_orig = piobuf;
1070 flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
1073 * If there is just the header portion, must flush before
1074 * writing last word of header for correctness, and after
1075 * the last header word (trigger word).
1079 qib_pio_copy(piobuf, hdr, hdrwords - 1);
1081 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
1084 qib_pio_copy(piobuf, hdr, hdrwords);
1090 qib_pio_copy(piobuf, hdr, hdrwords);
1093 /* The common case is aligned and contained in one segment. */
1094 if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
1095 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
1096 u32 *addr = (u32 *) ss->sge.vaddr;
1098 /* Update address before sending packet. */
1099 update_sge(ss, len);
1101 qib_pio_copy(piobuf, addr, dwords - 1);
1102 /* must flush everything early, before the trigger word */
1104 __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
1105 /* be sure trigger word is written */
1108 qib_pio_copy(piobuf, addr, dwords);
1111 copy_io(piobuf, ss, len, flush_wc);
1113 if (dd->flags & QIB_USE_SPCL_TRIG) {
1114 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
1117 __raw_writel(0xaebecede, piobuf_orig + spcl_off);
1119 qib_sendbuf_done(dd, pbufn);
1120 if (qp->s_rdma_mr) {
1121 rvt_put_mr(qp->s_rdma_mr);
1122 qp->s_rdma_mr = NULL;
1125 spin_lock_irqsave(&qp->s_lock, flags);
1126 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
1127 spin_unlock_irqrestore(&qp->s_lock, flags);
1128 } else if (qp->ibqp.qp_type == IB_QPT_RC) {
1129 spin_lock_irqsave(&qp->s_lock, flags);
1130 qib_rc_send_complete(qp, ibhdr);
1131 spin_unlock_irqrestore(&qp->s_lock, flags);
1137 * qib_verbs_send - send a packet
1138 * @qp: the QP to send on
1139 * @hdr: the packet header
1140 * @hdrwords: the number of 32-bit words in the header
1141 * @ss: the SGE to send
1142 * @len: the length of the packet in bytes
1144 * Return zero if packet is sent or queued OK.
1145 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
1147 int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr,
1148 u32 hdrwords, struct rvt_sge_state *ss, u32 len)
1150 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
1153 u32 dwords = (len + 3) >> 2;
1156 * Calculate the send buffer trigger address.
1157 * The +1 counts for the pbc control dword following the pbc length.
1159 plen = hdrwords + dwords + 1;
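/*
 * Worked example (illustrative numbers): a 10-dword header with a
 * 256-byte payload (dwords = 64) gives plen = 10 + 64 + 1 = 75.
 */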
1162 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
1163 * can defer SDMA restart until link goes ACTIVE without
1164 * worrying about just how we got there.
1166 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1167 !(dd->flags & QIB_HAS_SEND_DMA))
1168 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
1171 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
1177 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
1178 u64 *rwords, u64 *spkts, u64 *rpkts,
1182 struct qib_devdata *dd = ppd->dd;
1184 if (!(dd->flags & QIB_PRESENT)) {
1185 /* no hardware, freeze, etc. */
1189 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
1190 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
1191 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
1192 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
1193 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
1202 * qib_get_counters - get various chip counters
1203 * @ppd: the physical port data
1204 * @cntrs: counters are placed here
1206 * Return the counters needed by recv_pma_get_portcounters().
1208 int qib_get_counters(struct qib_pportdata *ppd,
1209 struct qib_verbs_counters *cntrs)
1213 if (!(ppd->dd->flags & QIB_PRESENT)) {
1214 /* no hardware, freeze, etc. */
1218 cntrs->symbol_error_counter =
1219 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
1220 cntrs->link_error_recovery_counter =
1221 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
1223 * The link downed counter counts when the other side downs the
1224 * connection. We add in the number of times we downed the link
1225 * due to local link integrity errors to compensate.
1227 cntrs->link_downed_counter =
1228 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
1229 cntrs->port_rcv_errors =
1230 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
1231 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
1232 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
1233 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
1234 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
1235 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
1236 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
1237 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
1238 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
1239 cntrs->port_rcv_errors +=
1240 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
1241 cntrs->port_rcv_errors +=
1242 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
1243 cntrs->port_rcv_remphys_errors =
1244 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
1245 cntrs->port_xmit_discards =
1246 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
1247 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
1248 QIBPORTCNTR_WORDSEND);
1249 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
1250 QIBPORTCNTR_WORDRCV);
1251 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
1252 QIBPORTCNTR_PKTSEND);
1253 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
1254 QIBPORTCNTR_PKTRCV);
1255 cntrs->local_link_integrity_errors =
1256 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
1257 cntrs->excessive_buffer_overrun_errors =
1258 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
1259 cntrs->vl15_dropped =
1260 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
1269 * qib_ib_piobufavail - callback when a PIO buffer is available
1270 * @dd: the device pointer
1272 * This is called from qib_intr() at interrupt level when a PIO buffer is
1273 * available after qib_verbs_send() returned an error that no buffers were
1274 * available. Disable the interrupt if there are no more QPs waiting.
1276 void qib_ib_piobufavail(struct qib_devdata *dd)
1278 struct qib_ibdev *dev = &dd->verbs_dev;
1279 struct list_head *list;
1280 struct rvt_qp *qps[5];
1282 unsigned long flags;
1284 struct qib_qp_priv *priv;
1286 list = &dev->piowait;
1290 * Note: checking that the piowait list is empty and clearing
1291 * the buffer available interrupt needs to be atomic or we
1292 * could end up with QPs on the wait list with the interrupt
1293 * enabled.
1295 spin_lock_irqsave(&dev->rdi.pending_lock, flags);
1296 while (!list_empty(list)) {
1297 if (n == ARRAY_SIZE(qps))
1299 priv = list_entry(list->next, struct qib_qp_priv, iowait);
1301 list_del_init(&priv->iowait);
1302 atomic_inc(&qp->refcount);
1305 dd->f_wantpiobuf_intr(dd, 0);
1307 spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
1309 for (i = 0; i < n; i++) {
1312 spin_lock_irqsave(&qp->s_lock, flags);
1313 if (qp->s_flags & RVT_S_WAIT_PIO) {
1314 qp->s_flags &= ~RVT_S_WAIT_PIO;
1315 qib_schedule_send(qp);
1317 spin_unlock_irqrestore(&qp->s_lock, flags);
1319 /* Notify qib_destroy_qp() if it is waiting. */
1320 if (atomic_dec_and_test(&qp->refcount))
1325 static int qib_query_port(struct ib_device *ibdev, u8 port,
1326 struct ib_port_attr *props)
1328 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1329 struct qib_ibport *ibp = to_iport(ibdev, port);
1330 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1334 memset(props, 0, sizeof(*props));
1335 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
1336 props->lmc = ppd->lmc;
1337 props->sm_lid = ibp->rvp.sm_lid;
1338 props->sm_sl = ibp->rvp.sm_sl;
1339 props->state = dd->f_iblink_state(ppd->lastibcstat);
1340 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
1341 props->port_cap_flags = ibp->rvp.port_cap_flags;
1342 props->gid_tbl_len = QIB_GUIDS_PER_PORT;
1343 props->max_msg_sz = 0x80000000;
1344 props->pkey_tbl_len = qib_get_npkeys(dd);
1345 props->bad_pkey_cntr = ibp->rvp.pkey_violations;
1346 props->qkey_viol_cntr = ibp->rvp.qkey_violations;
1347 props->active_width = ppd->link_width_active;
1348 /* See rate_show() */
1349 props->active_speed = ppd->link_speed_active;
1350 props->max_vl_num = qib_num_vls(ppd->vls_supported);
1351 props->init_type_reply = 0;
1353 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
1354 switch (ppd->ibmtu) {
1373 props->active_mtu = mtu;
1374 props->subnet_timeout = ibp->rvp.subnet_timeout;
1379 static int qib_modify_device(struct ib_device *device,
1380 int device_modify_mask,
1381 struct ib_device_modify *device_modify)
1383 struct qib_devdata *dd = dd_from_ibdev(device);
1387 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
1388 IB_DEVICE_MODIFY_NODE_DESC)) {
1393 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
1394 memcpy(device->node_desc, device_modify->node_desc, 64);
1395 for (i = 0; i < dd->num_pports; i++) {
1396 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1398 qib_node_desc_chg(ibp);
1402 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
1403 ib_qib_sys_image_guid =
1404 cpu_to_be64(device_modify->sys_image_guid);
1405 for (i = 0; i < dd->num_pports; i++) {
1406 struct qib_ibport *ibp = &dd->pport[i].ibport_data;
1408 qib_sys_guid_chg(ibp);
1418 static int qib_modify_port(struct ib_device *ibdev, u8 port,
1419 int port_modify_mask, struct ib_port_modify *props)
1421 struct qib_ibport *ibp = to_iport(ibdev, port);
1422 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1424 ibp->rvp.port_cap_flags |= props->set_port_cap_mask;
1425 ibp->rvp.port_cap_flags &= ~props->clr_port_cap_mask;
1426 if (props->set_port_cap_mask || props->clr_port_cap_mask)
1427 qib_cap_mask_chg(ibp);
1428 if (port_modify_mask & IB_PORT_SHUTDOWN)
1429 qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
1430 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
1431 ibp->rvp.qkey_violations = 0;
1435 static int qib_query_gid(struct ib_device *ibdev, u8 port,
1436 int index, union ib_gid *gid)
1438 struct qib_devdata *dd = dd_from_ibdev(ibdev);
1441 if (!port || port > dd->num_pports)
1444 struct qib_ibport *ibp = to_iport(ibdev, port);
1445 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1447 gid->global.subnet_prefix = ibp->rvp.gid_prefix;
1449 gid->global.interface_id = ppd->guid;
1450 else if (index < QIB_GUIDS_PER_PORT)
1451 gid->global.interface_id = ibp->guids[index - 1];
1459 int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
1461 if (ah_attr->sl > 15)
1467 static void qib_notify_new_ah(struct ib_device *ibdev,
1468 struct ib_ah_attr *ah_attr,
1471 struct qib_ibport *ibp;
1472 struct qib_pportdata *ppd;
1475 * Do not trust reading anything from rvt_ah at this point as it is not
1476 * done being set up. We can, however, modify things which we need to set.
1479 ibp = to_iport(ibdev, ah_attr->port_num);
1480 ppd = ppd_from_ibp(ibp);
1481 ah->vl = ibp->sl_to_vl[ah->attr.sl];
1482 ah->log_pmtu = ilog2(ppd->ibmtu);
1485 struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
1487 struct ib_ah_attr attr;
1488 struct ib_ah *ah = ERR_PTR(-EINVAL);
1491 memset(&attr, 0, sizeof(attr));
1493 attr.port_num = ppd_from_ibp(ibp)->port;
1495 qp0 = rcu_dereference(ibp->rvp.qp[0]);
1497 ah = ib_create_ah(qp0->ibqp.pd, &attr);
1503 * qib_get_npkeys - return the size of the PKEY table for context 0
1504 * @dd: the qlogic_ib device
1506 unsigned qib_get_npkeys(struct qib_devdata *dd)
1508 return ARRAY_SIZE(dd->rcd[0]->pkeys);
1512 * Return the indexed PKEY from the port PKEY table.
1513 * No need to validate rcd[ctxt]; the port is set up if we are here.
1515 unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
1517 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1518 struct qib_devdata *dd = ppd->dd;
1519 unsigned ctxt = ppd->hw_pidx;
1522 /* dd->rcd null if mini_init or some init failures */
1523 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
1526 ret = dd->rcd[ctxt]->pkeys[index];
1531 static void init_ibport(struct qib_pportdata *ppd)
1533 struct qib_verbs_counters cntrs;
1534 struct qib_ibport *ibp = &ppd->ibport_data;
1536 spin_lock_init(&ibp->rvp.lock);
1537 /* Set the prefix to the default value (see ch. 4.1.1) */
1538 ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
1539 ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
1540 ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
1541 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
1542 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
1543 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
1544 IB_PORT_OTHER_LOCAL_CHANGES_SUP;
1545 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
1546 ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
1547 ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
1548 ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1549 ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1550 ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1551 ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1553 /* Snapshot current HW counters to "clear" them. */
1554 qib_get_counters(ppd, &cntrs);
1555 ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
1556 ibp->z_link_error_recovery_counter =
1557 cntrs.link_error_recovery_counter;
1558 ibp->z_link_downed_counter = cntrs.link_downed_counter;
1559 ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
1560 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
1561 ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
1562 ibp->z_port_xmit_data = cntrs.port_xmit_data;
1563 ibp->z_port_rcv_data = cntrs.port_rcv_data;
1564 ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
1565 ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
1566 ibp->z_local_link_integrity_errors =
1567 cntrs.local_link_integrity_errors;
1568 ibp->z_excessive_buffer_overrun_errors =
1569 cntrs.excessive_buffer_overrun_errors;
1570 ibp->z_vl15_dropped = cntrs.vl15_dropped;
1571 RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
1572 RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
1575 static int qib_port_immutable(struct ib_device *ibdev, u8 port_num,
1576 struct ib_port_immutable *immutable)
1578 struct ib_port_attr attr;
1581 err = qib_query_port(ibdev, port_num, &attr);
1585 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1586 immutable->gid_tbl_len = attr.gid_tbl_len;
1587 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
1588 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
1594 * qib_fill_device_attr - Fill in rvt dev info device attributes.
1595 * @dd: the device data structure
1597 static void qib_fill_device_attr(struct qib_devdata *dd)
1599 struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
1601 memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
1603 rdi->dparms.props.max_pd = ib_qib_max_pds;
1604 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1605 rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
1606 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
1607 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
1608 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
1609 rdi->dparms.props.page_size_cap = PAGE_SIZE;
1610 rdi->dparms.props.vendor_id =
1611 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
1612 rdi->dparms.props.vendor_part_id = dd->deviceid;
1613 rdi->dparms.props.hw_ver = dd->minrev;
1614 rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
1615 rdi->dparms.props.max_mr_size = ~0ULL;
1616 rdi->dparms.props.max_qp = ib_qib_max_qps;
1617 rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
1618 rdi->dparms.props.max_sge = ib_qib_max_sges;
1619 rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
1620 rdi->dparms.props.max_cq = ib_qib_max_cqs;
1621 rdi->dparms.props.max_cqe = ib_qib_max_cqes;
1622 rdi->dparms.props.max_ah = ib_qib_max_ahs;
1623 rdi->dparms.props.max_mr = rdi->lkey_table.max;
1624 rdi->dparms.props.max_fmr = rdi->lkey_table.max;
1625 rdi->dparms.props.max_map_per_fmr = 32767;
1626 rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
1627 rdi->dparms.props.max_qp_init_rd_atom = 255;
1628 rdi->dparms.props.max_srq = ib_qib_max_srqs;
1629 rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
1630 rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
1631 rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
1632 rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
1633 rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
1634 rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
1635 rdi->dparms.props.max_total_mcast_qp_attach =
1636 rdi->dparms.props.max_mcast_qp_attach *
1637 rdi->dparms.props.max_mcast_grp;
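/*
 * With the module parameter defaults above this works out to
 * 16384 groups * 16 attached QPs per group = 262144 total attaches.
 */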
1641 * qib_register_ib_device - register our device with the infiniband core
1642 * @dd: the device data structure
1643 * Return 0 on success or a negative errno on failure.
1645 int qib_register_ib_device(struct qib_devdata *dd)
1647 struct qib_ibdev *dev = &dd->verbs_dev;
1648 struct ib_device *ibdev = &dev->rdi.ibdev;
1649 struct qib_pportdata *ppd = dd->pport;
1653 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
1654 for (i = 0; i < dd->num_pports; i++)
1655 init_ibport(ppd + i);
1657 /* Only need to initialize non-zero fields. */
1658 spin_lock_init(&dev->n_qps_lock);
1659 init_timer(&dev->mem_timer);
1660 dev->mem_timer.function = mem_timer;
1661 dev->mem_timer.data = (unsigned long) dev;
1663 qpt_mask = dd->qpn_mask;
1665 INIT_LIST_HEAD(&dev->piowait);
1666 INIT_LIST_HEAD(&dev->dmawait);
1667 INIT_LIST_HEAD(&dev->txwait);
1668 INIT_LIST_HEAD(&dev->memwait);
1669 INIT_LIST_HEAD(&dev->txreq_free);
1671 if (ppd->sdma_descq_cnt) {
1672 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
1673 ppd->sdma_descq_cnt *
1674 sizeof(struct qib_pio_header),
1675 &dev->pio_hdrs_phys,
1677 if (!dev->pio_hdrs) {
1683 for (i = 0; i < ppd->sdma_descq_cnt; i++) {
1684 struct qib_verbs_txreq *tx;
1686 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
1692 list_add(&tx->txreq.list, &dev->txreq_free);
1696 * The system image GUID is supposed to be the same for all
1697 * IB HCAs in a single system but since there can be other
1698 * device types in the system, we can't be sure this is unique.
1700 if (!ib_qib_sys_image_guid)
1701 ib_qib_sys_image_guid = ppd->guid;
1703 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
1704 ibdev->owner = THIS_MODULE;
1705 ibdev->node_guid = ppd->guid;
1706 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
1707 ibdev->uverbs_cmd_mask =
1708 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
1709 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1710 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
1711 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
1712 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
1713 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
1714 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
1715 (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
1716 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
1717 (1ull << IB_USER_VERBS_CMD_REG_MR) |
1718 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
1719 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
1720 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
1721 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
1722 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
1723 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
1724 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
1725 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
1726 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
1727 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
1728 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
1729 (1ull << IB_USER_VERBS_CMD_POST_SEND) |
1730 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
1731 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
1732 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
1733 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
1734 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
1735 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
1736 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
1737 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
1738 ibdev->node_type = RDMA_NODE_IB_CA;
1739 ibdev->phys_port_cnt = dd->num_pports;
1740 ibdev->num_comp_vectors = 1;
1741 ibdev->dma_device = &dd->pcidev->dev;
1742 ibdev->query_device = NULL;
1743 ibdev->modify_device = qib_modify_device;
1744 ibdev->query_port = qib_query_port;
1745 ibdev->modify_port = qib_modify_port;
1746 ibdev->query_pkey = NULL;
1747 ibdev->query_gid = qib_query_gid;
1748 ibdev->alloc_ucontext = NULL;
1749 ibdev->dealloc_ucontext = NULL;
1750 ibdev->alloc_pd = NULL;
1751 ibdev->dealloc_pd = NULL;
1752 ibdev->create_ah = NULL;
1753 ibdev->destroy_ah = NULL;
1754 ibdev->modify_ah = NULL;
1755 ibdev->query_ah = NULL;
1756 ibdev->create_qp = NULL;
1757 ibdev->modify_qp = qib_modify_qp;
1758 ibdev->query_qp = NULL;
1759 ibdev->destroy_qp = qib_destroy_qp;
1760 ibdev->post_send = NULL;
1761 ibdev->post_recv = NULL;
1762 ibdev->create_cq = NULL;
1763 ibdev->destroy_cq = NULL;
1764 ibdev->resize_cq = NULL;
1765 ibdev->poll_cq = NULL;
1766 ibdev->req_notify_cq = NULL;
1767 ibdev->get_dma_mr = NULL;
1768 ibdev->reg_user_mr = NULL;
1769 ibdev->dereg_mr = NULL;
1770 ibdev->alloc_mr = NULL;
1771 ibdev->map_mr_sg = NULL;
1772 ibdev->alloc_fmr = NULL;
1773 ibdev->map_phys_fmr = NULL;
1774 ibdev->unmap_fmr = NULL;
1775 ibdev->dealloc_fmr = NULL;
1776 ibdev->attach_mcast = NULL;
1777 ibdev->detach_mcast = NULL;
1778 ibdev->process_mad = qib_process_mad;
1780 ibdev->dma_ops = NULL;
1781 ibdev->get_port_immutable = qib_port_immutable;
1783 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
1784 "Intel Infiniband HCA %s", init_utsname()->nodename);
1787 * Fill in rvt info object.
1789 dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
1790 dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
1791 dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
1792 dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
1793 dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
1794 dd->verbs_dev.rdi.driver_f.alloc_qpn = alloc_qpn;
1795 dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
1796 dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
1797 dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
1798 dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
1799 dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
1800 dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
1802 dd->verbs_dev.rdi.flags = 0;
1804 dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
1805 dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
1806 dd->verbs_dev.rdi.dparms.qpn_start = 1;
1807 dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
1808 dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
1809 dd->verbs_dev.rdi.dparms.qpn_inc = 1;
1810 dd->verbs_dev.rdi.dparms.qos_shift = 1;
1811 dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
1812 dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
1813 dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
1814 dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
1815 snprintf(dd->verbs_dev.rdi.dparms.cq_name,
1816 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
1817 "qib_cq%d", dd->unit);
1819 qib_fill_device_attr(dd);
1822 for (i = 0; i < dd->num_pports; i++, ppd++) {
1823 ctxt = ppd->hw_pidx;
1824 rvt_init_port(&dd->verbs_dev.rdi,
1825 &ppd->ibport_data.rvp,
1827 dd->rcd[ctxt]->pkeys);
1830 ret = rvt_register_device(&dd->verbs_dev.rdi);
1834 ret = qib_verbs_register_sysfs(dd);
1841 rvt_unregister_device(&dd->verbs_dev.rdi);
1843 while (!list_empty(&dev->txreq_free)) {
1844 struct list_head *l = dev->txreq_free.next;
1845 struct qib_verbs_txreq *tx;
1848 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1851 if (ppd->sdma_descq_cnt)
1852 dma_free_coherent(&dd->pcidev->dev,
1853 ppd->sdma_descq_cnt *
1854 sizeof(struct qib_pio_header),
1855 dev->pio_hdrs, dev->pio_hdrs_phys);
1857 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
1861 void qib_unregister_ib_device(struct qib_devdata *dd)
1863 struct qib_ibdev *dev = &dd->verbs_dev;
1865 qib_verbs_unregister_sysfs(dd);
1867 rvt_unregister_device(&dd->verbs_dev.rdi);
1869 if (!list_empty(&dev->piowait))
1870 qib_dev_err(dd, "piowait list not empty!\n");
1871 if (!list_empty(&dev->dmawait))
1872 qib_dev_err(dd, "dmawait list not empty!\n");
1873 if (!list_empty(&dev->txwait))
1874 qib_dev_err(dd, "txwait list not empty!\n");
1875 if (!list_empty(&dev->memwait))
1876 qib_dev_err(dd, "memwait list not empty!\n");
1878 del_timer_sync(&dev->mem_timer);
1879 while (!list_empty(&dev->txreq_free)) {
1880 struct list_head *l = dev->txreq_free.next;
1881 struct qib_verbs_txreq *tx;
1884 tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
1887 if (dd->pport->sdma_descq_cnt)
1888 dma_free_coherent(&dd->pcidev->dev,
1889 dd->pport->sdma_descq_cnt *
1890 sizeof(struct qib_pio_header),
1891 dev->pio_hdrs, dev->pio_hdrs_phys);
1895 * This must be called with s_lock held.
1897 void qib_schedule_send(struct rvt_qp *qp)
1899 struct qib_qp_priv *priv = qp->priv;
1900 if (qib_send_ok(qp)) {
1901 struct qib_ibport *ibp =
1902 to_iport(qp->ibqp.device, qp->port_num);
1903 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1905 queue_work(ppd->qib_wq, &priv->s_work);