/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include <rdma/cxgb3-abi.h>
#include "common.h"
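
/*
 * Each user context carries a list of iwch_mm_entry records describing
 * queue/doorbell memory that the userspace library is expected to mmap():
 * the create verbs below allocate a key, stash {key, addr, len} on the
 * list, and iwch_mmap() later matches and consumes the entry by key/length.
 */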
static void iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	pr_debug("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
}
static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
			       struct ib_udata *udata)
{
	struct ib_device *ibdev = ucontext->device;
	struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return 0;
}
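
/*
 * CQ teardown drops the creation reference and then waits for any
 * outstanding references (taken by code that looked the CQ up by ID,
 * e.g. the event handlers) to go away before destroying HW state.
 */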
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
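
/*
 * For user CQs the CQE ring is mmap()ed directly by libcxgb3: the create
 * response carries an mmap key allocated under ucontext->mmap_lock, which
 * userspace hands back through mmap(2) and iwch_mmap() resolves.
 */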
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {
		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof(uresp)) {
			if (!warned++)
				pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			uresp.reserved = 0;
			resplen = sizeof(uresp);
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
		 (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}
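
/*
 * CQ resize is compiled out ("#ifdef notyet"): the quiesce/copy/resize
 * sequence below is kept for reference and the verb returns -ENOSYS at
 * runtime.
 */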
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	pr_debug("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe + 1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr))
		return -ENOMEM;

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret)
		pr_err("%s - cxio_destroy_cq failed %d\n", __func__, ret);

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
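
/*
 * Userspace keeps its own CQ read pointer; pull it in before arming so
 * the HW is not re-armed for CQEs the library has already consumed.
 */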
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
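
/*
 * Two classes of mappings are served here: the T3 user doorbell register
 * (uncached I/O mapping, write-only) and WQ/CQ queue memory (ordinary
 * contiguous DMA memory).  The key encoded in vm_pgoff selects the
 * iwch_mm_entry recorded at create time.
 */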
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
		 key, len);

	if (vma->vm_start & (PAGE_SIZE-1))
		return -EINVAL;

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
static void iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
}
static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct iwch_pd *php = to_iwch_pd(pd);
	struct ib_device *ibdev = pd->device;
	u32 pdid;
	struct iwch_dev *rhp;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return -EINVAL;

	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};

		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			iwch_deallocate_pd(&php->ibpd);
			return -EFAULT;
		}
	}
	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return 0;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	pr_debug("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
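
/*
 * T3 TPT entries carry only a 32-bit length, so a DMA MR can cover at
 * most 4GB of physical memory; the registration is refused entirely on
 * platforms with wider than 32-bit physical addressing.
 */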
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once("Cannot support dma_mrs on this platform\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
		 __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, i;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct sg_dma_page_iter sg_iter;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = PAGE_SHIFT;

	n = ib_umem_num_pages(mhp->umem);

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = iwch_write_pbl(mhp, pages, i, n);
			if (err)
				goto pbl_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
			 uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
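
/*
 * Only type-1 memory windows are supported; anything else is rejected
 * before touching the adapter.
 */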
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
				   struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}
static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = -ENOMEM;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		goto err;

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages)
		goto pl_err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	if (ret)
		goto err3;

	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
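
/*
 * iwch_set_page() is the per-page callback handed to ib_sg_to_pages()
 * by iwch_map_mr_sg(); it accumulates DMA addresses into the pages[]
 * array sized at alloc_mr time.
 */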
static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}
static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
		 ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
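
/*
 * QP sizing: the RQ depth must be (entries + 1) rounded up to a power
 * of two (with a floor of 16 on T3), and kernel QPs get extra WQ slots
 * because fastreg WRs can occupy two WR fragments each.
 */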
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	pr_debug("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = rdma_udata_to_drv_context(udata, struct iwch_ucontext,
					     ibucontext);
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
		 wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {
		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
		 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
		 qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
		 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
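
/*
 * Only state transitions and RDMA access-flag changes are translated to
 * the firmware here; other attribute bits in attr_mask are ignored.
 */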
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	pr_debug("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
void iwch_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	pr_debug("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
		 __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}
static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct iwch_dev *dev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_send_sge = dev->attr.max_sge_per_wr;
	props->max_recv_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}
static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev =
			rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);

	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev =
			rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev =
			rdma_device_to_drv_device(dev, struct iwch_dev, ibdev);

	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
static DEVICE_ATTR_RO(board_id);
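
/*
 * Device-level HW counters exported through the rdma_hw_stats interface.
 * The enum indexes both names[] and the value array filled in by
 * iwch_get_mib(); the BUILD_BUG_ON() in iwch_alloc_stats() keeps the two
 * in sync.
 */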
enum counters {
	IPINRECEIVES,
	IPINHDRERRORS,
	IPINADDRERRORS,
	IPINUNKNOWNPROTOS,
	IPINDISCARDS,
	IPINDELIVERS,
	IPOUTREQUESTS,
	IPOUTDISCARDS,
	IPOUTNOROUTES,
	IPREASMTIMEOUT,
	IPREASMREQDS,
	IPREASMOKS,
	IPREASMFAILS,
	TCPACTIVEOPENS,
	TCPPASSIVEOPENS,
	TCPATTEMPTFAILS,
	TCPESTABRESETS,
	TCPCURRESTAB,
	TCPINSEGS,
	TCPOUTSEGS,
	TCPRETRANSSEGS,
	TCPINERRS,
	TCPOUTRSTS,
	TCPRTOMIN,
	TCPRTOMAX,
	NR_COUNTERS
};

static const char * const names[] = {
	[IPINRECEIVES] = "ipInReceives",
	[IPINHDRERRORS] = "ipInHdrErrors",
	[IPINADDRERRORS] = "ipInAddrErrors",
	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
	[IPINDISCARDS] = "ipInDiscards",
	[IPINDELIVERS] = "ipInDelivers",
	[IPOUTREQUESTS] = "ipOutRequests",
	[IPOUTDISCARDS] = "ipOutDiscards",
	[IPOUTNOROUTES] = "ipOutNoRoutes",
	[IPREASMTIMEOUT] = "ipReasmTimeout",
	[IPREASMREQDS] = "ipReasmReqds",
	[IPREASMOKS] = "ipReasmOKs",
	[IPREASMFAILS] = "ipReasmFails",
	[TCPACTIVEOPENS] = "tcpActiveOpens",
	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
	[TCPATTEMPTFAILS] = "tcpAttemptFails",
	[TCPESTABRESETS] = "tcpEstabResets",
	[TCPCURRESTAB] = "tcpCurrEstab",
	[TCPINSEGS] = "tcpInSegs",
	[TCPOUTSEGS] = "tcpOutSegs",
	[TCPRETRANSSEGS] = "tcpRetransSegs",
	[TCPINERRS] = "tcpInErrs",
	[TCPOUTRSTS] = "tcpOutRsts",
	[TCPRTOMIN] = "tcpRtoMin",
	[TCPRTOMAX] = "tcpRtoMax",
};
static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	/* Our driver only supports device level stats */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	if (port != 0 || !stats)
		return -ENOSYS;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
	stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
	stats->value[IPREASMREQDS] = m.ipReasmReqds;
	stats->value[IPREASMOKS] = m.ipReasmOKs;
	stats->value[IPREASMFAILS] = m.ipReasmFails;
	stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
	stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
	stats->value[TCPRTOMIN] = m.tcpRtoMin;
	stats->value[TCPRTOMAX] = m.tcpRtoMax;

	return stats->num_counters;
}
static struct attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group iwch_attr_group = {
	.attrs = iwch_class_attributes,
};
static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
{
	struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}
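
/*
 * Verb dispatch table wired into the IB core via ib_set_device_ops().
 * The INIT_RDMA_OBJ_SIZE() entries let the core allocate the driver's
 * PD and ucontext containers itself.
 */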
static const struct ib_device_ops iwch_dev_ops = {
	.alloc_hw_stats = iwch_alloc_stats,
	.alloc_mr = iwch_alloc_mr,
	.alloc_mw = iwch_alloc_mw,
	.alloc_pd = iwch_allocate_pd,
	.alloc_ucontext = iwch_alloc_ucontext,
	.create_cq = iwch_create_cq,
	.create_qp = iwch_create_qp,
	.dealloc_mw = iwch_dealloc_mw,
	.dealloc_pd = iwch_deallocate_pd,
	.dealloc_ucontext = iwch_dealloc_ucontext,
	.dereg_mr = iwch_dereg_mr,
	.destroy_cq = iwch_destroy_cq,
	.destroy_qp = iwch_destroy_qp,
	.get_dev_fw_str = get_dev_fw_ver_str,
	.get_dma_mr = iwch_get_dma_mr,
	.get_hw_stats = iwch_get_mib,
	.get_port_immutable = iwch_port_immutable,
	.map_mr_sg = iwch_map_mr_sg,
	.mmap = iwch_mmap,
	.modify_qp = iwch_ib_modify_qp,
	.poll_cq = iwch_poll_cq,
	.post_recv = iwch_post_receive,
	.post_send = iwch_post_send,
	.query_device = iwch_query_device,
	.query_gid = iwch_query_gid,
	.query_pkey = iwch_query_pkey,
	.query_port = iwch_query_port,
	.reg_user_mr = iwch_reg_user_mr,
	.req_notify_cq = iwch_arm_cq,
	.resize_cq = iwch_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
};
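
/*
 * Registration fills in the ib_device, allocates the iWARP CM ops
 * (dev->ibdev.iwcm) and hands the device to the core; on failure the
 * iwcm allocation is the only thing that needs undoing here.
 */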
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm = kzalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;
	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
	       sizeof(dev->ibdev.iwcm->ifname));

	dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
	rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
	ib_set_device_ops(&dev->ibdev, &iwch_dev_ops);
	ret = ib_register_device(&dev->ibdev, "cxgb3_%d");
	if (ret)
		kfree(dev->ibdev.iwcm);
	return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
	pr_debug("%s iwch_dev %p\n", __func__, dev);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}