/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

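/*
 * Fill in the common fields of a subnet management packet (SMP) so it
 * can be used as a Get request through the firmware's MAD_IFC
 * interface.
 */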
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method        = IB_MGMT_METHOD_GET;
}

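/*
 * Device attributes are assembled from two sources: limits the driver
 * computed at probe time (mdev->limits) and the NodeInfo attribute,
 * fetched from the firmware with a locally-executed MAD.
 */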
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			      struct ib_udata *uhw)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mthca_dev *mdev = to_mdev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver              = mdev->fw_ver;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(mdev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->device_cap_flags    = mdev->device_cap_flags;
	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

	props->max_mr_size         = ~0ull;
	props->page_size_cap       = mdev->limits.page_size_cap;
	props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
	props->max_qp_wr           = mdev->limits.max_wqes;
	props->max_send_sge        = mdev->limits.max_sg;
	props->max_recv_sge        = mdev->limits.max_sg;
	props->max_sge_rd          = mdev->limits.max_sg;
	props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
	props->max_cqe             = mdev->limits.max_cqes;
	props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
	props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
	props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
	props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
	props->max_srq_wr          = mdev->limits.max_srq_wqes;
	props->max_srq_sge         = mdev->limits.max_srq_sge;
	props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
	props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
					IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys           = mdev->limits.pkey_table_len;
	props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
	props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;

	err = 0;
 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

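/*
 * Port attributes are decoded from the PortInfo attribute at the byte
 * offsets defined by the InfiniBand specification.
 */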
static int mthca_query_port(struct ib_device *ibdev,
			    u8 port, struct ib_port_attr *props)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc               = out_mad->data[34] & 0x7;
	props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl             = out_mad->data[36] & 0xf;
	props->state             = out_mad->data[32] & 0xf;
	props->phys_state        = out_mad->data[33] >> 4;
	props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
	props->max_msg_sz        = 0x80000000;
	props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
	props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width      = out_mad->data[31] & 0xf;
	props->active_speed      = out_mad->data[35] >> 4;
	props->max_mtu           = out_mad->data[41] & 0xf;
	props->active_mtu        = out_mad->data[36] >> 4;
	props->subnet_timeout    = out_mad->data[51] & 0x1f;
	props->max_vl_num        = out_mad->data[37] >> 4;
	props->init_type_reply   = out_mad->data[41] >> 4;

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
			       int mask,
			       struct ib_device_modify *props)
{
	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
			return -ERESTARTSYS;
		memcpy(ibdev->node_desc, props->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	}

	return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
			     u8 port, int port_modify_mask,
			     struct ib_port_modify *props)
{
	struct mthca_set_ib_param set_ib;
	struct ib_port_attr attr;
	int err;

	if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
		return -ERESTARTSYS;

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	set_ib.set_si_guid     = 0;
	set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

	set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
	if (err)
		goto out;

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
			    u8 port, u16 index, u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_query_gid(struct ib_device *ibdev, u8 port,
			   int index, union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
			    port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

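/*
 * A user context owns one user access region (UAR) page and a table
 * of userspace doorbell records.  Both are released again if copying
 * the response back to userspace fails.
 */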
static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
				struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mthca_alloc_ucontext_resp uresp = {};
	struct mthca_ucontext *context = to_mucontext(uctx);
	int err;

	if (!(to_mdev(ibdev)->active))
		return -EAGAIN;

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
	if (mthca_is_memfree(to_mdev(ibdev)))
		uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
	else
		uresp.uarc_size = 0;

	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
	if (err)
		return err;

	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
	if (IS_ERR(context->db_tab)) {
		err = PTR_ERR(context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		return err;
	}

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
		mthca_uar_free(to_mdev(ibdev), &context->uar);
		return -EFAULT;
	}

	context->reg_mr_warned = 0;

	return 0;
}

static void mthca_dealloc_ucontext(struct ib_ucontext *context)
{
	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
				  to_mucontext(context)->db_tab);
	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
}

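/*
 * Map the context's UAR page into the process address space so that
 * userspace can ring doorbells directly.  The mapping must be exactly
 * one page and is made non-cached since it targets device registers.
 */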
static int mthca_mmap_uar(struct ib_ucontext *context,
			  struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start,
			       to_mucontext(context)->uar.pfn,
			       PAGE_SIZE, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct mthca_pd *pd = to_mpd(ibpd);
	int err;

	err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
	if (err)
		return err;

	if (udata) {
		if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
			mthca_pd_free(to_mdev(ibdev), pd);
			return -EFAULT;
		}
	}

	return 0;
}

static int mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
	return 0;
}

static int mthca_ah_create(struct ib_ah *ibah,
			   struct rdma_ah_init_attr *init_attr,
			   struct ib_udata *udata)
{
	struct mthca_ah *ah = to_mah(ibah);

	return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd),
			       init_attr->ah_attr, ah);
}

static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
	mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
	return 0;
}

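/*
 * For userspace SRQs the doorbell record supplied in the create
 * command is mapped before the SRQ itself is allocated, and unmapped
 * again on any failure path.
 */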
static int mthca_create_srq(struct ib_srq *ibsrq,
			    struct ib_srq_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct mthca_create_srq ucmd;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int err;

	if (init_attr->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
					context->db_tab, ucmd.db_index,
					ucmd.db_page);

		if (err)
			return err;

		srq->mr.ibmr.lkey = ucmd.lkey;
		srq->db_index     = ucmd.db_index;
	}

	err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
			      &init_attr->attr, srq, udata);

	if (err && udata)
		mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
				    context->db_tab, ucmd.db_index);

	if (err)
		return err;

	if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
		mthca_free_srq(to_mdev(ibsrq->device), srq);
		return -EFAULT;
	}

	return 0;
}

static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
				    context->db_tab, to_msrq(srq)->db_index);
	}

	mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
	return 0;
}

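/*
 * Userspace QPs carry two doorbell records (one for the send queue,
 * one for the receive queue); both must be mapped before
 * mthca_alloc_qp() and unmapped if it fails.  QP0/QP1 go through the
 * special mthca_alloc_sqp() path instead.
 */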
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_create_qp ucmd;
	struct mthca_qp *qp;
	int err;

	if (init_attr->create_flags)
		return ERR_PTR(-EOPNOTSUPP);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		if (udata) {
			if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
				kfree(qp);
				return ERR_PTR(-EFAULT);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.sq_db_index, ucmd.sq_db_page);
			if (err) {
				kfree(qp);
				return ERR_PTR(err);
			}

			err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
						context->db_tab,
						ucmd.rq_db_index, ucmd.rq_db_page);
			if (err) {
				mthca_unmap_user_db(to_mdev(pd->device),
						    &context->uar,
						    context->db_tab,
						    ucmd.sq_db_index);
				kfree(qp);
				return ERR_PTR(err);
			}

			qp->mr.ibmr.lkey = ucmd.lkey;
			qp->sq.db_index  = ucmd.sq_db_index;
			qp->rq.db_index  = ucmd.rq_db_index;
		}

		err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
				     to_mcq(init_attr->send_cq),
				     to_mcq(init_attr->recv_cq),
				     init_attr->qp_type, init_attr->sq_sig_type,
				     &init_attr->cap, qp, udata);

		if (err && udata) {
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.sq_db_index);
			mthca_unmap_user_db(to_mdev(pd->device),
					    &context->uar,
					    context->db_tab,
					    ucmd.rq_db_index);
		}

		qp->ibqp.qp_num = qp->qpn;
		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);
		qp->sqp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
		if (!qp->sqp) {
			kfree(qp);
			return ERR_PTR(-ENOMEM);
		}

		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
				      to_mcq(init_attr->send_cq),
				      to_mcq(init_attr->recv_cq),
				      init_attr->sq_sig_type, &init_attr->cap,
				      qp->ibqp.qp_num, init_attr->port_num,
				      qp, udata);
		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (err) {
		kfree(qp->sqp);
		kfree(qp);
		return ERR_PTR(err);
	}

	init_attr->cap.max_send_wr     = qp->sq.max;
	init_attr->cap.max_recv_wr     = qp->rq.max;
	init_attr->cap.max_send_sge    = qp->sq.max_gs;
	init_attr->cap.max_recv_sge    = qp->rq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(qp->device),
				    &context->uar,
				    context->db_tab,
				    to_mqp(qp)->sq.db_index);
		mthca_unmap_user_db(to_mdev(qp->device),
				    &context->uar,
				    context->db_tab,
				    to_mqp(qp)->rq.db_index);
	}
	mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
	kfree(to_mqp(qp)->sqp);
	kfree(to_mqp(qp));
	return 0;
}

static int mthca_create_cq(struct ib_cq *ibcq,
			   const struct ib_cq_init_attr *attr,
			   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	struct mthca_create_cq ucmd;
	struct mthca_cq *cq;
	int nent;
	int err;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
		return -EINVAL;

	if (udata) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
					context->db_tab, ucmd.set_db_index,
					ucmd.set_db_page);
		if (err)
			return err;

		err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
					context->db_tab, ucmd.arm_db_index,
					ucmd.arm_db_page);
		if (err)
			goto err_unmap_set;
	}

	cq = to_mcq(ibcq);

	if (udata) {
		cq->buf.mr.ibmr.lkey = ucmd.lkey;
		cq->set_ci_db_index  = ucmd.set_db_index;
		cq->arm_db_index     = ucmd.arm_db_index;
	}

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	err = mthca_init_cq(to_mdev(ibdev), nent, context,
			    udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
			    cq);
	if (err)
		goto err_unmap_arm;

	if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
		mthca_free_cq(to_mdev(ibdev), cq);
		err = -EFAULT;
		goto err_unmap_arm;
	}

	cq->resize_buf = NULL;

	return 0;

err_unmap_arm:
	if (udata)
		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
				    context->db_tab, ucmd.arm_db_index);

err_unmap_set:
	if (udata)
		mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
				    context->db_tab, ucmd.set_db_index);

	return err;
}

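/*
 * Allocate a resize buffer under cq->lock and advance it through the
 * CQ_RESIZE_ALLOC -> CQ_RESIZE_READY states, so that the completion
 * path can tell whether a resize is in progress.
 */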
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
				  int entries)
{
	int ret;

	spin_lock_irq(&cq->lock);
	if (cq->resize_buf) {
		ret = -EBUSY;
		goto unlock;
	}

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf) {
		ret = -ENOMEM;
		goto unlock;
	}

	cq->resize_buf->state = CQ_RESIZE_ALLOC;

	ret = 0;

unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

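/*
 * Resizing rounds the requested size up to a power of two (one slot
 * stays reserved, hence the +1) and issues a RESIZE_CQ firmware
 * command.  For kernel CQs the old buffer is swapped out and freed
 * here; for user CQs userspace owns the buffers and only the CQE
 * count is updated.
 */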
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

	if (ret) {
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			mthca_cq_resize_copy_cqes(cq);
			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	if (udata) {
		struct mthca_ucontext *context =
			rdma_udata_to_drv_context(
				udata,
				struct mthca_ucontext,
				ibucontext);

		mthca_unmap_user_db(to_mdev(cq->device),
				    &context->uar,
				    context->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &context->uar,
				    context->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	return 0;
}

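/*
 * Translate IB verbs access flags into MPT (memory protection table)
 * flag bits.  Local read access is always granted.
 */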
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}

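/*
 * Register a userspace memory region: pin the pages with
 * ib_umem_get(), then feed their DMA addresses to the MTT (memory
 * translation table) in chunks no larger than what mthca_write_mtt()
 * and one scratch page can handle.
 */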
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct ib_block_iter biter;
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int n, i;
	int err = 0;
	int write_mtt_size;

	if (udata->inlen < sizeof ucmd) {
		if (!context->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   current->comm);
			mthca_warn(dev, "  Update libmthca to fix this.\n");
		}
		++context->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	n = ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE);

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
		pages[i++] = rdma_block_iter_dma_address(&biter);

		/*
		 * Be friendly to write_mtt and pass it chunks
		 * of appropriate size.
		 */
		if (i == write_mtt_size) {
			err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
			if (err)
				goto mtt_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sysfs_emit(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sysfs_emit(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sysfs_emit(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sysfs_emit(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sysfs_emit(buf, "MT25204\n");
	default:
		return sysfs_emit(buf, "unknown\n");
	}
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mthca_dev *dev =
		rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

	return sysfs_emit(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mthca_attr_group = {
	.attrs = mthca_dev_attributes,
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

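/*
 * The firmware version is packed into a 64-bit word: major in bits
 * 63:32, minor in bits 31:16 and patch level in bits 15:0.
 */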
static void get_dev_fw_str(struct ib_device *device, char *str)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->fw_ver >> 32),
		 (int) (dev->fw_ver >> 16) & 0xffff,
		 (int) dev->fw_ver & 0xffff);
}

static const struct ib_device_ops mthca_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MTHCA,
	.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION,
	.uverbs_no_driver_id_binding = 1,

	.alloc_pd = mthca_alloc_pd,
	.alloc_ucontext = mthca_alloc_ucontext,
	.attach_mcast = mthca_multicast_attach,
	.create_ah = mthca_ah_create,
	.create_cq = mthca_create_cq,
	.create_qp = mthca_create_qp,
	.dealloc_pd = mthca_dealloc_pd,
	.dealloc_ucontext = mthca_dealloc_ucontext,
	.dereg_mr = mthca_dereg_mr,
	.destroy_ah = mthca_ah_destroy,
	.destroy_cq = mthca_destroy_cq,
	.destroy_qp = mthca_destroy_qp,
	.detach_mcast = mthca_multicast_detach,
	.get_dev_fw_str = get_dev_fw_str,
	.get_dma_mr = mthca_get_dma_mr,
	.get_port_immutable = mthca_port_immutable,
	.mmap = mthca_mmap_uar,
	.modify_device = mthca_modify_device,
	.modify_port = mthca_modify_port,
	.modify_qp = mthca_modify_qp,
	.poll_cq = mthca_poll_cq,
	.process_mad = mthca_process_mad,
	.query_ah = mthca_ah_query,
	.query_device = mthca_query_device,
	.query_gid = mthca_query_gid,
	.query_pkey = mthca_query_pkey,
	.query_port = mthca_query_port,
	.query_qp = mthca_query_qp,
	.reg_user_mr = mthca_reg_user_mr,
	.resize_cq = mthca_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mthca_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};

static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_arbel_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
	.create_srq = mthca_create_srq,
	.destroy_srq = mthca_destroy_srq,
	.modify_srq = mthca_modify_srq,
	.post_srq_recv = mthca_tavor_post_srq_recv,
	.query_srq = mthca_query_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
};

static const struct ib_device_ops mthca_dev_arbel_ops = {
	.post_recv = mthca_arbel_post_receive,
	.post_send = mthca_arbel_post_send,
	.req_notify_cq = mthca_arbel_arm_cq,
};

static const struct ib_device_ops mthca_dev_tavor_ops = {
	.post_recv = mthca_tavor_post_receive,
	.post_send = mthca_tavor_post_send,
	.req_notify_cq = mthca_tavor_arm_cq,
};

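/*
 * Build up the ib_device ops from the tables above: the common ops,
 * plus SRQ ops when the HCA supports SRQs, plus the Tavor or Arbel
 * (mem-free) fast-path ops depending on the hardware family.
 */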
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors     = 1;
	dev->ib_dev.dev.parent           = &dev->pdev->dev;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		if (mthca_is_memfree(dev))
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_arbel_srq_ops);
		else
			ib_set_device_ops(&dev->ib_dev,
					  &mthca_dev_tavor_srq_ops);
	}

	ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);

	if (mthca_is_memfree(dev))
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
	else
		ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);

	mutex_init(&dev->cap_mask_mutex);

	rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
	ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
	if (ret)
		return ret;

	mthca_start_catas_poll(dev);

	return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}