// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

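/*
 * This file implements the ib_device_ops verbs entry points for the
 * rxe soft-RoCE driver. Driver objects (PDs, AHs, CQs, QPs, SRQs, MRs)
 * embed the core ib_* structures and live in rxe resource pools.
 */
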
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u32 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	int rc;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = rxe->port.attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u32 port_num, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u32 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibuc->device);
	struct rxe_ucontext *uc = to_ruc(ibuc);

	return rxe_add_to_pool(&rxe->uc_pool, uc);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, pd);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, ah);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
	return 0;
}

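/*
 * Build one receive WQE at the producer index of a receive queue
 * (owned by a QP or an SRQ). The QUEUE_TYPE_* argument tells the
 * queue helpers whether the producer/consumer indices are shared
 * with a userspace provider or are kernel-only.
 */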
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;
	int full;

	if (rq->is_user)
		full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER);
	else
		full = queue_full(rq->queue, QUEUE_TYPE_KERNEL);

	if (unlikely(full)) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	if (rq->is_user)
		recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER);
	else
		recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL);

	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	if (rq->is_user)
		advance_producer(rq->queue, QUEUE_TYPE_FROM_USER);
	else
		advance_producer(rq->queue, QUEUE_TYPE_KERNEL);

	return 0;

err1:
	return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (init->srq_type != IB_SRQT_BASIC)
		return -EOPNOTSUPP;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
		srq->is_user = true;
	} else {
		srq->is_user = false;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, srq);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_pd *pd = to_rpd(ibqp->pd);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (init->create_flags)
		return -EOPNOTSUPP;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		return err;

	if (udata) {
		if (udata->inlen)
			return -EINVAL;

		qp->is_user = true;
	} else {
		qp->is_user = false;
	}

	err = rxe_add_to_pool(&rxe->qp_pool, qp);
	if (err)
		return err;

	rxe_add_index(qp);
	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
	if (err)
		goto qp_init;

	return 0;

qp_init:
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return err;
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	if (mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

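/*
 * Kernel send posting path: validate_send_wr() checks a work request
 * against the send queue limits, init_send_wr()/init_send_wqe()
 * translate the ib_send_wr into the driver-private rxe_send_wqe
 * layout, and post_one_send() puts it on the send queue.
 */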
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			fallthrough;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
				    const struct ib_send_wr *ibwr)
{
	struct ib_sge *sge = ibwr->sg_list;
	u8 *p = wqe->dma.inline_data;
	int i;

	for (i = 0; i < ibwr->num_sge; i++, sge++) {
		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
		p += sge->length;
	}
}

static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			  unsigned int mask, unsigned int length,
			  struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;

	init_send_wr(qp, &wqe->wr, ibwr);

	/* local operation */
	if (unlikely(mask & WR_LOCAL_OP_MASK)) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return;
	}

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
		copy_inline_data_to_wqe(wqe, ibwr);
	else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);
}

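/*
 * Post a single validated WQE at the send queue producer index.
 * sq_lock serializes producers; the queue-full check and the
 * producer advance happen under the same lock acquisition.
 */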
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;
	int full;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (qp->is_user)
		full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER);
	else
		full = queue_full(sq->queue, QUEUE_TYPE_KERNEL);

	if (unlikely(full)) {
		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
		return -ENOMEM;
	}

	if (qp->is_user)
		send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER);
	else
		send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL);

	init_send_wqe(qp, ibwr, mask, length, send_wqe);

	if (qp->is_user)
		advance_producer(sq->queue, QUEUE_TYPE_FROM_USER);
	else
		advance_producer(sq->queue, QUEUE_TYPE_KERNEL);

	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}

		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

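/*
 * For user QPs the provider library has already written the WQEs into
 * the shared queue, so the kernel only kicks the requester task; for
 * kernel QPs the WQEs are built here via rxe_post_send_kernel().
 */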
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EOPNOTSUPP;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, cq);
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		if (cq->is_user)
			cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER);
		else
			cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		if (cq->is_user)
			advance_consumer(cq->queue, QUEUE_TYPE_TO_USER);
		else
			advance_consumer(cq->queue, QUEUE_TYPE_KERNEL);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count;

	if (cq->is_user)
		count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
	else
		count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;
	int empty;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if (cq->is_user)
		empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER);
	else
		empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL);

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

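/*
 * Memory region verbs. DMA MRs address kernel memory directly, user
 * MRs pin and map a userspace range, and fast-registration MRs are
 * populated later through rxe_map_mr_sg().
 */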
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);
	rxe_mr_init_dma(pd, access, mr);

	return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mr_init_user(pd, start, length, iova, access, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mr *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mr_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

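/*
 * rxe_set_page() is the per-page callback passed to ib_sg_to_pages();
 * it records each page address in the MR's two-level buffer table
 * (mr->map[i]->buf[j]) until num_buf entries have been filled.
 */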
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

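/*
 * ib_device_ops table wiring the verbs above into the RDMA core. The
 * INIT_RDMA_OBJ_SIZE() entries let the core allocate each driver
 * container (e.g. struct rxe_qp) together with the ib_* object it
 * embeds.
 */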
static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = rxe_ib_alloc_hw_port_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_mw = rxe_alloc_mw,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.create_user_ah = rxe_create_ah,
	.dealloc_driver = rxe_dealloc,
	.dealloc_mw = rxe_dealloc_mw,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.device_group = &rxe_attr_group,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, rxe_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
	INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;

	strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);

	dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
				BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	err = rxe_icrc_init(rxe);
	if (err)
		return err;

	err = ib_register_device(dev, ibdev_name, NULL);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}