// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

static u32 siw_create_uobj(struct siw_ucontext *uctx, void *vaddr, u32 size)
{
	struct siw_uobj *uobj;
	struct xa_limit limit = XA_LIMIT(0, SIW_UOBJ_MAX_KEY);
	u32 key;

	uobj = kzalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return SIW_INVAL_UOBJ_KEY;

	if (xa_alloc_cyclic(&uctx->xa, &key, uobj, limit, &uctx->uobj_nextkey,
			    GFP_KERNEL) < 0) {
		kfree(uobj);
		return SIW_INVAL_UOBJ_KEY;
	}
	uobj->size = PAGE_ALIGN(size);
	uobj->addr = vaddr;

	return key;
}

static struct siw_uobj *siw_get_uobj(struct siw_ucontext *uctx,
				     unsigned long off, u32 size)
{
	struct siw_uobj *uobj = xa_load(&uctx->xa, off);

	if (uobj && uobj->size == size)
		return uobj;

	return NULL;
}
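
/*
 * Editor's note (sketch, not part of the driver): siw_create_uobj() hands
 * back the xarray index as the object key. User space is expected to mmap()
 * with an offset of (key << PAGE_SHIFT); the kernel then sees vm_pgoff ==
 * key again and siw_get_uobj() can look the object up directly, e.g.:
 *
 *	key      = 3;				key from siw_create_uobj()
 *	offset   = key << PAGE_SHIFT;		0x3000 with 4K pages
 *	vm_pgoff = offset >> PAGE_SHIFT;	3 again, used by xa_load()
 */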

int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	struct siw_uobj *uobj;
	unsigned long off = vma->vm_pgoff;
	int size = vma->vm_end - vma->vm_start;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		goto out;
	}
	uobj = siw_get_uobj(uctx, off, size);
	if (!uobj) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %u\n",
			off, size);
		goto out;
	}
	rv = remap_vmalloc_range(vma, uobj->addr, 0);
	if (rv)
		pr_warn("remap_vmalloc_range failed: %lu, %u\n", off, size);
out:
	return rv;
}
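
/*
 * Hedged user-space sketch (editor's example, not part of the driver):
 * mapping the send queue with the key returned in the create_qp response.
 * 'fd' is the open uverbs device file and 'num_sqe' comes from the same
 * response; both are assumptions of this sketch.
 *
 *	struct siw_sqe *sq = mmap(NULL, num_sqe * sizeof(struct siw_sqe),
 *				  PROT_READ | PROT_WRITE, MAP_SHARED,
 *				  fd, uresp.sq_key);
 *	if (sq == MAP_FAILED)
 *		return -1;
 *
 * The mapping length must equal the PAGE_ALIGN()ed size recorded by
 * siw_create_uobj(), otherwise siw_get_uobj() rejects the lookup above.
 */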

int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	xa_init_flags(&ctx->xa, XA_FLAGS_ALLOC);
	ctx->uobj_nextkey = 0;
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
	void *entry;
	unsigned long index;

	/*
	 * Make sure all user mmap objects are gone. Since QP, CQ
	 * and SRQ destroy routines destroy related objects, nothing
	 * should be found here.
	 */
	xa_for_each(&uctx->xa, index, entry) {
		kfree(xa_erase(&uctx->xa, index));
		pr_warn("siw: dropping orphaned uobj at %lu\n", index);
	}
	xa_destroy(&uctx->xa);
	atomic_dec(&uctx->sdev->num_ctx);
}

int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags =
		IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_fmr = sdev->attrs.max_fmr;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);

	return 0;
}
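
/*
 * Editor's note: sys_image_guid is simply the first six bytes of the
 * attached netdev's MAC address, with the remaining two bytes left zero.
 * The same MAC also forms the port GID reported by siw_query_gid() below.
 */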

int siw_query_port(struct ib_device *base_dev, u8 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	memset(attr, 0, sizeof(*attr));

	/* set max_mtu first so active_mtu can mirror a valid value */
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = attr->max_mtu;
	attr->active_speed = 2;
	attr->active_width = 2;
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ? 5 : 3;
	attr->pkey_tbl_len = 1;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return 0;
}

int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->pkey_tbl_len = attr.pkey_tbl_len;
	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int siw_query_pkey(struct ib_device *base_dev, u8 port, u16 idx, u16 *pkey)
{
	/* Report the default pkey */
	*pkey = 0xffff;
	return 0;
}

int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

void siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @pd:		Protection Domain
 * @attrs:	Initial QP attributes.
 * @udata:	used to provide QP ID, SQ and RQ size back to user.
 */
struct ib_qp *siw_create_qp(struct ib_pd *pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp *qp = NULL;
	struct siw_base_qp *siw_base_qp = NULL;
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_cq *scq = NULL, *rcq = NULL;
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;

	siw_dbg(base_dev, "create new QP\n");

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EINVAL;
		goto err_out;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_out;
	}
	/*
	 * NOTE: we allow for zero element SQ and RQ WQE's SGL's
	 * but not for a QP unable to hold any WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_out;
	}
	scq = to_siw_cq(attrs->send_cq);
	rcq = to_siw_cq(attrs->recv_cq);

	if (!scq || (!rcq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_out;
	}
	siw_base_qp = kzalloc(sizeof(*siw_base_qp), GFP_KERNEL);
	if (!siw_base_qp) {
		rv = -ENOMEM;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rv = -ENOMEM;
		goto err_out;
	}
	siw_base_qp->qp = qp;
	qp->ib_qp = &siw_base_qp->base_qp;

	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	qp->kernel_verbs = !udata;
	qp->xa_sq_index = SIW_INVAL_UOBJ_KEY;
	qp->xa_rq_index = SIW_INVAL_UOBJ_KEY;

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
	num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
	num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);

	if (qp->kernel_verbs)
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = scq;
	qp->rcq = rcq;

	if (attrs->srq) {
		/*
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
	} else if (num_rqe) {
		if (qp->kernel_verbs)
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq)
			qp->xa_sq_index =
				siw_create_uobj(uctx, qp->sendq,
					num_sqe * sizeof(struct siw_sqe));
		if (qp->recvq)
			qp->xa_rq_index =
				siw_create_uobj(uctx, qp->recvq,
					num_rqe * sizeof(struct siw_rqe));

		if (qp->xa_sq_index == SIW_INVAL_UOBJ_KEY ||
		    qp->xa_rq_index == SIW_INVAL_UOBJ_KEY) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		uresp.sq_key = qp->xa_sq_index << PAGE_SHIFT;
		uresp.rq_key = qp->xa_rq_index << PAGE_SHIFT;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	return qp->ib_qp;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
	kfree(siw_base_qp);

	if (qp) {
		if (qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
			kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
		if (qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
			kfree(xa_erase(&uctx->xa, qp->xa_rq_index));

		vfree(qp->sendq);
		vfree(qp->recvq);
		kfree(qp);
	}
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}
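
/*
 * Hedged kernel-client sketch (editor's example, not part of the driver):
 * a ULP creating a QP against siw must stay within the limits checked
 * above, i.e. IB_QPT_RC only, WR/SGE counts bounded by SIW_MAX_QP_WR and
 * SIW_MAX_SGE, and at least one of SQ/RQ non-empty. 'pd', 'scq' and 'rcq'
 * are assumed to exist in the caller:
 *
 *	struct ib_qp_init_attr init = {
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.send_cq	= scq,
 *		.recv_cq	= rcq,
 *		.cap = {
 *			.max_send_wr	= 64,
 *			.max_recv_wr	= 64,
 *			.max_send_sge	= 4,
 *			.max_recv_sge	= 4,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init);
 *
 * Queue sizes are rounded up to powers of two internally, so the WQE
 * arrays may end up larger than requested.
 */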

/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}

int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx && qp->xa_sq_index != SIW_INVAL_UOBJ_KEY)
		kfree(xa_erase(&uctx->xa, qp->xa_sq_index));
	if (uctx && qp->xa_rq_index != SIW_INVAL_UOBJ_KEY)
		kfree(xa_erase(&uctx->xa, qp->xa_rq_index));

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);
	kfree(siw_base_qp);

	return 0;
}

/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers
 * function checks if given buffer addresses and len's are within
 * process context bounds.
 * Data from all provided sge's are copied together into the wqe,
 * referenced by a single sge.
 */
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = bytes > 0 ? bytes : 0;
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}
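
/*
 * Editor's note on the inline layout produced above: the copied payload
 * starts at &sqe->sge[1] and is described by sge[0] alone (lkey 0, laddr
 * pointing into the SQE itself). A hedged example: two user SGEs of 100
 * and 60 bytes end up as one 160-byte blob,
 *
 *	sge[0].laddr  -> &sqe->sge[1]
 *	sge[0].length == 160
 *	sqe->num_sge  == 1
 *
 * and any total above SIW_MAX_INLINE makes the function return -EINVAL.
 */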

/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		*bad_wr = wr;
		siw_dbg_qp(qp, "QP locked, state %d\n", qp->attrs.state);
		return -ENOTCONN;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		up_read(&qp->state_lock);
		*bad_wr = wr;
		siw_dbg_qp(qp, "QP out of state %d\n", qp->attrs.state);
		return -ENOTCONN;
	}
	if (wr && !qp->kernel_verbs) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		up_read(&qp->state_lock);
		*bad_wr = wr;
		return -EINVAL;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. we could relax to SGL with multiple
			 * elements referring the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (qp->kernel_verbs) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);
	*bad_wr = wr;

	return rv;
}
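
/*
 * Hedged kernel-client sketch (editor's example, not part of the driver):
 * posting one signalled SEND with a single registered buffer; 'qp', 'lkey'
 * and 'dma_addr' are assumed to exist in the caller:
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= 1024,
 *		.lkey	= lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode		= IB_WR_SEND,
 *		.send_flags	= IB_SEND_SIGNALED,
 *		.sg_list	= &sge,
 *		.num_sge	= 1,
 *		.wr_id		= 0xcafe,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 *
 * The completion shows up on the QP's send CQ once siw's TX path has
 * processed the SQE.
 */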

/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq) {
		*bad_wr = wr;
		return -EOPNOTSUPP; /* what else from errno.h? */
	}
	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		*bad_wr = wr;
		return -ENOTCONN;
	}
	if (!qp->kernel_verbs) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
		up_read(&qp->state_lock);
		*bad_wr = wr;
		return -EINVAL;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		up_read(&qp->state_lock);
		*bad_wr = wr;
		return -EINVAL;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}
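
/*
 * Editor's note on the ring arithmetic used by the post paths: sq_put and
 * rq_put are free-running u32 counters and the queue sizes are powers of
 * two, so the slot index is simply 'put % size' and counter wrap-around
 * needs no special handling. For example, with rq_size == 64:
 *
 *	rq_put = 0xffffffff  ->  idx = 63
 *	rq_put = 0x00000000  ->  idx = 0	(natural u32 wrap)
 */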

void siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	siw_dbg_cq(cq, "free CQ resources\n");

	siw_cq_flush(cq);

	if (ctx && cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
		kfree(xa_erase(&ctx->xa, cq->xa_cq_index));

	atomic_dec(&sdev->num_cq);

	vfree(cq->queue);
}

/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;
	cq->xa_cq_index = SIW_INVAL_UOBJ_KEY;

	if (!udata) {
		cq->kernel_verbs = 1;
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));
	} else {
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	}
	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);

		cq->xa_cq_index =
			siw_create_uobj(ctx, cq->queue,
					size * sizeof(struct siw_cqe) +
						sizeof(struct siw_cq_ctrl));
		if (cq->xa_cq_index == SIW_INVAL_UOBJ_KEY) {
			rv = -ENOMEM;
			goto err_out;
		}
		uresp.cq_key = cq->xa_cq_index << PAGE_SHIFT;
		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d\n", rv);

	if (cq && cq->queue) {
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		if (cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
			kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
		vfree(cq->queue);
	}
	atomic_dec(&sdev->num_cq);

	return rv;
}
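
/*
 * Editor's note on the CQ layout: the single vmalloc'ed region holds
 * 'size' CQEs followed by one struct siw_cq_ctrl, which carries the
 * notification flags shared with user space:
 *
 *	cq->queue[0] ... cq->queue[size - 1]	completion entries
 *	cq->notify == (struct siw_cq_ctrl *)&cq->queue[size]
 *
 * A user-space consumer mapping uresp.cq_key therefore sees both the
 * CQE ring and the notify word in one mmap().
 */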

/*
 * siw_poll_cq()
 *
 * Reap CQ entries if available and copy work completion status into
 * array of WC's provided by caller. Returns number of reaped CQE's.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @num_cqe:	Maximum number of CQE's to reap.
 * @wc:		Array of work completions to be filled by siw.
 */
int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		if (!siw_reap_cqe(cq, wc))
			break;
		wc++;
	}
	return i;
}

/*
 * siw_req_notify_cq()
 *
 * Request notification for new CQE's added to that CQ.
 * Defined flags:
 * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
 *   event if a WQE with notification flag set enters the CQ
 * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
 *   event if a WQE enters the CQ.
 * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the
 *   number of not reaped CQE's regardless of its notification
 *   type and current or new CQ notification settings.
 *
 * @base_cq:	Base CQ contained in siw CQ.
 * @flags:	Requested notification flags.
 */
int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
{
	struct siw_cq *cq = to_siw_cq(base_cq);

	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);

	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		/*
		 * Enable CQ event for next solicited completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		/*
		 * Enable CQ event for any signalled completion
		 * and make it visible to all associated producers.
		 */
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		return cq->cq_put - cq->cq_get;

	return 0;
}
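
/*
 * Hedged kernel-client sketch (editor's example) of the usual poll /
 * re-arm sequence against this implementation; 'cq' and 'wc' are assumed
 * to exist in the caller:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			;	consume wc here
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
 *
 * A positive return indicates CQEs arrived between the last poll and the
 * re-arm, so the loop polls again instead of waiting for an event.
 */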

/*
 * siw_dereg_mr()
 *
 * Release Memory Region.
 *
 * @base_mr: Base MR contained in siw MR.
 * @udata: points to user context, unused.
 */
int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
{
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_device *sdev = to_siw_dev(base_mr->device);

	siw_dbg_mem(mr->mem, "deregister MR\n");

	atomic_dec(&sdev->num_mr);

	siw_mr_drop_mem(mr);
	kfree_rcu(mr, rcu);

	return 0;
}

/*
 * siw_reg_user_mr()
 *
 * Register Memory Region.
 *
 * @pd:		Protection Domain
 * @start:	starting address of MR (virtual address)
 * @len:	len of MR
 * @rnic_va:	not used by siw
 * @rights:	MR access rights
 * @udata:	user buffer to communicate STag and Key.
 */
struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
			      u64 rnic_va, int rights, struct ib_udata *udata)
{
	struct siw_mr *mr = NULL;
	struct siw_umem *umem = NULL;
	struct siw_ureq_reg_mr ureq;
	struct siw_device *sdev = to_siw_dev(pd->device);

	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
	int rv;

	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
		   (unsigned long long)len);

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (!len) {
		rv = -EINVAL;
		goto err_out;
	}
	if (mem_limit != RLIM_INFINITY) {
		unsigned long num_pages =
			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
		mem_limit >>= PAGE_SHIFT;

		if (num_pages > mem_limit - current->mm->locked_vm) {
			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
				   num_pages, mem_limit,
				   current->mm->locked_vm);
			rv = -ENOMEM;
			goto err_out;
		}
	}
	umem = siw_umem_get(start, len, ib_access_writable(rights));
	if (IS_ERR(umem)) {
		rv = PTR_ERR(umem);
		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
		umem = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
	if (rv)
		goto err_out;

	if (udata) {
		struct siw_uresp_reg_mr uresp = {};
		struct siw_mem *mem = mr->mem;

		if (udata->inlen < sizeof(ureq)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
		if (rv)
			goto err_out;

		mr->base_mr.lkey |= ureq.stag_key;
		mr->base_mr.rkey |= ureq.stag_key;
		mem->stag |= ureq.stag_key;
		uresp.stag = mem->stag;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	mr->mem->stag_valid = 1;

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);
	if (mr) {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	} else {
		if (umem)
			siw_umem_release(umem, false);
	}
	return ERR_PTR(rv);
}
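
/*
 * Editor's note on the RLIMIT_MEMLOCK check above, with a worked example:
 * for start = 0x10000f00 and len = 8192 the span covers three pages,
 *
 *	PAGE_ALIGN(len + (start & ~PAGE_MASK)) >> PAGE_SHIFT
 *	= PAGE_ALIGN(8192 + 0xf00) >> PAGE_SHIFT = 3
 *
 * and registration is refused when those pages would push the process
 * above its memlock limit (mem_limit is converted to pages first).
 */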

struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			   u32 max_sge, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	struct siw_pbl *pbl = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (mr_type != IB_MR_TYPE_MEM_REG) {
		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if (max_sge > SIW_MAX_SGE_PBL) {
		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
		rv = -ENOMEM;
		goto err_out;
	}
	pbl = siw_pbl_alloc(max_sge);
	if (IS_ERR(pbl)) {
		rv = PTR_ERR(pbl);
		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
		pbl = NULL;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
	if (rv)
		goto err_out;

	mr->mem->is_pbl = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	atomic_dec(&sdev->num_mr);

	if (!mr) {
		kfree(pbl);
	} else {
		if (mr->mem)
			siw_mr_drop_mem(mr);
		kfree_rcu(mr, rcu);
	}
	siw_dbg_pd(pd, "failed: %d\n", rv);

	return ERR_PTR(rv);
}

/* Just used to count number of pages being mapped */
static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
{
	return 0;
}

int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
		  unsigned int *sg_off)
{
	struct scatterlist *slp;
	struct siw_mr *mr = to_siw_mr(base_mr);
	struct siw_mem *mem = mr->mem;
	struct siw_pbl *pbl = mem->pbl;
	struct siw_pble *pble;
	unsigned long pbl_size;
	int i, rv;

	if (!pbl) {
		siw_dbg_mem(mem, "no PBL allocated\n");
		return -EINVAL;
	}
	pble = pbl->pbe;

	if (pbl->max_buf < num_sle) {
		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
			    mem->pbl->max_buf, num_sle);
		return -ENOMEM;
	}
	for_each_sg(sl, slp, num_sle, i) {
		if (sg_dma_len(slp) == 0) {
			siw_dbg_mem(mem, "empty SGE\n");
			return -EINVAL;
		}
		if (i == 0) {
			pble->addr = sg_dma_address(slp);
			pble->size = sg_dma_len(slp);
			pble->pbl_off = 0;
			pbl_size = pble->size;
			pbl->num_buf = 1;
		} else {
			/* Merge PBL entries if adjacent */
			if (pble->addr + pble->size == sg_dma_address(slp)) {
				pble->size += sg_dma_len(slp);
			} else {
				pble++;
				pbl->num_buf++;
				pble->addr = sg_dma_address(slp);
				pble->size = sg_dma_len(slp);
				pble->pbl_off = pbl_size;
			}
			pbl_size += sg_dma_len(slp);
		}
		siw_dbg_mem(mem,
			    "sge[%d], size %u, addr 0x%p, total %lu\n",
			    i, pble->size, (void *)(uintptr_t)pble->addr,
			    pbl_size);
	}
	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
	if (rv > 0) {
		mem->len = base_mr->length;
		mem->va = base_mr->iova;
		siw_dbg_mem(mem,
			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
			    pbl->num_buf);
	}
	return rv;
}
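
/*
 * Editor's note on the PBL construction above: physically adjacent DMA
 * segments are merged into a single pble. A hedged example with 4K pages:
 *
 *	sge[0]: addr 0x10000, len 0x1000
 *	sge[1]: addr 0x11000, len 0x1000   -> merged, pble[0] size 0x2000
 *	sge[2]: addr 0x40000, len 0x1000   -> new pble[1], pbl_off 0x2000
 *
 * so pbl->num_buf counts merged runs, not SGEs, while ib_sg_to_pages()
 * still derives base_mr->length and iova from the full list.
 */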

/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
 */
struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
{
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mr *mr = NULL;
	int rv;

	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
		siw_dbg_pd(pd, "too many mr's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rv = -ENOMEM;
		goto err_out;
	}
	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
	if (rv)
		goto err_out;

	mr->mem->stag_valid = 1;

	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);

	return &mr->base_mr;

err_out:
	kfree(mr);

	atomic_dec(&sdev->num_mr);

	return ERR_PTR(rv);
}

/*
 * siw_create_srq()
 *
 * Create Shared Receive Queue of attributes @init_attrs
 * within protection domain given by @pd.
 *
 * @base_srq:	Base SRQ contained in siw SRQ.
 * @init_attrs:	SRQ init attributes.
 * @udata:	points to user context
 */
int siw_create_srq(struct ib_srq *base_srq,
		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct ib_srq_attr *attrs = &init_attrs->attr;
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	int rv;

	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
		rv = -EINVAL;
		goto err_out;
	}
	srq->max_sge = attrs->max_sge;
	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
	srq->xa_srq_index = SIW_INVAL_UOBJ_KEY;
	srq->limit = attrs->srq_limit;
	if (srq->limit)
		srq->armed = 1;

	srq->kernel_verbs = !udata;

	if (udata)
		srq->recvq =
			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
	else
		srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));

	if (srq->recvq == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	if (udata) {
		struct siw_uresp_create_srq uresp = {};

		srq->xa_srq_index = siw_create_uobj(
			ctx, srq->recvq, srq->num_rqe * sizeof(struct siw_rqe));

		if (srq->xa_srq_index == SIW_INVAL_UOBJ_KEY) {
			rv = -ENOMEM;
			goto err_out;
		}
		uresp.srq_key = srq->xa_srq_index;
		uresp.num_rqe = srq->num_rqe;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	spin_lock_init(&srq->lock);

	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");

	return 0;

err_out:
	if (srq->recvq) {
		if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
			kfree(xa_erase(&ctx->xa, srq->xa_srq_index));
		vfree(srq->recvq);
	}
	atomic_dec(&sdev->num_srq);

	return rv;
}

/*
 * siw_modify_srq()
 *
 * Modify SRQ. The caller may resize SRQ and/or set/reset notification
 * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
 *
 * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
 */
int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	spin_lock_irqsave(&srq->lock, flags);

	if (attr_mask & IB_SRQ_MAX_WR) {
		/* resize request not yet supported */
		rv = -EOPNOTSUPP;
		goto out;
	}
	if (attr_mask & IB_SRQ_LIMIT) {
		if (attrs->srq_limit) {
			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
				rv = -EINVAL;
				goto out;
			}
			srq->armed = 1;
		} else {
			srq->armed = 0;
		}
		srq->limit = attrs->srq_limit;
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return rv;
}

/*
 * siw_query_srq()
 *
 * Query SRQ attributes.
 */
int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;

	spin_lock_irqsave(&srq->lock, flags);

	attrs->max_wr = srq->num_rqe;
	attrs->max_sge = srq->max_sge;
	attrs->srq_limit = srq->limit;

	spin_unlock_irqrestore(&srq->lock, flags);

	return 0;
}

/*
 * siw_destroy_srq()
 *
 * Destroy SRQ.
 * It is assumed that the SRQ is not referenced by any
 * QP anymore - the code trusts the RDMA core environment to keep track
 * of QP references.
 */
void siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	struct siw_device *sdev = to_siw_dev(base_srq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	if (ctx && srq->xa_srq_index != SIW_INVAL_UOBJ_KEY)
		kfree(xa_erase(&ctx->xa, srq->xa_srq_index));

	vfree(srq->recvq);
	atomic_dec(&sdev->num_srq);
}

/*
 * siw_post_srq_recv()
 *
 * Post a list of receive queue elements to SRQ.
 * NOTE: The function does not check or lock a certain SRQ state
 *       during the post operation. The code simply trusts the
 *       RDMA core environment.
 *
 * @base_srq:	Base SRQ contained in siw SRQ
 * @wr:		List of R-WR's
 * @bad_wr:	Updated to failing WR if posting fails.
 */
int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct siw_srq *srq = to_siw_srq(base_srq);
	unsigned long flags;
	int rv = 0;

	if (unlikely(!srq->kernel_verbs)) {
		siw_dbg_pd(base_srq->pd,
			   "[SRQ]: no kernel post_recv for mapped srq\n");
		rv = -EINVAL;
		goto out;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Also needed to serialize potentially multiple
	 * consumers.
	 */
	spin_lock_irqsave(&srq->lock, flags);

	while (wr) {
		u32 idx = srq->rq_put % srq->num_rqe;
		struct siw_rqe *rqe = &srq->recvq[idx];

		if (rqe->flags) {
			siw_dbg_pd(base_srq->pd, "SRQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (unlikely(wr->num_sge > srq->max_sge)) {
			siw_dbg_pd(base_srq->pd,
				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* Make sure S-RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		srq->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);
out:
	if (unlikely(rv < 0)) {
		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
		*bad_wr = wr;
	}
	return rv;
}

void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_qp *base_qp = qp->ib_qp;

	/*
	 * Do not report asynchronous errors on QP which gets
	 * destroyed via verbs interface (siw_destroy_qp())
	 */
	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
		return;

	event.event = etype;
	event.device = base_qp->device;
	event.element.qp = base_qp;

	if (base_qp->event_handler) {
		siw_dbg_qp(qp, "reporting event %d\n", etype);
		base_qp->event_handler(&event, base_qp->qp_context);
	}
}

void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_cq *base_cq = &cq->base_cq;

	event.event = etype;
	event.device = base_cq->device;
	event.element.cq = base_cq;

	if (base_cq->event_handler) {
		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
		base_cq->event_handler(&event, base_cq->cq_context);
	}
}

void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
{
	struct ib_event event;
	struct ib_srq *base_srq = &srq->base_srq;

	event.event = etype;
	event.device = base_srq->device;
	event.element.srq = base_srq;

	if (base_srq->event_handler) {
		siw_dbg_pd(srq->base_srq.pd,
			   "reporting SRQ event %d\n", etype);
		base_srq->event_handler(&event, base_srq->srq_context);
	}
}

void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype)
{
	struct ib_event event;

	event.event = etype;
	event.device = &sdev->base_dev;
	event.element.port_num = port;

	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);

	ib_dispatch_event(&event);
}