1 /*******************************************************************************
3 * Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenFabrics.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 *******************************************************************************/
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/random.h>
38 #include <linux/highmem.h>
39 #include <linux/time.h>
40 #include <linux/hugetlb.h>
41 #include <linux/irq.h>
42 #include <asm/byteorder.h>
44 #include <rdma/ib_verbs.h>
45 #include <rdma/iw_cm.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_umem.h>
48 #include <rdma/uverbs_ioctl.h>
52 * i40iw_query_device - get device attributes
53 * @ibdev: device pointer from stack
54 * @props: returning device attributes
57 static int i40iw_query_device(struct ib_device *ibdev,
58 struct ib_device_attr *props,
59 struct ib_udata *udata)
61 struct i40iw_device *iwdev = to_iwdev(ibdev);
63 if (udata->inlen || udata->outlen)
65 memset(props, 0, sizeof(*props));
66 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
67 props->fw_ver = I40IW_FW_VERSION;
68 props->device_cap_flags = iwdev->device_cap_flags;
69 props->vendor_id = iwdev->ldev->pcidev->vendor;
70 props->vendor_part_id = iwdev->ldev->pcidev->device;
71 props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
72 props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
73 props->max_qp = iwdev->max_qp - iwdev->used_qps;
74 props->max_qp_wr = I40IW_MAX_QP_WRS;
75 props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
76 props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
77 props->max_cq = iwdev->max_cq - iwdev->used_cqs;
78 props->max_cqe = iwdev->max_cqe;
79 props->max_mr = iwdev->max_mr - iwdev->used_mrs;
80 props->max_pd = iwdev->max_pd - iwdev->used_pds;
81 props->max_sge_rd = I40IW_MAX_SGE_RD;
82 props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
83 props->max_qp_init_rd_atom = props->max_qp_rd_atom;
84 props->atomic_cap = IB_ATOMIC_NONE;
85 props->max_map_per_fmr = 1;
86 props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
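/*
 * The QP/CQ/MR/PD limits filled in above are reported as remaining counts
 * (max minus currently in use) rather than absolute device maximums, and
 * sys_image_guid is derived directly from the netdev MAC address.
 */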
91 * i40iw_query_port - get port attributes
92 * @ibdev: device pointer from stack
93 * @port: port number for query
94 * @props: returning port attributes
96 static int i40iw_query_port(struct ib_device *ibdev,
98 struct ib_port_attr *props)
100 struct i40iw_device *iwdev = to_iwdev(ibdev);
101 struct net_device *netdev = iwdev->netdev;
103 /* props is zeroed by the caller; don't zero it again here */
104 props->max_mtu = IB_MTU_4096;
105 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
108 if (netif_carrier_ok(iwdev->netdev))
109 props->state = IB_PORT_ACTIVE;
111 props->state = IB_PORT_DOWN;
112 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
113 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
114 props->gid_tbl_len = 1;
115 props->pkey_tbl_len = 1;
116 props->active_width = IB_WIDTH_4X;
117 props->active_speed = 1;
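/* active_speed 1 is IB_SPEED_SDR (2.5 Gb/s per lane); combined with
 * IB_WIDTH_4X this advertises roughly a 10 Gb/s port, approximating the
 * underlying Ethernet link rather than measuring it.
 */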
118 props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
123 * i40iw_alloc_ucontext - Allocate the user context data structure
124 * @uctx: Uverbs context pointer from stack
127 * This keeps track of all objects associated with a particular user-mode client.
130 static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
131 struct ib_udata *udata)
133 struct ib_device *ibdev = uctx->device;
134 struct i40iw_device *iwdev = to_iwdev(ibdev);
135 struct i40iw_alloc_ucontext_req req;
136 struct i40iw_alloc_ucontext_resp uresp = {};
137 struct i40iw_ucontext *ucontext = to_ucontext(uctx);
139 if (ib_copy_from_udata(&req, udata, sizeof(req)))
142 if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
143 i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
147 uresp.max_qps = iwdev->max_qp;
148 uresp.max_pds = iwdev->max_pd;
149 uresp.wq_size = iwdev->max_qp_wr * 2;
150 uresp.kernel_ver = req.userspace_ver;
152 ucontext->iwdev = iwdev;
153 ucontext->abi_ver = req.userspace_ver;
155 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
158 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
159 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
160 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
161 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
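/*
 * ABI negotiation: the user-space provider passes its ABI version in
 * i40iw_alloc_ucontext_req; anything older than 4 or newer than
 * I40IW_ABI_VER is rejected, and the accepted version is echoed back in
 * uresp.kernel_ver and cached in ucontext->abi_ver so later verbs (e.g.
 * i40iw_alloc_pd()) can lay out their responses for that provider version.
 */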
167 * i40iw_dealloc_ucontext - deallocate the user context data structure
168 * @context: user context created during alloc
170 static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
176 * i40iw_mmap - user memory map
177 * @context: context created during alloc
178 * @vma: kernel info for user memory map
180 static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
182 struct i40iw_ucontext *ucontext;
186 ucontext = to_ucontext(context);
187 if (ucontext->iwdev->sc_dev.is_pf) {
188 db_addr_offset = I40IW_DB_ADDR_OFFSET;
189 push_offset = I40IW_PUSH_OFFSET;
191 vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
193 db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
194 push_offset = I40IW_VF_PUSH_OFFSET;
196 vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
199 vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
201 if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
202 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
203 vma->vm_private_data = ucontext;
205 if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
206 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
208 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
211 if (io_remap_pfn_range(vma, vma->vm_start,
212 vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
213 PAGE_SIZE, vma->vm_page_prot))
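/*
 * BAR0 layout as mapped here: page offset 0 (after the PF/VF doorbell offset
 * adjustment above) is the doorbell page and is always mapped non-cached;
 * higher offsets select push pages, which alternate between write-combining
 * and non-cached mappings.  Only one page is mapped per call.  A user-space
 * provider would typically request the doorbell page with something like the
 * following (illustrative only, not from this tree):
 *
 *	db = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, cmd_fd, 0);
 */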
220 * i40iw_alloc_push_page - allocate a push page for qp
221 * @iwdev: iwarp device
222 * @qp: hardware control qp
224 static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
226 struct i40iw_cqp_request *cqp_request;
227 struct cqp_commands_info *cqp_info;
228 enum i40iw_status_code status;
230 if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
233 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
237 atomic_inc(&cqp_request->refcount);
239 cqp_info = &cqp_request->info;
240 cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
241 cqp_info->post_sq = 1;
243 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
244 cqp_info->in.u.manage_push_page.info.free_page = 0;
245 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
246 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
248 status = i40iw_handle_cqp_op(iwdev, cqp_request);
250 qp->push_idx = cqp_request->compl_info.op_ret_val;
252 i40iw_pr_err("CQP-OP Push page fail");
253 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
257 * i40iw_dealloc_push_page - free a push page for qp
258 * @iwdev: iwarp device
259 * @qp: hardware control qp
261 static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
263 struct i40iw_cqp_request *cqp_request;
264 struct cqp_commands_info *cqp_info;
265 enum i40iw_status_code status;
267 if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
270 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
274 cqp_info = &cqp_request->info;
275 cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
276 cqp_info->post_sq = 1;
278 cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
279 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
280 cqp_info->in.u.manage_push_page.info.free_page = 1;
281 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
282 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
284 status = i40iw_handle_cqp_op(iwdev, cqp_request);
286 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
288 i40iw_pr_err("CQP-OP Push page fail");
292 * i40iw_alloc_pd - allocate protection domain
296 static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
298 struct i40iw_pd *iwpd = to_iwpd(pd);
299 struct i40iw_device *iwdev = to_iwdev(pd->device);
300 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
301 struct i40iw_alloc_pd_resp uresp;
302 struct i40iw_sc_pd *sc_pd;
309 err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
310 iwdev->max_pd, &pd_id, &iwdev->next_pd);
312 i40iw_pr_err("alloc resource failed\n");
316 sc_pd = &iwpd->sc_pd;
319 struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
320 udata, struct i40iw_ucontext, ibucontext);
321 dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
322 memset(&uresp, 0, sizeof(uresp));
324 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
329 dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
332 i40iw_add_pdusecount(iwpd);
336 i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
341 * i40iw_dealloc_pd - deallocate pd
342 * @ibpd: ptr of pd to be deallocated
343 * @udata: user data or null for kernel object
345 static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
347 struct i40iw_pd *iwpd = to_iwpd(ibpd);
348 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
350 i40iw_rem_pdusecount(iwpd, iwdev);
354 * i40iw_get_pbl - Retrieve pbl from a list given a virtual address
356 * @va: user virtual address
357 * @pbl_list: pbl list to search in (QP's or CQ's)
359 static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
360 struct list_head *pbl_list)
362 struct i40iw_pbl *iwpbl;
364 list_for_each_entry(iwpbl, pbl_list, list) {
365 if (iwpbl->user_base == va) {
366 iwpbl->on_list = false;
367 list_del(&iwpbl->list);
375 * i40iw_free_qp_resources - free up memory resources for qp
376 * @iwdev: iwarp device
377 * @iwqp: qp ptr (user or kernel)
378 * @qp_num: qp number assigned
380 void i40iw_free_qp_resources(struct i40iw_device *iwdev,
381 struct i40iw_qp *iwqp,
384 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
386 i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
387 i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
389 i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
390 if (iwpbl->pbl_allocated)
391 i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
392 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
393 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
394 kfree(iwqp->kqp.wrid_mem);
395 iwqp->kqp.wrid_mem = NULL;
396 kfree(iwqp->allocated_buffer);
400 * i40iw_clean_cqes - clean cq entries for qp
401 * @iwqp: qp ptr (user or kernel)
404 static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
406 struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
408 ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
412 * i40iw_destroy_qp - destroy qp
413 * @ibqp: qp's ib pointer also to get to device's qp address
415 static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
417 struct i40iw_qp *iwqp = to_iwqp(ibqp);
421 if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
422 i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
424 if (!iwqp->user_mode) {
426 i40iw_clean_cqes(iwqp, iwqp->iwscq);
427 if (iwqp->iwrcq != iwqp->iwscq)
428 i40iw_clean_cqes(iwqp, iwqp->iwrcq);
432 i40iw_rem_ref(&iwqp->ibqp);
437 * i40iw_setup_virt_qp - setup for allocation of virtual qp
440 * @init_info: initialize info to return
442 static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
443 struct i40iw_qp *iwqp,
444 struct i40iw_qp_init_info *init_info)
446 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
447 struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
449 iwqp->page = qpmr->sq_page;
450 init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
451 if (iwpbl->pbl_allocated) {
452 init_info->virtual_map = true;
453 init_info->sq_pa = qpmr->sq_pbl.idx;
454 init_info->rq_pa = qpmr->rq_pbl.idx;
456 init_info->sq_pa = qpmr->sq_pbl.addr;
457 init_info->rq_pa = qpmr->rq_pbl.addr;
463 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
464 * @iwdev: iwarp device
465 * @iwqp: qp ptr (user or kernel)
466 * @info: initialize info to return
468 static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
469 struct i40iw_qp *iwqp,
470 struct i40iw_qp_init_info *info)
472 struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
473 u32 sqdepth, rqdepth;
476 enum i40iw_status_code status;
477 struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
479 i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
480 status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
484 status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
488 size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
489 iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
491 ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
492 if (!ukinfo->sq_wrtrk_array)
495 ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
497 size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
498 size += (I40IW_SHADOW_AREA_SIZE << 3);
500 status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
502 kfree(ukinfo->sq_wrtrk_array);
503 ukinfo->sq_wrtrk_array = NULL;
507 ukinfo->sq = mem->va;
508 info->sq_pa = mem->pa;
510 ukinfo->rq = &ukinfo->sq[sqdepth];
511 info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
513 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
514 info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
516 ukinfo->sq_size = sqdepth >> sqshift;
517 ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
518 ukinfo->qp_id = iwqp->ibqp.qp_num;
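/*
 * Layout produced by i40iw_setup_kmode_qp():
 *
 *   kqp.dma_mem:  [ SQ: sqdepth WQEs ][ RQ: rqdepth WQEs ][ shadow area ]
 *   kqp.wrid_mem: [ sq_wrtrk_array: sqdepth entries ][ rq_wrid_array: rqdepth u64 ]
 *
 * sqdepth/rqdepth are ring sizes after applying the WQE shift, so the
 * sq_size/rq_size handed back to the uk layer are the depths shifted back
 * down to user-visible WR counts.
 */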
523 * i40iw_create_qp - create qp
525 * @init_attr: attributes for qp
526 * @udata: user data for create qp
528 static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
529 struct ib_qp_init_attr *init_attr,
530 struct ib_udata *udata)
532 struct i40iw_pd *iwpd = to_iwpd(ibpd);
533 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
534 struct i40iw_cqp *iwcqp = &iwdev->cqp;
535 struct i40iw_qp *iwqp;
536 struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
537 udata, struct i40iw_ucontext, ibucontext);
538 struct i40iw_create_qp_req req;
539 struct i40iw_create_qp_resp uresp;
542 enum i40iw_status_code ret;
546 struct i40iw_sc_qp *qp;
547 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
548 struct i40iw_qp_init_info init_info;
549 struct i40iw_create_qp_info *qp_info;
550 struct i40iw_cqp_request *cqp_request;
551 struct cqp_commands_info *cqp_info;
553 struct i40iw_qp_host_ctx_info *ctx_info;
554 struct i40iwarp_offload_info *iwarp_info;
558 return ERR_PTR(-ENODEV);
560 if (init_attr->create_flags)
561 return ERR_PTR(-EINVAL);
562 if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
563 init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
565 if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
566 init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
568 if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
569 init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
571 memset(&init_info, 0, sizeof(init_info));
573 sq_size = init_attr->cap.max_send_wr;
574 rq_size = init_attr->cap.max_recv_wr;
576 init_info.vsi = &iwdev->vsi;
577 init_info.qp_uk_init_info.sq_size = sq_size;
578 init_info.qp_uk_init_info.rq_size = rq_size;
579 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
580 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
581 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
583 mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
585 return ERR_PTR(-ENOMEM);
587 iwqp = (struct i40iw_qp *)mem;
588 iwqp->allocated_buffer = mem;
590 qp->back_qp = (void *)iwqp;
591 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
593 iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
595 if (i40iw_allocate_dma_mem(dev->hw,
597 I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
599 i40iw_pr_err("dma_mem failed\n");
604 init_info.q2 = iwqp->q2_ctx_mem.va;
605 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
607 init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
608 init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
610 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
611 &qp_num, &iwdev->next_qp);
613 i40iw_pr_err("qp resource\n");
619 iwqp->ibqp.qp_num = qp_num;
621 iwqp->iwscq = to_iwcq(init_attr->send_cq);
622 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
624 iwqp->host_ctx.va = init_info.host_ctx;
625 iwqp->host_ctx.pa = init_info.host_ctx_pa;
626 iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
628 init_info.pd = &iwpd->sc_pd;
629 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
630 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
632 if (init_attr->qp_type != IB_QPT_RC) {
636 if (iwdev->push_mode)
637 i40iw_alloc_push_page(iwdev, qp);
639 err_code = ib_copy_from_udata(&req, udata, sizeof(req));
641 i40iw_pr_err("ib_copy_from_data\n");
644 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
647 if (req.user_wqe_buffers) {
648 struct i40iw_pbl *iwpbl;
651 &ucontext->qp_reg_mem_list_lock, flags);
652 iwpbl = i40iw_get_pbl(
653 (unsigned long)req.user_wqe_buffers,
654 &ucontext->qp_reg_mem_list);
655 spin_unlock_irqrestore(
656 &ucontext->qp_reg_mem_list_lock, flags);
660 i40iw_pr_err("no pbl info\n");
663 memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
665 err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
667 err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
671 i40iw_pr_err("setup qp failed\n");
675 init_info.type = I40IW_QP_TYPE_IWARP;
676 ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
679 i40iw_pr_err("qp_init fail\n");
682 ctx_info = &iwqp->ctx_info;
683 iwarp_info = &iwqp->iwarp_info;
684 iwarp_info->rd_enable = true;
685 iwarp_info->wr_rdresp_en = true;
686 if (!iwqp->user_mode) {
687 iwarp_info->fast_reg_en = true;
688 iwarp_info->priv_mode_en = true;
690 iwarp_info->ddp_ver = 1;
691 iwarp_info->rdmap_ver = 1;
693 ctx_info->iwarp_info_valid = true;
694 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
695 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
696 if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
697 ctx_info->push_mode_en = false;
699 ctx_info->push_mode_en = true;
700 ctx_info->push_idx = qp->push_idx;
703 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
704 (u64 *)iwqp->host_ctx.va,
706 ctx_info->iwarp_info_valid = false;
707 cqp_request = i40iw_get_cqp_request(iwcqp, true);
712 cqp_info = &cqp_request->info;
713 qp_info = &cqp_request->info.in.u.qp_create.info;
715 memset(qp_info, 0, sizeof(*qp_info));
717 qp_info->cq_num_valid = true;
718 qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
720 cqp_info->cqp_cmd = OP_QP_CREATE;
721 cqp_info->post_sq = 1;
722 cqp_info->in.u.qp_create.qp = qp;
723 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
724 ret = i40iw_handle_cqp_op(iwdev, cqp_request);
726 i40iw_pr_err("CQP-OP QP create fail");
731 i40iw_add_ref(&iwqp->ibqp);
732 spin_lock_init(&iwqp->lock);
733 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
734 iwdev->qp_table[qp_num] = iwqp;
735 i40iw_add_pdusecount(iwqp->iwpd);
736 i40iw_add_devusecount(iwdev);
738 memset(&uresp, 0, sizeof(uresp));
739 uresp.actual_sq_size = sq_size;
740 uresp.actual_rq_size = rq_size;
741 uresp.qp_id = qp_num;
742 uresp.push_idx = qp->push_idx;
743 err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
745 i40iw_pr_err("copy_to_udata failed\n");
746 i40iw_destroy_qp(&iwqp->ibqp, udata);
747 /* let the completion of the qp destroy free the qp */
748 return ERR_PTR(err_code);
751 init_completion(&iwqp->sq_drained);
752 init_completion(&iwqp->rq_drained);
756 i40iw_free_qp_resources(iwdev, iwqp, qp_num);
757 return ERR_PTR(err_code);
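/*
 * Minimal kernel-ULP sketch (illustrative only; "scq", "rcq" and "pd" are
 * assumed to exist) of a request that ends up in i40iw_create_qp() above.
 * Only IB_QPT_RC is accepted, and SGE/inline limits are clamped to the
 * device maximums before the rings are sized:
 *
 *	struct ib_qp_init_attr attrs = {
 *		.qp_type	= IB_QPT_RC,
 *		.send_cq	= scq,
 *		.recv_cq	= rcq,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.cap = { .max_send_wr = 32, .max_recv_wr = 32,
 *			 .max_send_sge = 2,  .max_recv_sge = 2, },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attrs);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */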
761 * i40iw_query_qp - query qp attributes
763 * @attr: attributes pointer
764 * @attr_mask: Not used
765 * @init_attr: qp attributes to return
767 static int i40iw_query_qp(struct ib_qp *ibqp,
768 struct ib_qp_attr *attr,
770 struct ib_qp_init_attr *init_attr)
772 struct i40iw_qp *iwqp = to_iwqp(ibqp);
773 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
775 attr->qp_state = iwqp->ibqp_state;
776 attr->cur_qp_state = attr->qp_state;
777 attr->qp_access_flags = 0;
778 attr->cap.max_send_wr = qp->qp_uk.sq_size;
779 attr->cap.max_recv_wr = qp->qp_uk.rq_size;
780 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
781 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
782 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
784 init_attr->event_handler = iwqp->ibqp.event_handler;
785 init_attr->qp_context = iwqp->ibqp.qp_context;
786 init_attr->send_cq = iwqp->ibqp.send_cq;
787 init_attr->recv_cq = iwqp->ibqp.recv_cq;
788 init_attr->srq = iwqp->ibqp.srq;
789 init_attr->cap = attr->cap;
790 init_attr->port_num = 1;
795 * i40iw_hw_modify_qp - setup cqp for modify qp
796 * @iwdev: iwarp device
797 * @iwqp: qp ptr (user or kernel)
798 * @info: info for modify qp
799 * @wait: flag to wait or not for modify qp completion
801 void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
802 struct i40iw_modify_qp_info *info, bool wait)
804 struct i40iw_cqp_request *cqp_request;
805 struct cqp_commands_info *cqp_info;
806 struct i40iw_modify_qp_info *m_info;
807 struct i40iw_gen_ae_info ae_info;
809 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
813 cqp_info = &cqp_request->info;
814 m_info = &cqp_info->in.u.qp_modify.info;
815 memcpy(m_info, info, sizeof(*m_info));
816 cqp_info->cqp_cmd = OP_QP_MODIFY;
817 cqp_info->post_sq = 1;
818 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
819 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
820 if (!i40iw_handle_cqp_op(iwdev, cqp_request))
823 switch (m_info->next_iwarp_state) {
824 case I40IW_QP_STATE_RTS:
825 if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
826 i40iw_send_reset(iwqp->cm_node);
828 case I40IW_QP_STATE_IDLE:
829 case I40IW_QP_STATE_TERMINATE:
830 case I40IW_QP_STATE_CLOSING:
831 ae_info.ae_code = I40IW_AE_BAD_CLOSE;
832 ae_info.ae_source = 0;
833 i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
835 case I40IW_QP_STATE_ERROR:
842 * i40iw_modify_qp - modify qp request
843 * @ibqp: qp's pointer for modify
844 * @attr: access attributes
845 * @attr_mask: state mask
848 int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
849 int attr_mask, struct ib_udata *udata)
851 struct i40iw_qp *iwqp = to_iwqp(ibqp);
852 struct i40iw_device *iwdev = iwqp->iwdev;
853 struct i40iw_qp_host_ctx_info *ctx_info;
854 struct i40iwarp_offload_info *iwarp_info;
855 struct i40iw_modify_qp_info info;
856 u8 issue_modify_qp = 0;
861 memset(&info, 0, sizeof(info));
862 ctx_info = &iwqp->ctx_info;
863 iwarp_info = &iwqp->iwarp_info;
865 spin_lock_irqsave(&iwqp->lock, flags);
867 if (attr_mask & IB_QP_STATE) {
868 if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
873 switch (attr->qp_state) {
876 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
880 if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
881 info.next_iwarp_state = I40IW_QP_STATE_IDLE;
886 if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
893 iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
895 info.next_iwarp_state = I40IW_QP_STATE_RTS;
896 info.tcp_ctx_valid = true;
897 info.ord_valid = true;
898 info.arp_cache_idx_valid = true;
899 info.cq_num_valid = true;
902 if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
906 if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
907 (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
911 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
915 info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
919 if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
923 info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
928 if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
932 if (iwqp->sc_qp.term_flags)
933 i40iw_terminate_del_timer(&iwqp->sc_qp);
934 info.next_iwarp_state = I40IW_QP_STATE_ERROR;
935 if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
937 (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
938 info.reset_tcp_conn = true;
942 info.next_iwarp_state = I40IW_QP_STATE_ERROR;
949 iwqp->ibqp_state = attr->qp_state;
952 if (attr_mask & IB_QP_ACCESS_FLAGS) {
953 ctx_info->iwarp_info_valid = true;
954 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
955 iwarp_info->wr_rdresp_en = true;
956 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
957 iwarp_info->wr_rdresp_en = true;
958 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
959 iwarp_info->rd_enable = true;
960 if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
961 iwarp_info->bind_en = true;
963 if (iwqp->user_mode) {
964 iwarp_info->rd_enable = true;
965 iwarp_info->wr_rdresp_en = true;
966 iwarp_info->priv_mode_en = false;
970 if (ctx_info->iwarp_info_valid) {
971 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
974 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
975 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
976 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
977 (u64 *)iwqp->host_ctx.va,
980 i40iw_pr_err("setting QP context\n");
986 spin_unlock_irqrestore(&iwqp->lock, flags);
988 if (issue_modify_qp) {
989 i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
991 spin_lock_irqsave(&iwqp->lock, flags);
992 iwqp->iwarp_state = info.next_iwarp_state;
993 spin_unlock_irqrestore(&iwqp->lock, flags);
996 if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
998 if (iwqp->cm_id && iwqp->hw_tcp_state) {
999 spin_lock_irqsave(&iwqp->lock, flags);
1000 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1001 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1002 spin_unlock_irqrestore(&iwqp->lock, flags);
1003 i40iw_cm_disconn(iwqp);
1006 spin_lock_irqsave(&iwqp->lock, flags);
1008 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
1009 iwqp->cm_id->add_ref(iwqp->cm_id);
1010 i40iw_schedule_cm_timer(iwqp->cm_node,
1011 (struct i40iw_puda_buf *)iwqp,
1012 I40IW_TIMER_TYPE_CLOSE, 1, 0);
1015 spin_unlock_irqrestore(&iwqp->lock, flags);
1020 spin_unlock_irqrestore(&iwqp->lock, flags);
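/*
 * Approximate IB -> iWARP state mapping implemented above (some transitions
 * also depend on the current hardware and TCP state): INIT/RTR -> IDLE,
 * RTS -> RTS, SQD -> CLOSING, SQE -> TERMINATE, ERR -> ERROR.  Moving past
 * RTS with an active TCP connection additionally kicks off the CM
 * disconnect/close-timer handling after the modify completes.
 */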
1025 * cq_free_resources - free up resources for cq
1026 * @iwdev: iwarp device
1029 static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
1031 struct i40iw_sc_cq *cq = &iwcq->sc_cq;
1033 if (!iwcq->user_mode)
1034 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
1035 i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
1039 * i40iw_cq_wq_destroy - send cq destroy cqp
1040 * @iwdev: iwarp device
1041 * @cq: hardware control cq
1043 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
1045 enum i40iw_status_code status;
1046 struct i40iw_cqp_request *cqp_request;
1047 struct cqp_commands_info *cqp_info;
1049 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1053 cqp_info = &cqp_request->info;
1055 cqp_info->cqp_cmd = OP_CQ_DESTROY;
1056 cqp_info->post_sq = 1;
1057 cqp_info->in.u.cq_destroy.cq = cq;
1058 cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1059 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1061 i40iw_pr_err("CQP-OP Destroy QP fail");
1065 * i40iw_destroy_cq - destroy cq
1066 * @ib_cq: cq pointer
1067 * @udata: user data or NULL for kernel object
1069 static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1071 struct i40iw_cq *iwcq;
1072 struct i40iw_device *iwdev;
1073 struct i40iw_sc_cq *cq;
1075 iwcq = to_iwcq(ib_cq);
1076 iwdev = to_iwdev(ib_cq->device);
1078 i40iw_cq_wq_destroy(iwdev, cq);
1079 cq_free_resources(iwdev, iwcq);
1080 i40iw_rem_devusecount(iwdev);
1084 * i40iw_create_cq - create cq
1085 * @ibcq: CQ allocated
1086 * @attr: attributes for cq
1089 static int i40iw_create_cq(struct ib_cq *ibcq,
1090 const struct ib_cq_init_attr *attr,
1091 struct ib_udata *udata)
1093 struct ib_device *ibdev = ibcq->device;
1094 struct i40iw_device *iwdev = to_iwdev(ibdev);
1095 struct i40iw_cq *iwcq = to_iwcq(ibcq);
1096 struct i40iw_pbl *iwpbl;
1098 struct i40iw_sc_cq *cq;
1099 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1100 struct i40iw_cq_init_info info = {};
1101 enum i40iw_status_code status;
1102 struct i40iw_cqp_request *cqp_request;
1103 struct cqp_commands_info *cqp_info;
1104 struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1105 unsigned long flags;
1107 int entries = attr->cqe;
1112 if (entries > iwdev->max_cqe)
1115 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
1116 iwdev->max_cq, &cq_num,
1122 cq->back_cq = (void *)iwcq;
1123 spin_lock_init(&iwcq->lock);
1126 ukinfo->cq_size = max(entries, 4);
1127 ukinfo->cq_id = cq_num;
1128 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1130 if (attr->comp_vector < iwdev->ceqs_count)
1131 info.ceq_id = attr->comp_vector;
1132 info.ceq_id_valid = true;
1134 info.type = I40IW_CQ_TYPE_IWARP;
1136 struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
1137 udata, struct i40iw_ucontext, ibucontext);
1138 struct i40iw_create_cq_req req;
1139 struct i40iw_cq_mr *cqmr;
1141 memset(&req, 0, sizeof(req));
1142 iwcq->user_mode = true;
1143 if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
1145 goto cq_free_resources;
1148 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1149 iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
1150 &ucontext->cq_reg_mem_list);
1151 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1154 goto cq_free_resources;
1157 iwcq->iwpbl = iwpbl;
1158 iwcq->cq_mem_size = 0;
1159 cqmr = &iwpbl->cq_mr;
1160 info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
1161 if (iwpbl->pbl_allocated) {
1162 info.virtual_map = true;
1163 info.pbl_chunk_size = 1;
1164 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1166 info.cq_base_pa = cqmr->cq_pbl.addr;
1169 /* Kmode allocations */
1173 rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
1174 rsize = round_up(rsize, 256);
1175 shadow = I40IW_SHADOW_AREA_SIZE << 3;
1176 status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
1177 rsize + shadow, 256);
1180 goto cq_free_resources;
1182 ukinfo->cq_base = iwcq->kmem.va;
1183 info.cq_base_pa = iwcq->kmem.pa;
1184 info.shadow_area_pa = info.cq_base_pa + rsize;
1185 ukinfo->shadow_area = iwcq->kmem.va + rsize;
1188 if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
1189 i40iw_pr_err("init cq fail\n");
1191 goto cq_free_resources;
1194 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1197 goto cq_free_resources;
1200 cqp_info = &cqp_request->info;
1201 cqp_info->cqp_cmd = OP_CQ_CREATE;
1202 cqp_info->post_sq = 1;
1203 cqp_info->in.u.cq_create.cq = cq;
1204 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1205 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1207 i40iw_pr_err("CQP-OP Create QP fail");
1209 goto cq_free_resources;
1213 struct i40iw_create_cq_resp resp;
1215 memset(&resp, 0, sizeof(resp));
1216 resp.cq_id = info.cq_uk_init_info.cq_id;
1217 resp.cq_size = info.cq_uk_init_info.cq_size;
1218 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1219 i40iw_pr_err("copy to user data\n");
1225 i40iw_add_devusecount(iwdev);
1229 i40iw_cq_wq_destroy(iwdev, cq);
1231 cq_free_resources(iwdev, iwcq);
1236 * i40iw_get_user_access - get hw access from IB access
1237 * @acc: IB access to return hw access
1239 static inline u16 i40iw_get_user_access(int acc)
1243 access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
1244 access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
1245 access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
1246 access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
1251 * i40iw_free_stag - free stag resource
1252 * @iwdev: iwarp device
1253 * @stag: stag to free
1255 static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
1259 stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1260 i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
1261 i40iw_rem_devusecount(iwdev);
1265 * i40iw_create_stag - create random stag
1266 * @iwdev: iwarp device
1268 static u32 i40iw_create_stag(struct i40iw_device *iwdev)
1272 u32 next_stag_index;
1278 get_random_bytes(&random, sizeof(random));
1279 consumer_key = (u8)random;
1281 driver_key = random & ~iwdev->mr_stagmask;
1282 next_stag_index = (random & iwdev->mr_stagmask) >> 8;
1283 next_stag_index %= iwdev->max_mr;
1285 ret = i40iw_alloc_resource(iwdev,
1286 iwdev->allocated_mrs, iwdev->max_mr,
1287 &stag_index, &next_stag_index);
1289 stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
1291 stag += (u32)consumer_key;
1292 i40iw_add_devusecount(iwdev);
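/*
 * Stag layout, as built here and undone by i40iw_free_stag(): the allocated
 * resource index is shifted into place by I40IW_CQPSQ_STAG_IDX_SHIFT within
 * the bits covered by mr_stagmask, then combined with a random consumer key
 * (driver_key keeps the random bits that fall outside mr_stagmask).  The
 * result is handed back to the ULP verbatim as lkey/rkey.
 */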
1298 * i40iw_next_pbl_addr - Get next pbl address
1299 * @pbl: pointer to a pble
1300 * @pinfo: info pointer
1303 static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
1304 struct i40iw_pble_info **pinfo,
1308 if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
1312 return (u64 *)(*pinfo)->addr;
1316 * i40iw_copy_user_pgaddrs - copy user page addresses into the pble list
1317 * @iwmr: iwmr for IB's user page addresses
1318 * @pbl: pble pointer to save 1 level or 0 level pble
1319 * @level: indicates level 0, 1 or 2
1321 static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
1323 enum i40iw_pble_level level)
1325 struct ib_umem *region = iwmr->region;
1326 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1327 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1328 struct i40iw_pble_info *pinfo;
1329 struct ib_block_iter biter;
1332 pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
1334 if (iwmr->type == IW_MEMREG_TYPE_QP)
1335 iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
1337 rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
1339 *pbl = rdma_block_iter_dma_address(&biter);
1340 pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
1345 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
1346 * @arr: lvl1 pbl array
1347 * @npages: page count
1348 * @pg_size: page size
1351 static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1355 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1356 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
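/*
 * Example: with pg_size = 0x1000, arr = { 0x10000, 0x11000, 0x12000 } is
 * contiguous because arr[0] + pg_size * i == arr[i] for every index, which
 * is exactly the check performed in the loop above.
 */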
1363 * i40iw_check_mr_contiguous - check if MR is physically contiguous
1364 * @palloc: pbl allocation struct
1365 * @pg_size: page size
1367 static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
1369 struct i40iw_pble_level2 *lvl2 = &palloc->level2;
1370 struct i40iw_pble_info *leaf = lvl2->leaf;
1372 u64 *start_addr = NULL;
1376 if (palloc->level == I40IW_LEVEL_1) {
1377 arr = (u64 *)palloc->level1.addr;
1378 ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
1382 start_addr = (u64 *)leaf->addr;
1384 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1385 arr = (u64 *)leaf->addr;
1386 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1388 ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
1397 * i40iw_setup_pbles - copy user pg address to pble's
1398 * @iwdev: iwarp device
1399 * @iwmr: mr pointer for this memory registration
1400 * @use_pbles: flag if to use pble's
1402 static int i40iw_setup_pbles(struct i40iw_device *iwdev,
1403 struct i40iw_mr *iwmr,
1406 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1407 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1408 struct i40iw_pble_info *pinfo;
1410 enum i40iw_status_code status;
1411 enum i40iw_pble_level level = I40IW_LEVEL_1;
1414 mutex_lock(&iwdev->pbl_mutex);
1415 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1416 mutex_unlock(&iwdev->pbl_mutex);
1420 iwpbl->pbl_allocated = true;
1421 level = palloc->level;
1422 pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
1423 pbl = (u64 *)pinfo->addr;
1425 pbl = iwmr->pgaddrmem;
1428 i40iw_copy_user_pgaddrs(iwmr, pbl, level);
1431 iwmr->pgaddrmem[0] = *pbl;
1437 * i40iw_handle_q_mem - handle memory for qp and cq
1438 * @iwdev: iwarp device
1439 * @req: information for q memory management
1440 * @iwpbl: pble struct
1441 * @use_pbles: flag to use pble
1443 static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
1444 struct i40iw_mem_reg_req *req,
1445 struct i40iw_pbl *iwpbl,
1448 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1449 struct i40iw_mr *iwmr = iwpbl->iwmr;
1450 struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
1451 struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
1452 struct i40iw_hmc_pble *hmc_p;
1453 u64 *arr = iwmr->pgaddrmem;
1459 total = req->sq_pages + req->rq_pages + req->cq_pages;
1460 pg_size = iwmr->page_size;
1462 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1466 if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
1467 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1468 iwpbl->pbl_allocated = false;
1473 arr = (u64 *)palloc->level1.addr;
1475 if (iwmr->type == IW_MEMREG_TYPE_QP) {
1476 hmc_p = &qpmr->sq_pbl;
1477 qpmr->shadow = (dma_addr_t)arr[total];
1480 ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
1482 ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
1486 hmc_p->idx = palloc->level1.idx;
1487 hmc_p = &qpmr->rq_pbl;
1488 hmc_p->idx = palloc->level1.idx + req->sq_pages;
1490 hmc_p->addr = arr[0];
1491 hmc_p = &qpmr->rq_pbl;
1492 hmc_p->addr = arr[req->sq_pages];
1495 hmc_p = &cqmr->cq_pbl;
1496 cqmr->shadow = (dma_addr_t)arr[total];
1499 ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
1502 hmc_p->idx = palloc->level1.idx;
1504 hmc_p->addr = arr[0];
1507 if (use_pbles && ret) {
1508 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1509 iwpbl->pbl_allocated = false;
1516 * i40iw_hw_alloc_stag - cqp command to allocate stag
1517 * @iwdev: iwarp device
1518 * @iwmr: iwarp mr pointer
1520 static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
1522 struct i40iw_allocate_stag_info *info;
1523 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1524 enum i40iw_status_code status;
1526 struct i40iw_cqp_request *cqp_request;
1527 struct cqp_commands_info *cqp_info;
1529 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1533 cqp_info = &cqp_request->info;
1534 info = &cqp_info->in.u.alloc_stag.info;
1535 memset(info, 0, sizeof(*info));
1536 info->page_size = PAGE_SIZE;
1537 info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1538 info->pd_id = iwpd->sc_pd.pd_id;
1539 info->total_len = iwmr->length;
1540 info->remote_access = true;
1541 cqp_info->cqp_cmd = OP_ALLOC_STAG;
1542 cqp_info->post_sq = 1;
1543 cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
1544 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
1546 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1549 i40iw_pr_err("CQP-OP MR Reg fail");
1555 * i40iw_alloc_mr - register stag for fast memory registration
1557 * @mr_type: memory type for stag registration
1558 * @max_num_sg: max number of pages
1559 * @udata: user data or NULL for kernel objects
1561 static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1562 u32 max_num_sg, struct ib_udata *udata)
1564 struct i40iw_pd *iwpd = to_iwpd(pd);
1565 struct i40iw_device *iwdev = to_iwdev(pd->device);
1566 struct i40iw_pble_alloc *palloc;
1567 struct i40iw_pbl *iwpbl;
1568 struct i40iw_mr *iwmr;
1569 enum i40iw_status_code status;
1571 int err_code = -ENOMEM;
1573 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1575 return ERR_PTR(-ENOMEM);
1577 stag = i40iw_create_stag(iwdev);
1579 err_code = -EOVERFLOW;
1582 stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
1584 iwmr->ibmr.rkey = stag;
1585 iwmr->ibmr.lkey = stag;
1587 iwmr->ibmr.device = pd->device;
1588 iwpbl = &iwmr->iwpbl;
1590 iwmr->type = IW_MEMREG_TYPE_MEM;
1591 palloc = &iwpbl->pble_alloc;
1592 iwmr->page_cnt = max_num_sg;
1593 mutex_lock(&iwdev->pbl_mutex);
1594 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1595 mutex_unlock(&iwdev->pbl_mutex);
1599 if (palloc->level != I40IW_LEVEL_1)
1601 err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1604 iwpbl->pbl_allocated = true;
1605 i40iw_add_pdusecount(iwpd);
1608 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1610 i40iw_free_stag(iwdev, stag);
1613 return ERR_PTR(err_code);
1617 * i40iw_set_page - populate pbl list for fmr
1618 * @ibmr: ib mem to access iwarp mr pointer
1619 * @addr: page dma address for pbl list
1621 static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
1623 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1624 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1625 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1628 if (unlikely(iwmr->npages == iwmr->page_cnt))
1631 pbl = (u64 *)palloc->level1.addr;
1632 pbl[iwmr->npages++] = cpu_to_le64(addr);
1637 * i40iw_map_mr_sg - map sg list for fmr
1638 * @ibmr: ib mem to access iwarp mr pointer
1639 * @sg: scatter gather list for fmr
1640 * @sg_nents: number of sg pages
1642 static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1643 int sg_nents, unsigned int *sg_offset)
1645 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1648 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
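/*
 * Fast-registration usage sketch for a kernel ULP (illustrative only; "pd",
 * "sgt", "nents" and "qp" are assumed to exist, error handling omitted):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
 *
 *	if (ib_map_mr_sg(mr, sgt->sgl, nents, NULL, PAGE_SIZE) != nents)
 *		goto err;
 *	{
 *		struct ib_reg_wr reg = {
 *			.wr.opcode	= IB_WR_REG_MR,
 *			.wr.send_flags	= IB_SEND_SIGNALED,
 *			.mr		= mr,
 *			.key		= mr->rkey,
 *			.access		= IB_ACCESS_LOCAL_WRITE |
 *					  IB_ACCESS_REMOTE_READ,
 *		};
 *		ib_post_send(qp, &reg.wr, NULL);
 *	}
 *
 * ib_map_mr_sg() walks the scatterlist through i40iw_set_page() above, and
 * the IB_WR_REG_MR work request becomes an iw_mr_fast_register() call in
 * i40iw_post_send().
 */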
1652 * i40iw_drain_sq - drain the send queue
1653 * @ibqp: ib qp pointer
1655 static void i40iw_drain_sq(struct ib_qp *ibqp)
1657 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1658 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1660 if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1661 wait_for_completion(&iwqp->sq_drained);
1665 * i40iw_drain_rq - drain the receive queue
1666 * @ibqp: ib qp pointer
1668 static void i40iw_drain_rq(struct ib_qp *ibqp)
1670 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1671 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1673 if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1674 wait_for_completion(&iwqp->rq_drained);
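/*
 * These two routines back the core ib_drain_sq()/ib_drain_rq() helpers for
 * i40iw: they simply block on sq_drained/rq_drained, which i40iw_poll_cq()
 * completes once the corresponding ring has no more work and the QP has
 * moved past RTS (see the completion handling near the end of this file).
 */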
1678 * i40iw_hwreg_mr - send cqp command for memory registration
1679 * @iwdev: iwarp device
1680 * @iwmr: iwarp mr pointer
1681 * @access: access for MR
1683 static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1684 struct i40iw_mr *iwmr,
1687 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1688 struct i40iw_reg_ns_stag_info *stag_info;
1689 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1690 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1691 enum i40iw_status_code status;
1693 struct i40iw_cqp_request *cqp_request;
1694 struct cqp_commands_info *cqp_info;
1696 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1700 cqp_info = &cqp_request->info;
1701 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1702 memset(stag_info, 0, sizeof(*stag_info));
1703 stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1704 stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1705 stag_info->stag_key = (u8)iwmr->stag;
1706 stag_info->total_len = iwmr->length;
1707 stag_info->access_rights = access;
1708 stag_info->pd_id = iwpd->sc_pd.pd_id;
1709 stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
1710 stag_info->page_size = iwmr->page_size;
1712 if (iwpbl->pbl_allocated) {
1713 if (palloc->level == I40IW_LEVEL_1) {
1714 stag_info->first_pm_pbl_index = palloc->level1.idx;
1715 stag_info->chunk_size = 1;
1717 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1718 stag_info->chunk_size = 3;
1721 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1724 cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1725 cqp_info->post_sq = 1;
1726 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1727 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1729 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1732 i40iw_pr_err("CQP-OP MR Reg fail");
1738 * i40iw_reg_user_mr - Register a user memory region
1740 * @start: virtual start address
1741 * @length: length of mr
1742 * @virt: virtual address
1743 * @acc: access of mr
1746 static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1751 struct ib_udata *udata)
1753 struct i40iw_pd *iwpd = to_iwpd(pd);
1754 struct i40iw_device *iwdev = to_iwdev(pd->device);
1755 struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
1756 udata, struct i40iw_ucontext, ibucontext);
1757 struct i40iw_pble_alloc *palloc;
1758 struct i40iw_pbl *iwpbl;
1759 struct i40iw_mr *iwmr;
1760 struct ib_umem *region;
1761 struct i40iw_mem_reg_req req;
1766 bool use_pbles = false;
1767 unsigned long flags;
1773 return ERR_PTR(-ENODEV);
1775 if (length > I40IW_MAX_MR_SIZE)
1776 return ERR_PTR(-EINVAL);
1777 region = ib_umem_get(udata, start, length, acc, 0);
1779 return (struct ib_mr *)region;
1781 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1782 ib_umem_release(region);
1783 return ERR_PTR(-EFAULT);
1786 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1788 ib_umem_release(region);
1789 return ERR_PTR(-ENOMEM);
1792 iwpbl = &iwmr->iwpbl;
1794 iwmr->region = region;
1796 iwmr->ibmr.device = pd->device;
1798 iwmr->page_size = PAGE_SIZE;
1799 if (req.reg_type == IW_MEMREG_TYPE_MEM)
1800 iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
1803 region_length = region->length + (start & (iwmr->page_size - 1));
1804 pg_shift = ffs(iwmr->page_size) - 1;
1805 pbl_depth = region_length >> pg_shift;
1806 pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
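/*
 * pbl_depth example: a 6 MB region mapped with 2 MB pages needs 3 pble
 * entries; an unaligned start folds the offset into the first page into
 * region_length so partial first/last pages still get an entry.
 */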
1807 iwmr->length = region->length;
1809 iwpbl->user_base = virt;
1810 palloc = &iwpbl->pble_alloc;
1812 iwmr->type = req.reg_type;
1813 iwmr->page_cnt = (u32)pbl_depth;
1815 switch (req.reg_type) {
1816 case IW_MEMREG_TYPE_QP:
1817 use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1818 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1821 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1822 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1823 iwpbl->on_list = true;
1824 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1826 case IW_MEMREG_TYPE_CQ:
1827 use_pbles = (req.cq_pages > 1);
1828 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1832 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1833 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1834 iwpbl->on_list = true;
1835 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1837 case IW_MEMREG_TYPE_MEM:
1838 use_pbles = (iwmr->page_cnt != 1);
1839 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1841 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1846 ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
1848 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1849 iwpbl->pbl_allocated = false;
1853 access |= i40iw_get_user_access(acc);
1854 stag = i40iw_create_stag(iwdev);
1861 iwmr->ibmr.rkey = stag;
1862 iwmr->ibmr.lkey = stag;
1864 err = i40iw_hwreg_mr(iwdev, iwmr, access);
1866 i40iw_free_stag(iwdev, stag);
1875 iwmr->type = req.reg_type;
1876 if (req.reg_type == IW_MEMREG_TYPE_MEM)
1877 i40iw_add_pdusecount(iwpd);
1881 if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
1882 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1883 ib_umem_release(region);
1885 return ERR_PTR(err);
1889 * i40iw_reg_phys_mr - register kernel physical memory
1891 * @addr: physical address of memory to register
1892 * @size: size of memory to register
1893 * @acc: Access rights
1894 * @iova_start: start of virtual address for physical buffers
1896 struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1902 struct i40iw_pd *iwpd = to_iwpd(pd);
1903 struct i40iw_device *iwdev = to_iwdev(pd->device);
1904 struct i40iw_pbl *iwpbl;
1905 struct i40iw_mr *iwmr;
1906 enum i40iw_status_code status;
1908 u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1911 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1913 return ERR_PTR(-ENOMEM);
1915 iwmr->ibmr.device = pd->device;
1916 iwpbl = &iwmr->iwpbl;
1918 iwmr->type = IW_MEMREG_TYPE_MEM;
1919 iwpbl->user_base = *iova_start;
1920 stag = i40iw_create_stag(iwdev);
1925 access |= i40iw_get_user_access(acc);
1927 iwmr->ibmr.rkey = stag;
1928 iwmr->ibmr.lkey = stag;
1930 iwmr->pgaddrmem[0] = addr;
1931 iwmr->length = size;
1932 status = i40iw_hwreg_mr(iwdev, iwmr, access);
1934 i40iw_free_stag(iwdev, stag);
1939 i40iw_add_pdusecount(iwpd);
1943 return ERR_PTR(ret);
1947 * i40iw_get_dma_mr - register physical mem
1949 * @acc: access for memory
1951 static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
1955 return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
1959 * i40iw_del_memlist - delete pbl list entries for CQ/QP
1960 * @iwmr: iwmr for IB's user page addresses
1961 * @ucontext: ptr to user context
1963 static void i40iw_del_memlist(struct i40iw_mr *iwmr,
1964 struct i40iw_ucontext *ucontext)
1966 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1967 unsigned long flags;
1969 switch (iwmr->type) {
1970 case IW_MEMREG_TYPE_CQ:
1971 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1972 if (iwpbl->on_list) {
1973 iwpbl->on_list = false;
1974 list_del(&iwpbl->list);
1976 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1978 case IW_MEMREG_TYPE_QP:
1979 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1980 if (iwpbl->on_list) {
1981 iwpbl->on_list = false;
1982 list_del(&iwpbl->list);
1984 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1992 * i40iw_dereg_mr - deregister mr
1993 * @ib_mr: mr ptr for dereg
1995 static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1997 struct ib_pd *ibpd = ib_mr->pd;
1998 struct i40iw_pd *iwpd = to_iwpd(ibpd);
1999 struct i40iw_mr *iwmr = to_iwmr(ib_mr);
2000 struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
2001 enum i40iw_status_code status;
2002 struct i40iw_dealloc_stag_info *info;
2003 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2004 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
2005 struct i40iw_cqp_request *cqp_request;
2006 struct cqp_commands_info *cqp_info;
2009 ib_umem_release(iwmr->region);
2011 if (iwmr->type != IW_MEMREG_TYPE_MEM) {
2012 /* region is already released; only check whether this was a user MR */
2014 struct i40iw_ucontext *ucontext =
2015 rdma_udata_to_drv_context(
2017 struct i40iw_ucontext,
2020 i40iw_del_memlist(iwmr, ucontext);
2022 if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
2023 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2028 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2032 cqp_info = &cqp_request->info;
2033 info = &cqp_info->in.u.dealloc_stag.info;
2034 memset(info, 0, sizeof(*info));
2036 info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2037 info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2038 stag_idx = info->stag_idx;
2040 if (iwpbl->pbl_allocated)
2041 info->dealloc_pbl = true;
2043 cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2044 cqp_info->post_sq = 1;
2045 cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2046 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2047 status = i40iw_handle_cqp_op(iwdev, cqp_request);
2049 i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2050 i40iw_rem_pdusecount(iwpd, iwdev);
2051 i40iw_free_stag(iwdev, iwmr->stag);
2052 if (iwpbl->pbl_allocated)
2053 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2061 static ssize_t hw_rev_show(struct device *dev,
2062 struct device_attribute *attr, char *buf)
2064 struct i40iw_ib_device *iwibdev =
2065 rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
2066 u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2068 return sprintf(buf, "%x\n", hw_rev);
2070 static DEVICE_ATTR_RO(hw_rev);
2075 static ssize_t hca_type_show(struct device *dev,
2076 struct device_attribute *attr, char *buf)
2078 return sprintf(buf, "I40IW\n");
2080 static DEVICE_ATTR_RO(hca_type);
2085 static ssize_t board_id_show(struct device *dev,
2086 struct device_attribute *attr, char *buf)
2088 return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
2090 static DEVICE_ATTR_RO(board_id);
2092 static struct attribute *i40iw_dev_attributes[] = {
2093 &dev_attr_hw_rev.attr,
2094 &dev_attr_hca_type.attr,
2095 &dev_attr_board_id.attr,
2099 static const struct attribute_group i40iw_attr_group = {
2100 .attrs = i40iw_dev_attributes,
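/*
 * These attributes surface under the IB device's sysfs directory, e.g.
 * /sys/class/infiniband/i40iw0/hw_rev (device name assumed for illustration).
 */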
2104 * i40iw_copy_sg_list - copy sg list for qp
2105 * @sg_list: destination sg list
2106 * @sgl: source sg list to copy from
2107 * @num_sges: count of sg entries
2109 static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
2113 for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
2114 sg_list[i].tag_off = sgl[i].addr;
2115 sg_list[i].len = sgl[i].length;
2116 sg_list[i].stag = sgl[i].lkey;
2121 * i40iw_post_send - kernel application wr
2122 * @ibqp: qp ptr for wr
2123 * @ib_wr: work request ptr
2124 * @bad_wr: return of bad wr if err
2126 static int i40iw_post_send(struct ib_qp *ibqp,
2127 const struct ib_send_wr *ib_wr,
2128 const struct ib_send_wr **bad_wr)
2130 struct i40iw_qp *iwqp;
2131 struct i40iw_qp_uk *ukqp;
2132 struct i40iw_post_sq_info info;
2133 enum i40iw_status_code ret;
2135 unsigned long flags;
2138 iwqp = (struct i40iw_qp *)ibqp;
2139 ukqp = &iwqp->sc_qp.qp_uk;
2141 spin_lock_irqsave(&iwqp->lock, flags);
2143 if (iwqp->flush_issued) {
2150 memset(&info, 0, sizeof(info));
2151 info.wr_id = (u64)(ib_wr->wr_id);
2152 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2153 info.signaled = true;
2154 if (ib_wr->send_flags & IB_SEND_FENCE)
2155 info.read_fence = true;
2157 switch (ib_wr->opcode) {
2160 case IB_WR_SEND_WITH_INV:
2161 if (ib_wr->opcode == IB_WR_SEND) {
2162 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2163 info.op_type = I40IW_OP_TYPE_SEND_SOL;
2165 info.op_type = I40IW_OP_TYPE_SEND;
2167 if (ib_wr->send_flags & IB_SEND_SOLICITED)
2168 info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
2170 info.op_type = I40IW_OP_TYPE_SEND_INV;
2173 if (ib_wr->send_flags & IB_SEND_INLINE) {
2174 info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2175 info.op.inline_send.len = ib_wr->sg_list[0].length;
2176 ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2178 info.op.send.num_sges = ib_wr->num_sge;
2179 info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
2180 ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2184 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2190 case IB_WR_RDMA_WRITE:
2191 info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
2193 if (ib_wr->send_flags & IB_SEND_INLINE) {
2194 info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2195 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2196 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2197 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2198 ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
2200 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2201 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2202 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2203 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2204 ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
2208 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2214 case IB_WR_RDMA_READ_WITH_INV:
2217 case IB_WR_RDMA_READ:
2218 if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
2222 info.op_type = I40IW_OP_TYPE_RDMA_READ;
2223 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2224 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2225 info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
2226 info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
2227 info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
2228 ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
2230 if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2236 case IB_WR_LOCAL_INV:
2237 info.op_type = I40IW_OP_TYPE_INV_STAG;
2238 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2239 ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;

			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
			info.stag_idx = reg_wr(ib_wr)->key >> 8;
			info.page_size = reg_wr(ib_wr)->mr->page_size;
			info.wr_id = ib_wr->wr_id;

			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
			info.total_len = iwmr->ibmr.length;
			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
			info.first_pm_pbl_index = palloc->level1.idx;
			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;

			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
				info.chunk_size = 1;

			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		}
		default:
			err = -EINVAL;
			i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
				     ib_wr->opcode);
			break;
		}

		if (err)
			break;
		ib_wr = ib_wr->next;
	}

out:
	if (err)
		*bad_wr = ib_wr;
	else
		ukqp->ops.iw_qp_post_wr(ukqp);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return err;
}
/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */
static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);

	/* QP has been flushed; no further receives may be posted */
	if (iwqp->flush_issued) {
		err = -EINVAL;
		goto out;
	}

	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err(" post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}

out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 */
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
		/* wake up drain waiters once the work rings have emptied */
		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
				complete(&iwqp->sq_drained);
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
				complete(&iwqp->rq_drained);
		}
		entry->byte_len = cq_poll_info.bytes_xfered;
		entry++;
		cqe_count++;
	}
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return cqe_count;
}
/**
 * i40iw_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);

	return 0;
}
/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port return
 */
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
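/*
 * HW statistics exported through the rdma_hw_stats interface: 32-bit
 * counters come first, 64-bit counters follow at an offset of
 * I40IW_HW_STAT_INDEX_MAX_32, so both share one flat name table.
 */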
static const char * const i40iw_hw_stat_names[] = {
	/* 32-bit counters */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64-bit counters */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}
/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
			   I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 +
		      I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;

	if (dev->is_pf) {
		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
static int i40iw_query_gid(struct ib_device *ibdev,
			   u8 port,
			   int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}
/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int i40iw_query_pkey(struct ib_device *ibdev,
			    u8 port,
			    u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}
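/*
 * Verb entry points registered with the IB core.  i40iw is an iWARP
 * provider, so the iw_* connection-management callbacks are wired up
 * alongside the standard verbs.
 */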
static const struct ib_device_ops i40iw_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_I40IW,
	/* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */
	.uverbs_abi_ver = I40IW_ABI_VER,

	.alloc_hw_stats = i40iw_alloc_hw_stats,
	.alloc_mr = i40iw_alloc_mr,
	.alloc_pd = i40iw_alloc_pd,
	.alloc_ucontext = i40iw_alloc_ucontext,
	.create_cq = i40iw_create_cq,
	.create_qp = i40iw_create_qp,
	.dealloc_pd = i40iw_dealloc_pd,
	.dealloc_ucontext = i40iw_dealloc_ucontext,
	.dereg_mr = i40iw_dereg_mr,
	.destroy_cq = i40iw_destroy_cq,
	.destroy_qp = i40iw_destroy_qp,
	.drain_rq = i40iw_drain_rq,
	.drain_sq = i40iw_drain_sq,
	.get_dev_fw_str = i40iw_get_dev_fw_str,
	.get_dma_mr = i40iw_get_dma_mr,
	.get_hw_stats = i40iw_get_hw_stats,
	.get_port_immutable = i40iw_port_immutable,
	.iw_accept = i40iw_accept,
	.iw_add_ref = i40iw_add_ref,
	.iw_connect = i40iw_connect,
	.iw_create_listen = i40iw_create_listen,
	.iw_destroy_listen = i40iw_destroy_listen,
	.iw_get_qp = i40iw_get_qp,
	.iw_reject = i40iw_reject,
	.iw_rem_ref = i40iw_rem_ref,
	.map_mr_sg = i40iw_map_mr_sg,
	.mmap = i40iw_mmap,
	.modify_qp = i40iw_modify_qp,
	.poll_cq = i40iw_poll_cq,
	.post_recv = i40iw_post_recv,
	.post_send = i40iw_post_send,
	.query_device = i40iw_query_device,
	.query_gid = i40iw_query_gid,
	.query_pkey = i40iw_query_pkey,
	.query_port = i40iw_query_port,
	.query_qp = i40iw_query_qp,
	.reg_user_mr = i40iw_reg_user_mr,
	.req_notify_cq = i40iw_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
};
/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
	if (!iwibdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return NULL;
	}
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iw_ifname));
	ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);

	return iwibdev;
}
/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}
/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	ib_unregister_device(&iwibdev->ibdev);
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}
/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;
	rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
	ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
	if (ret)
		goto error;

	return 0;

error:
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}