1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
6 * irdma_query_device - get device attributes
7 * @ibdev: device pointer from stack
8 * @props: returning device attributes
11 static int irdma_query_device(struct ib_device *ibdev,
12 struct ib_device_attr *props,
13 struct ib_udata *udata)
15 struct irdma_device *iwdev = to_iwdev(ibdev);
16 struct irdma_pci_f *rf = iwdev->rf;
17 struct pci_dev *pcidev = iwdev->rf->pcidev;
18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
20 if (udata->inlen || udata->outlen)
23 memset(props, 0, sizeof(*props));
24 addrconf_addr_eui48((u8 *)&props->sys_image_guid,
25 iwdev->netdev->dev_addr);
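/* firmware version: major in the upper 32 bits, minor in the lower 32 bits */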
26 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
27 irdma_fw_minor_ver(&rf->sc_dev);
28 props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
29 IB_DEVICE_MEM_MGT_EXTENSIONS;
30 props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
31 props->vendor_id = pcidev->vendor;
32 props->vendor_part_id = pcidev->device;
34 props->hw_ver = rf->pcidev->revision;
35 props->page_size_cap = hw_attrs->page_size_cap;
36 props->max_mr_size = hw_attrs->max_mr_size;
37 props->max_qp = rf->max_qp - rf->used_qps;
38 props->max_qp_wr = hw_attrs->max_qp_wr;
39 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
40 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
41 props->max_cq = rf->max_cq - rf->used_cqs;
42 props->max_cqe = rf->max_cqe;
43 props->max_mr = rf->max_mr - rf->used_mrs;
44 props->max_mw = props->max_mr;
45 props->max_pd = rf->max_pd - rf->used_pds;
46 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
47 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
48 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
49 if (rdma_protocol_roce(ibdev, 1))
50 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
51 props->max_ah = rf->max_ah;
52 props->max_mcast_grp = rf->max_mcg;
53 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
54 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
55 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
56 #define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
57 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
58 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
64 * irdma_get_eth_speed_and_width - Get IB port speed and width from netdev speed
65 * @link_speed: netdev phy link speed
66 * @active_speed: IB port speed
67 * @active_width: IB port width
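*
* Map the Ethernet link speed to the closest IB speed and width pair.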
69 static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
72 if (link_speed <= SPEED_1000) {
73 *active_width = IB_WIDTH_1X;
74 *active_speed = IB_SPEED_SDR;
75 } else if (link_speed <= SPEED_10000) {
76 *active_width = IB_WIDTH_1X;
77 *active_speed = IB_SPEED_FDR10;
78 } else if (link_speed <= SPEED_20000) {
79 *active_width = IB_WIDTH_4X;
80 *active_speed = IB_SPEED_DDR;
81 } else if (link_speed <= SPEED_25000) {
82 *active_width = IB_WIDTH_1X;
83 *active_speed = IB_SPEED_EDR;
84 } else if (link_speed <= SPEED_40000) {
85 *active_width = IB_WIDTH_4X;
86 *active_speed = IB_SPEED_FDR10;
88 *active_width = IB_WIDTH_4X;
89 *active_speed = IB_SPEED_EDR;
94 * irdma_query_port - get port attributes
95 * @ibdev: device pointer from stack
96 * @port: port number for query
97 * @props: returning port attributes
99 static int irdma_query_port(struct ib_device *ibdev, u32 port,
100 struct ib_port_attr *props)
102 struct irdma_device *iwdev = to_iwdev(ibdev);
103 struct net_device *netdev = iwdev->netdev;
105 /* no need to zero out props here, done by caller */
107 props->max_mtu = IB_MTU_4096;
108 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
113 if (netif_carrier_ok(netdev) && netif_running(netdev)) {
114 props->state = IB_PORT_ACTIVE;
115 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
117 props->state = IB_PORT_DOWN;
118 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
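/* speed/width are derived from a fixed 100G link speed rather than the netdev's reported speed */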
120 irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
121 &props->active_width);
123 if (rdma_protocol_roce(ibdev, 1)) {
124 props->gid_tbl_len = 32;
125 props->ip_gids = true;
126 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
128 props->gid_tbl_len = 1;
130 props->qkey_viol_cntr = 0;
131 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
132 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
138 * irdma_disassociate_ucontext - Disassociate user context
139 * @context: ib user context
141 static void irdma_disassociate_ucontext(struct ib_ucontext *context)
145 static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
146 struct vm_area_struct *vma)
150 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
153 vma->vm_private_data = ucontext;
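/* map the doorbell page: DB register offset within BAR 0 of the PCI device */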
154 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
155 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
157 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
158 pgprot_noncached(vma->vm_page_prot), NULL);
161 static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
163 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
168 static struct rdma_user_mmap_entry*
169 irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
170 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
172 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
178 entry->bar_offset = bar_offset;
179 entry->mmap_flag = mmap_flag;
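/* register the entry with the rdma core so userspace can mmap it by the returned offset */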
181 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
182 &entry->rdma_entry, PAGE_SIZE);
187 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
189 return &entry->rdma_entry;
193 * irdma_mmap - user memory map
194 * @context: context created during alloc
195 * @vma: kernel info for user memory map
197 static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
199 struct rdma_user_mmap_entry *rdma_entry;
200 struct irdma_user_mmap_entry *entry;
201 struct irdma_ucontext *ucontext;
205 ucontext = to_ucontext(context);
207 /* Legacy support for libi40iw with hard-coded mmap key */
208 if (ucontext->legacy_mode)
209 return irdma_mmap_legacy(ucontext, vma);
211 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
213 ibdev_dbg(&ucontext->iwdev->ibdev,
214 "VERBS: pgoff[0x%lx] does not have valid entry\n",
219 entry = to_irdma_mmap_entry(rdma_entry);
220 ibdev_dbg(&ucontext->iwdev->ibdev,
221 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
222 entry->bar_offset, entry->mmap_flag);
224 pfn = (entry->bar_offset +
225 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
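/* map the page uncached or write-combined based on how the entry was created */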
227 switch (entry->mmap_flag) {
228 case IRDMA_MMAP_IO_NC:
229 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
230 pgprot_noncached(vma->vm_page_prot),
233 case IRDMA_MMAP_IO_WC:
234 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
235 pgprot_writecombine(vma->vm_page_prot),
243 ibdev_dbg(&ucontext->iwdev->ibdev,
244 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
245 entry->bar_offset, entry->mmap_flag, ret);
246 rdma_user_mmap_entry_put(rdma_entry);
252 * irdma_alloc_push_page - allocate a push page for qp
255 static void irdma_alloc_push_page(struct irdma_qp *iwqp)
257 struct irdma_cqp_request *cqp_request;
258 struct cqp_cmds_info *cqp_info;
259 struct irdma_device *iwdev = iwqp->iwdev;
260 struct irdma_sc_qp *qp = &iwqp->sc_qp;
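/* issue a CQP manage-push-page command; on success the returned index is stored in qp->push_idx */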
263 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
267 cqp_info = &cqp_request->info;
268 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
269 cqp_info->post_sq = 1;
270 cqp_info->in.u.manage_push_page.info.push_idx = 0;
271 cqp_info->in.u.manage_push_page.info.qs_handle =
272 qp->vsi->qos[qp->user_pri].qs_handle;
273 cqp_info->in.u.manage_push_page.info.free_page = 0;
274 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
275 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
276 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
278 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
279 if (!status && cqp_request->compl_info.op_ret_val <
280 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
281 qp->push_idx = cqp_request->compl_info.op_ret_val;
285 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
289 * irdma_alloc_ucontext - Allocate the user context data structure
290 * @uctx: uverbs context pointer
293 * This keeps track of all objects associated with a particular user-mode context.
296 static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
297 struct ib_udata *udata)
299 struct ib_device *ibdev = uctx->device;
300 struct irdma_device *iwdev = to_iwdev(ibdev);
301 struct irdma_alloc_ucontext_req req;
302 struct irdma_alloc_ucontext_resp uresp = {};
303 struct irdma_ucontext *ucontext = to_ucontext(uctx);
304 struct irdma_uk_attrs *uk_attrs;
306 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
309 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
312 ucontext->iwdev = iwdev;
313 ucontext->abi_ver = req.userspace_ver;
315 uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
316 /* GEN_1 legacy support with libi40iw */
317 if (udata->outlen < sizeof(uresp)) {
318 if (uk_attrs->hw_rev != IRDMA_GEN_1)
321 ucontext->legacy_mode = true;
322 uresp.max_qps = iwdev->rf->max_qp;
323 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
324 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
325 uresp.kernel_ver = req.userspace_ver;
326 if (ib_copy_to_udata(udata, &uresp,
327 min(sizeof(uresp), udata->outlen)))
330 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
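/* expose the doorbell BAR page to userspace via an rdma mmap entry */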
332 ucontext->db_mmap_entry =
333 irdma_user_mmap_entry_insert(ucontext, bar_off,
336 if (!ucontext->db_mmap_entry)
339 uresp.kernel_ver = IRDMA_ABI_VER;
340 uresp.feature_flags = uk_attrs->feature_flags;
341 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
342 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
343 uresp.max_hw_inline = uk_attrs->max_hw_inline;
344 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
345 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
346 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
347 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
348 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
349 uresp.hw_rev = uk_attrs->hw_rev;
350 if (ib_copy_to_udata(udata, &uresp,
351 min(sizeof(uresp), udata->outlen))) {
352 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
357 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
358 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
359 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
360 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
365 ibdev_err(&iwdev->ibdev,
366 "Invalid userspace driver version detected. Detected version %d, should be %d\n",
367 req.userspace_ver, IRDMA_ABI_VER);
372 * irdma_dealloc_ucontext - deallocate the user context data structure
373 * @context: user context created during alloc
375 static void irdma_dealloc_ucontext(struct ib_ucontext *context)
377 struct irdma_ucontext *ucontext = to_ucontext(context);
379 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
383 * irdma_alloc_pd - allocate protection domain
387 static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
389 struct irdma_pd *iwpd = to_iwpd(pd);
390 struct irdma_device *iwdev = to_iwdev(pd->device);
391 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
392 struct irdma_pci_f *rf = iwdev->rf;
393 struct irdma_alloc_pd_resp uresp = {};
394 struct irdma_sc_pd *sc_pd;
398 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
403 sc_pd = &iwpd->sc_pd;
405 struct irdma_ucontext *ucontext =
406 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
408 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
410 if (ib_copy_to_udata(udata, &uresp,
411 min(sizeof(uresp), udata->outlen))) {
416 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
421 irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
427 * irdma_dealloc_pd - deallocate pd
428 * @ibpd: ptr of pd to be deallocated
431 static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
433 struct irdma_pd *iwpd = to_iwpd(ibpd);
434 struct irdma_device *iwdev = to_iwdev(ibpd->device);
436 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
442 * irdma_get_pbl - Retrieve pbl from a list given a virtual address
444 * @va: user virtual address
445 * @pbl_list: pbl list to search in (QP's or CQ's)
447 static struct irdma_pbl *irdma_get_pbl(unsigned long va,
448 struct list_head *pbl_list)
450 struct irdma_pbl *iwpbl;
452 list_for_each_entry (iwpbl, pbl_list, list) {
453 if (iwpbl->user_base == va) {
454 list_del(&iwpbl->list);
455 iwpbl->on_list = false;
464 * irdma_clean_cqes - clean cq entries for qp
465 * @iwqp: qp ptr (user or kernel)
468 static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
470 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
473 spin_lock_irqsave(&iwcq->lock, flags);
474 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
475 spin_unlock_irqrestore(&iwcq->lock, flags);
478 static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
480 if (iwqp->push_db_mmap_entry) {
481 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
482 iwqp->push_db_mmap_entry = NULL;
484 if (iwqp->push_wqe_mmap_entry) {
485 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
486 iwqp->push_wqe_mmap_entry = NULL;
490 static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
491 struct irdma_qp *iwqp,
492 u64 *push_wqe_mmap_key,
493 u64 *push_db_mmap_key)
495 struct irdma_device *iwdev = ucontext->iwdev;
498 rsvd = IRDMA_PF_BAR_RSVD;
499 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
500 /* skip over db page */
501 bar_off += IRDMA_HW_PAGE_SIZE;
503 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
504 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
505 bar_off, IRDMA_MMAP_IO_WC,
507 if (!iwqp->push_wqe_mmap_entry)
510 /* push doorbell page */
511 bar_off += IRDMA_HW_PAGE_SIZE;
512 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
513 bar_off, IRDMA_MMAP_IO_NC,
515 if (!iwqp->push_db_mmap_entry) {
516 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
524 * irdma_destroy_qp - destroy qp
525 * @ibqp: qp's ib pointer, also used to reach the device's qp
528 static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
530 struct irdma_qp *iwqp = to_iwqp(ibqp);
531 struct irdma_device *iwdev = iwqp->iwdev;
533 iwqp->sc_qp.qp_uk.destroy_pending = true;
535 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
536 irdma_modify_qp_to_err(&iwqp->sc_qp);
538 if (!iwqp->user_mode)
539 cancel_delayed_work_sync(&iwqp->dwork_flush);
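/* drop our reference and wait for all remaining holders to release the QP */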
541 irdma_qp_rem_ref(&iwqp->ibqp);
542 wait_for_completion(&iwqp->free_qp);
543 irdma_free_lsmm_rsrc(iwqp);
544 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
546 if (!iwqp->user_mode) {
548 irdma_clean_cqes(iwqp, iwqp->iwscq);
549 if (iwqp->iwrcq != iwqp->iwscq)
550 irdma_clean_cqes(iwqp, iwqp->iwrcq);
553 irdma_remove_push_mmap_entries(iwqp);
554 irdma_free_qp_rsrc(iwqp);
560 * irdma_setup_virt_qp - setup for allocation of virtual qp
561 * @iwdev: irdma device
563 * @init_info: initialize info to return
565 static void irdma_setup_virt_qp(struct irdma_device *iwdev,
566 struct irdma_qp *iwqp,
567 struct irdma_qp_init_info *init_info)
569 struct irdma_pbl *iwpbl = iwqp->iwpbl;
570 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
572 iwqp->page = qpmr->sq_page;
573 init_info->shadow_area_pa = qpmr->shadow;
574 if (iwpbl->pbl_allocated) {
575 init_info->virtual_map = true;
576 init_info->sq_pa = qpmr->sq_pbl.idx;
577 init_info->rq_pa = qpmr->rq_pbl.idx;
579 init_info->sq_pa = qpmr->sq_pbl.addr;
580 init_info->rq_pa = qpmr->rq_pbl.addr;
585 * irdma_setup_kmode_qp - setup initialization for kernel mode qp
586 * @iwdev: iwarp device
587 * @iwqp: qp ptr (user or kernel)
588 * @info: initialize info to return
589 * @init_attr: Initial QP create attributes
591 static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
592 struct irdma_qp *iwqp,
593 struct irdma_qp_init_info *info,
594 struct ib_qp_init_attr *init_attr)
596 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
597 u32 sqdepth, rqdepth;
601 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
602 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
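/* GEN_2 hardware accounts for one extra SQ fragment when computing the WQE shift */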
604 irdma_get_wqe_shift(uk_attrs,
605 uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
606 ukinfo->max_sq_frag_cnt,
607 ukinfo->max_inline_data, &sqshift);
608 status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
613 if (uk_attrs->hw_rev == IRDMA_GEN_1)
614 rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
616 irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
619 status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
624 iwqp->kqp.sq_wrid_mem =
625 kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
626 if (!iwqp->kqp.sq_wrid_mem)
629 iwqp->kqp.rq_wrid_mem =
630 kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
631 if (!iwqp->kqp.rq_wrid_mem) {
632 kfree(iwqp->kqp.sq_wrid_mem);
633 iwqp->kqp.sq_wrid_mem = NULL;
637 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
638 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
640 size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
641 size += (IRDMA_SHADOW_AREA_SIZE << 3);
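/* one DMA buffer laid out as SQ WQEs, then RQ WQEs, then the shadow area */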
643 mem->size = ALIGN(size, 256);
644 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
645 &mem->pa, GFP_KERNEL);
647 kfree(iwqp->kqp.sq_wrid_mem);
648 iwqp->kqp.sq_wrid_mem = NULL;
649 kfree(iwqp->kqp.rq_wrid_mem);
650 iwqp->kqp.rq_wrid_mem = NULL;
654 ukinfo->sq = mem->va;
655 info->sq_pa = mem->pa;
656 ukinfo->rq = &ukinfo->sq[sqdepth];
657 info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
658 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
659 info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
660 ukinfo->sq_size = sqdepth >> sqshift;
661 ukinfo->rq_size = rqdepth >> rqshift;
662 ukinfo->qp_id = iwqp->ibqp.qp_num;
664 init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
665 init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
670 static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
672 struct irdma_pci_f *rf = iwqp->iwdev->rf;
673 struct irdma_cqp_request *cqp_request;
674 struct cqp_cmds_info *cqp_info;
675 struct irdma_create_qp_info *qp_info;
678 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
682 cqp_info = &cqp_request->info;
683 qp_info = &cqp_request->info.in.u.qp_create.info;
684 memset(qp_info, 0, sizeof(*qp_info));
685 qp_info->mac_valid = true;
686 qp_info->cq_num_valid = true;
687 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
689 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
690 cqp_info->post_sq = 1;
691 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
692 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
693 status = irdma_handle_cqp_op(rf, cqp_request);
694 irdma_put_cqp_request(&rf->cqp, cqp_request);
699 static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
700 struct irdma_qp_host_ctx_info *ctx_info)
702 struct irdma_device *iwdev = iwqp->iwdev;
703 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
704 struct irdma_roce_offload_info *roce_info;
705 struct irdma_udp_offload_info *udp_info;
707 udp_info = &iwqp->udp_info;
708 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
709 udp_info->cwnd = iwdev->roce_cwnd;
710 udp_info->rexmit_thresh = 2;
711 udp_info->rnr_nak_thresh = 2;
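/* default UDP source port; ROCE_V2_UDP_DPORT is the well-known RoCEv2 destination port */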
712 udp_info->src_port = 0xc000;
713 udp_info->dst_port = ROCE_V2_UDP_DPORT;
714 roce_info = &iwqp->roce_info;
715 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
717 roce_info->rd_en = true;
718 roce_info->wr_rdresp_en = true;
719 roce_info->bind_en = true;
720 roce_info->dcqcn_en = false;
721 roce_info->rtomin = 5;
723 roce_info->ack_credits = iwdev->roce_ackcreds;
724 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
725 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
727 if (!iwqp->user_mode) {
728 roce_info->priv_mode_en = true;
729 roce_info->fast_reg_en = true;
730 roce_info->udprivcq_en = true;
732 roce_info->roce_tver = 0;
734 ctx_info->roce_info = &iwqp->roce_info;
735 ctx_info->udp_info = &iwqp->udp_info;
736 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
739 static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
740 struct irdma_qp_host_ctx_info *ctx_info)
742 struct irdma_device *iwdev = iwqp->iwdev;
743 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
744 struct irdma_iwarp_offload_info *iwarp_info;
746 iwarp_info = &iwqp->iwarp_info;
747 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
748 iwarp_info->rd_en = true;
749 iwarp_info->wr_rdresp_en = true;
750 iwarp_info->bind_en = true;
751 iwarp_info->ecn_en = true;
752 iwarp_info->rtomin = 5;
754 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
755 iwarp_info->ib_rd_en = true;
756 if (!iwqp->user_mode) {
757 iwarp_info->priv_mode_en = true;
758 iwarp_info->fast_reg_en = true;
760 iwarp_info->ddp_ver = 1;
761 iwarp_info->rdmap_ver = 1;
763 ctx_info->iwarp_info = &iwqp->iwarp_info;
764 ctx_info->iwarp_info_valid = true;
765 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
766 ctx_info->iwarp_info_valid = false;
769 static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
770 struct irdma_device *iwdev)
772 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
773 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
775 if (init_attr->create_flags)
778 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
779 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
780 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
783 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
784 if (init_attr->qp_type != IB_QPT_RC &&
785 init_attr->qp_type != IB_QPT_UD &&
786 init_attr->qp_type != IB_QPT_GSI)
789 if (init_attr->qp_type != IB_QPT_RC)
796 static void irdma_flush_worker(struct work_struct *work)
798 struct delayed_work *dwork = to_delayed_work(work);
799 struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
801 irdma_generate_flush_completions(iwqp);
805 * irdma_create_qp - create qp
807 * @init_attr: attributes for qp
808 * @udata: user data for create qp
810 static int irdma_create_qp(struct ib_qp *ibqp,
811 struct ib_qp_init_attr *init_attr,
812 struct ib_udata *udata)
814 struct ib_pd *ibpd = ibqp->pd;
815 struct irdma_pd *iwpd = to_iwpd(ibpd);
816 struct irdma_device *iwdev = to_iwdev(ibpd->device);
817 struct irdma_pci_f *rf = iwdev->rf;
818 struct irdma_qp *iwqp = to_iwqp(ibqp);
819 struct irdma_create_qp_req req;
820 struct irdma_create_qp_resp uresp = {};
825 struct irdma_sc_qp *qp;
826 struct irdma_sc_dev *dev = &rf->sc_dev;
827 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
828 struct irdma_qp_init_info init_info = {};
829 struct irdma_qp_host_ctx_info *ctx_info;
832 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
836 sq_size = init_attr->cap.max_send_wr;
837 rq_size = init_attr->cap.max_recv_wr;
839 init_info.vsi = &iwdev->vsi;
840 init_info.qp_uk_init_info.uk_attrs = uk_attrs;
841 init_info.qp_uk_init_info.sq_size = sq_size;
842 init_info.qp_uk_init_info.rq_size = rq_size;
843 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
844 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
845 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
848 qp->qp_uk.back_qp = iwqp;
849 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
852 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
854 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
855 iwqp->q2_ctx_mem.size,
856 &iwqp->q2_ctx_mem.pa,
858 if (!iwqp->q2_ctx_mem.va)
861 init_info.q2 = iwqp->q2_ctx_mem.va;
862 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
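/* the host context follows the Q2 buffer within the same DMA allocation */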
863 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
864 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
866 if (init_attr->qp_type == IB_QPT_GSI)
869 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
870 &qp_num, &rf->next_qp);
875 iwqp->ibqp.qp_num = qp_num;
877 iwqp->iwscq = to_iwcq(init_attr->send_cq);
878 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
879 iwqp->host_ctx.va = init_info.host_ctx;
880 iwqp->host_ctx.pa = init_info.host_ctx_pa;
881 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
883 init_info.pd = &iwpd->sc_pd;
884 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
885 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
886 init_info.qp_uk_init_info.first_sq_wq = 1;
887 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
888 init_waitqueue_head(&iwqp->waitq);
889 init_waitqueue_head(&iwqp->mod_qp_waitq);
892 err_code = ib_copy_from_udata(&req, udata,
893 min(sizeof(req), udata->inlen));
895 ibdev_dbg(&iwdev->ibdev,
896 "VERBS: ib_copy_from_data fail\n");
900 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
902 if (req.user_wqe_bufs) {
903 struct irdma_ucontext *ucontext =
904 rdma_udata_to_drv_context(udata,
905 struct irdma_ucontext,
908 init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
909 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
910 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
911 &ucontext->qp_reg_mem_list);
912 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
916 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
920 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
921 irdma_setup_virt_qp(iwdev, iwqp, &init_info);
923 INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
924 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
925 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
929 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
933 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
934 if (init_attr->qp_type == IB_QPT_RC) {
935 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
936 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
937 IRDMA_WRITE_WITH_IMM |
940 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
941 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
945 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
946 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
949 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
950 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
952 err_code = irdma_sc_qp_init(qp, &init_info);
954 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
958 ctx_info = &iwqp->ctx_info;
959 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
960 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
962 if (rdma_protocol_roce(&iwdev->ibdev, 1))
963 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
965 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
967 err_code = irdma_cqp_create_qp_cmd(iwqp);
971 refcount_set(&iwqp->refcnt, 1);
972 spin_lock_init(&iwqp->lock);
973 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
974 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
975 rf->qp_table[qp_num] = iwqp;
976 iwqp->max_send_wr = sq_size;
977 iwqp->max_recv_wr = rq_size;
979 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
980 if (dev->ws_add(&iwdev->vsi, 0)) {
981 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
986 irdma_qp_add_qos(&iwqp->sc_qp);
990 /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
991 if (udata->outlen < sizeof(uresp)) {
993 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
995 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
998 uresp.actual_sq_size = sq_size;
999 uresp.actual_rq_size = rq_size;
1000 uresp.qp_id = qp_num;
1001 uresp.qp_caps = qp->qp_uk.qp_caps;
1003 err_code = ib_copy_to_udata(udata, &uresp,
1004 min(sizeof(uresp), udata->outlen));
1006 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
1007 irdma_destroy_qp(&iwqp->ibqp, udata);
1012 init_completion(&iwqp->free_qp);
1016 irdma_free_qp_rsrc(iwqp);
1020 static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1024 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1025 if (iwqp->roce_info.wr_rdresp_en) {
1026 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1027 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1029 if (iwqp->roce_info.rd_en)
1030 acc_flags |= IB_ACCESS_REMOTE_READ;
1031 if (iwqp->roce_info.bind_en)
1032 acc_flags |= IB_ACCESS_MW_BIND;
1034 if (iwqp->iwarp_info.wr_rdresp_en) {
1035 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1036 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1038 if (iwqp->iwarp_info.rd_en)
1039 acc_flags |= IB_ACCESS_REMOTE_READ;
1040 if (iwqp->iwarp_info.bind_en)
1041 acc_flags |= IB_ACCESS_MW_BIND;
1047 * irdma_query_qp - query qp attributes
1049 * @attr: attributes pointer
1050 * @attr_mask: Not used
1051 * @init_attr: qp attributes to return
1053 static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1054 int attr_mask, struct ib_qp_init_attr *init_attr)
1056 struct irdma_qp *iwqp = to_iwqp(ibqp);
1057 struct irdma_sc_qp *qp = &iwqp->sc_qp;
1059 memset(attr, 0, sizeof(*attr));
1060 memset(init_attr, 0, sizeof(*init_attr));
1062 attr->qp_state = iwqp->ibqp_state;
1063 attr->cur_qp_state = iwqp->ibqp_state;
1064 attr->cap.max_send_wr = iwqp->max_send_wr;
1065 attr->cap.max_recv_wr = iwqp->max_recv_wr;
1066 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1067 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1068 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1069 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1071 if (rdma_protocol_roce(ibqp->device, 1)) {
1072 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1073 attr->qkey = iwqp->roce_info.qkey;
1074 attr->rq_psn = iwqp->udp_info.epsn;
1075 attr->sq_psn = iwqp->udp_info.psn_nxt;
1076 attr->dest_qp_num = iwqp->roce_info.dest_qp;
1077 attr->pkey_index = iwqp->roce_info.p_key;
1078 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1079 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1080 attr->max_rd_atomic = iwqp->roce_info.ord_size;
1081 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1084 init_attr->event_handler = iwqp->ibqp.event_handler;
1085 init_attr->qp_context = iwqp->ibqp.qp_context;
1086 init_attr->send_cq = iwqp->ibqp.send_cq;
1087 init_attr->recv_cq = iwqp->ibqp.recv_cq;
1088 init_attr->cap = attr->cap;
1094 * irdma_query_pkey - Query partition key
1095 * @ibdev: device pointer from stack
1096 * @port: port number
1097 * @index: index of pkey
1098 * @pkey: pointer to store the pkey
1100 static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1103 if (index >= IRDMA_PKEY_TBL_SZ)
1106 *pkey = IRDMA_DEFAULT_PKEY;
1111 * irdma_modify_qp_roce - modify qp request
1112 * @ibqp: qp's pointer for modify
1113 * @attr: access attributes
1114 * @attr_mask: state mask
1117 int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1118 int attr_mask, struct ib_udata *udata)
1120 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1121 struct irdma_qp *iwqp = to_iwqp(ibqp);
1122 struct irdma_device *iwdev = iwqp->iwdev;
1123 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1124 struct irdma_qp_host_ctx_info *ctx_info;
1125 struct irdma_roce_offload_info *roce_info;
1126 struct irdma_udp_offload_info *udp_info;
1127 struct irdma_modify_qp_info info = {};
1128 struct irdma_modify_qp_resp uresp = {};
1129 struct irdma_modify_qp_req ureq = {};
1130 unsigned long flags;
1131 u8 issue_modify_qp = 0;
1134 ctx_info = &iwqp->ctx_info;
1135 roce_info = &iwqp->roce_info;
1136 udp_info = &iwqp->udp_info;
1138 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1141 if (attr_mask & IB_QP_DEST_QPN)
1142 roce_info->dest_qp = attr->dest_qp_num;
1144 if (attr_mask & IB_QP_PKEY_INDEX) {
1145 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1151 if (attr_mask & IB_QP_QKEY)
1152 roce_info->qkey = attr->qkey;
1154 if (attr_mask & IB_QP_PATH_MTU)
1155 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1157 if (attr_mask & IB_QP_SQ_PSN) {
1158 udp_info->psn_nxt = attr->sq_psn;
1159 udp_info->lsn = 0xffff;
1160 udp_info->psn_una = attr->sq_psn;
1161 udp_info->psn_max = attr->sq_psn;
1164 if (attr_mask & IB_QP_RQ_PSN)
1165 udp_info->epsn = attr->rq_psn;
1167 if (attr_mask & IB_QP_RNR_RETRY)
1168 udp_info->rnr_nak_thresh = attr->rnr_retry;
1170 if (attr_mask & IB_QP_RETRY_CNT)
1171 udp_info->rexmit_thresh = attr->retry_cnt;
1173 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1175 if (attr_mask & IB_QP_AV) {
1176 struct irdma_av *av = &iwqp->roce_ah.av;
1177 const struct ib_gid_attr *sgid_attr;
1178 u16 vlan_id = VLAN_N_VID;
1181 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1182 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1183 udp_info->ttl = attr->ah_attr.grh.hop_limit;
1184 udp_info->flow_label = attr->ah_attr.grh.flow_label;
1185 udp_info->tos = attr->ah_attr.grh.traffic_class;
1186 udp_info->src_port =
1187 rdma_get_udp_sport(udp_info->flow_label,
1189 roce_info->dest_qp);
1190 irdma_qp_rem_qos(&iwqp->sc_qp);
1191 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1192 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1193 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1194 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1196 irdma_qp_add_qos(&iwqp->sc_qp);
1198 sgid_attr = attr->ah_attr.grh.sgid_attr;
1199 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1200 ctx_info->roce_info->mac_addr);
1204 if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
1206 if (vlan_id < VLAN_N_VID) {
1207 udp_info->insert_vlan_tag = true;
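/* fold the 802.1p priority into the upper bits of the VLAN tag */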
1208 udp_info->vlan_tag = vlan_id |
1209 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1211 udp_info->insert_vlan_tag = false;
1214 av->attrs = attr->ah_attr;
1215 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1216 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1217 if (av->net_type == RDMA_NETWORK_IPV6) {
1219 av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1221 av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1223 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1224 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1226 udp_info->ipv4 = false;
1227 irdma_copy_ip_ntohl(local_ip, daddr);
1229 udp_info->arp_idx = irdma_arp_table(iwdev->rf,
1233 } else if (av->net_type == RDMA_NETWORK_IPV4) {
1234 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1235 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1237 local_ip[0] = ntohl(daddr);
1239 udp_info->ipv4 = true;
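/* for IPv4 only the last word of the 4-word address arrays is used */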
1240 udp_info->dest_ip_addr[0] = 0;
1241 udp_info->dest_ip_addr[1] = 0;
1242 udp_info->dest_ip_addr[2] = 0;
1243 udp_info->dest_ip_addr[3] = local_ip[0];
1245 udp_info->local_ipaddr[0] = 0;
1246 udp_info->local_ipaddr[1] = 0;
1247 udp_info->local_ipaddr[2] = 0;
1248 udp_info->local_ipaddr[3] = ntohl(saddr);
1251 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1252 attr->ah_attr.roce.dmac);
1255 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1256 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1257 ibdev_err(&iwdev->ibdev,
1258 "rd_atomic = %d, above max_hw_ord=%d\n",
1259 attr->max_rd_atomic,
1260 dev->hw_attrs.max_hw_ord);
1263 if (attr->max_rd_atomic)
1264 roce_info->ord_size = attr->max_rd_atomic;
1265 info.ord_valid = true;
1268 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1269 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1270 ibdev_err(&iwdev->ibdev,
1271 "rd_atomic = %d, above max_hw_ird=%d\n",
1272 attr->max_rd_atomic,
1273 dev->hw_attrs.max_hw_ird);
1276 if (attr->max_dest_rd_atomic)
1277 roce_info->ird_size = attr->max_dest_rd_atomic;
1280 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1281 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1282 roce_info->wr_rdresp_en = true;
1283 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1284 roce_info->wr_rdresp_en = true;
1285 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1286 roce_info->rd_en = true;
1289 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1291 ibdev_dbg(&iwdev->ibdev,
1292 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1293 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1294 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1296 spin_lock_irqsave(&iwqp->lock, flags);
1297 if (attr_mask & IB_QP_STATE) {
1298 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1299 iwqp->ibqp.qp_type, attr_mask)) {
1300 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1301 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1306 info.curr_iwarp_state = iwqp->iwarp_state;
1308 switch (attr->qp_state) {
1310 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1315 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1316 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1317 issue_modify_qp = 1;
1321 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1325 info.arp_cache_idx_valid = true;
1326 info.cq_num_valid = true;
1327 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1328 issue_modify_qp = 1;
1331 if (iwqp->ibqp_state < IB_QPS_RTR ||
1332 iwqp->ibqp_state == IB_QPS_ERR) {
1337 info.arp_cache_idx_valid = true;
1338 info.cq_num_valid = true;
1339 info.ord_valid = true;
1340 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1341 issue_modify_qp = 1;
1342 if (iwdev->push_mode && udata &&
1343 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1344 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1345 spin_unlock_irqrestore(&iwqp->lock, flags);
1346 irdma_alloc_push_page(iwqp);
1347 spin_lock_irqsave(&iwqp->lock, flags);
1351 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1354 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1359 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1360 issue_modify_qp = 1;
1365 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1366 spin_unlock_irqrestore(&iwqp->lock, flags);
1367 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1368 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1369 spin_lock_irqsave(&iwqp->lock, flags);
1372 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1373 spin_unlock_irqrestore(&iwqp->lock, flags);
1375 if (ib_copy_from_udata(&ureq, udata,
1376 min(sizeof(ureq), udata->inlen)))
1379 irdma_flush_wqes(iwqp,
1380 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1381 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1387 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1388 issue_modify_qp = 1;
1395 iwqp->ibqp_state = attr->qp_state;
1398 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1399 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1400 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1401 spin_unlock_irqrestore(&iwqp->lock, flags);
1403 if (attr_mask & IB_QP_STATE) {
1404 if (issue_modify_qp) {
1405 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1406 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1408 spin_lock_irqsave(&iwqp->lock, flags);
1409 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1410 iwqp->iwarp_state = info.next_iwarp_state;
1411 iwqp->ibqp_state = attr->qp_state;
1413 if (iwqp->ibqp_state > IB_QPS_RTS &&
1414 !iwqp->flush_issued) {
1415 spin_unlock_irqrestore(&iwqp->lock, flags);
1416 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1419 iwqp->flush_issued = 1;
1421 spin_unlock_irqrestore(&iwqp->lock, flags);
1424 iwqp->ibqp_state = attr->qp_state;
1426 if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1427 struct irdma_ucontext *ucontext;
1429 ucontext = rdma_udata_to_drv_context(udata,
1430 struct irdma_ucontext, ibucontext);
1431 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1432 !iwqp->push_wqe_mmap_entry &&
1433 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1434 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1435 uresp.push_valid = 1;
1436 uresp.push_offset = iwqp->sc_qp.push_offset;
1438 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1441 irdma_remove_push_mmap_entries(iwqp);
1442 ibdev_dbg(&iwdev->ibdev,
1443 "VERBS: copy_to_udata failed\n");
1451 spin_unlock_irqrestore(&iwqp->lock, flags);
1457 * irdma_modify_qp - modify qp request
1458 * @ibqp: qp's pointer for modify
1459 * @attr: access attributes
1460 * @attr_mask: state mask
1463 int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1464 struct ib_udata *udata)
1466 struct irdma_qp *iwqp = to_iwqp(ibqp);
1467 struct irdma_device *iwdev = iwqp->iwdev;
1468 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1469 struct irdma_qp_host_ctx_info *ctx_info;
1470 struct irdma_tcp_offload_info *tcp_info;
1471 struct irdma_iwarp_offload_info *offload_info;
1472 struct irdma_modify_qp_info info = {};
1473 struct irdma_modify_qp_resp uresp = {};
1474 struct irdma_modify_qp_req ureq = {};
1475 u8 issue_modify_qp = 0;
1478 unsigned long flags;
1480 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1483 ctx_info = &iwqp->ctx_info;
1484 offload_info = &iwqp->iwarp_info;
1485 tcp_info = &iwqp->tcp_info;
1486 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1487 ibdev_dbg(&iwdev->ibdev,
1488 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1489 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1490 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1491 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1493 spin_lock_irqsave(&iwqp->lock, flags);
1494 if (attr_mask & IB_QP_STATE) {
1495 info.curr_iwarp_state = iwqp->iwarp_state;
1496 switch (attr->qp_state) {
1499 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1504 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1505 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1506 issue_modify_qp = 1;
1508 if (iwdev->push_mode && udata &&
1509 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1510 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1511 spin_unlock_irqrestore(&iwqp->lock, flags);
1512 irdma_alloc_push_page(iwqp);
1513 spin_lock_irqsave(&iwqp->lock, flags);
1517 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1523 issue_modify_qp = 1;
1524 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1525 iwqp->hte_added = 1;
1526 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1527 info.tcp_ctx_valid = true;
1528 info.ord_valid = true;
1529 info.arp_cache_idx_valid = true;
1530 info.cq_num_valid = true;
1533 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1538 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1539 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1544 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1549 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1550 issue_modify_qp = 1;
1553 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1558 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1559 issue_modify_qp = 1;
1563 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1564 spin_unlock_irqrestore(&iwqp->lock, flags);
1566 if (ib_copy_from_udata(&ureq, udata,
1567 min(sizeof(ureq), udata->inlen)))
1570 irdma_flush_wqes(iwqp,
1571 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1572 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1578 if (iwqp->sc_qp.term_flags) {
1579 spin_unlock_irqrestore(&iwqp->lock, flags);
1580 irdma_terminate_del_timer(&iwqp->sc_qp);
1581 spin_lock_irqsave(&iwqp->lock, flags);
1583 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1584 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1586 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1587 info.reset_tcp_conn = true;
1591 issue_modify_qp = 1;
1592 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1599 iwqp->ibqp_state = attr->qp_state;
1601 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1602 ctx_info->iwarp_info_valid = true;
1603 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1604 offload_info->wr_rdresp_en = true;
1605 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1606 offload_info->wr_rdresp_en = true;
1607 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1608 offload_info->rd_en = true;
1611 if (ctx_info->iwarp_info_valid) {
1612 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1613 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1614 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1616 spin_unlock_irqrestore(&iwqp->lock, flags);
1618 if (attr_mask & IB_QP_STATE) {
1619 if (issue_modify_qp) {
1620 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1621 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1625 spin_lock_irqsave(&iwqp->lock, flags);
1626 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1627 iwqp->iwarp_state = info.next_iwarp_state;
1628 iwqp->ibqp_state = attr->qp_state;
1630 spin_unlock_irqrestore(&iwqp->lock, flags);
1633 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1635 if (iwqp->hw_tcp_state) {
1636 spin_lock_irqsave(&iwqp->lock, flags);
1637 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1638 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1639 spin_unlock_irqrestore(&iwqp->lock, flags);
1641 irdma_cm_disconn(iwqp);
1643 int close_timer_started;
1645 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1647 if (iwqp->cm_node) {
1648 refcount_inc(&iwqp->cm_node->refcnt);
1649 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1650 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1651 if (iwqp->cm_id && close_timer_started == 1)
1652 irdma_schedule_cm_timer(iwqp->cm_node,
1653 (struct irdma_puda_buf *)iwqp,
1654 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1656 irdma_rem_ref_cm_node(iwqp->cm_node);
1658 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1662 if (attr_mask & IB_QP_STATE && udata &&
1663 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1664 struct irdma_ucontext *ucontext;
1666 ucontext = rdma_udata_to_drv_context(udata,
1667 struct irdma_ucontext, ibucontext);
1668 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1669 !iwqp->push_wqe_mmap_entry &&
1670 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1671 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1672 uresp.push_valid = 1;
1673 uresp.push_offset = iwqp->sc_qp.push_offset;
1676 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1679 irdma_remove_push_mmap_entries(iwqp);
1680 ibdev_dbg(&iwdev->ibdev,
1681 "VERBS: copy_to_udata failed\n");
1688 spin_unlock_irqrestore(&iwqp->lock, flags);
1694 * irdma_cq_free_rsrc - free up resources for cq
1695 * @rf: RDMA PCI function
1698 static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1700 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1702 if (!iwcq->user_mode) {
1703 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1704 iwcq->kmem.va, iwcq->kmem.pa);
1705 iwcq->kmem.va = NULL;
1706 dma_free_coherent(rf->sc_dev.hw->device,
1707 iwcq->kmem_shadow.size,
1708 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1709 iwcq->kmem_shadow.va = NULL;
1712 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1716 * irdma_free_cqbuf - worker to free a cq buffer
1717 * @work: provides access to the cq buffer to free
1719 static void irdma_free_cqbuf(struct work_struct *work)
1721 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1723 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1724 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1725 cq_buf->kmem_buf.va = NULL;
1730 * irdma_process_resize_list - remove resized cq buffers from the resize_list
1731 * @iwcq: cq which owns the resize_list
1732 * @iwdev: irdma device
1733 * @lcqe_buf: the buffer where the last cqe is received
1735 static int irdma_process_resize_list(struct irdma_cq *iwcq,
1736 struct irdma_device *iwdev,
1737 struct irdma_cq_buf *lcqe_buf)
1739 struct list_head *tmp_node, *list_node;
1740 struct irdma_cq_buf *cq_buf;
1743 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1744 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1745 if (cq_buf == lcqe_buf)
1748 list_del(&cq_buf->list);
1749 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1757 * irdma_destroy_cq - destroy cq
1758 * @ib_cq: cq pointer
1761 static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1763 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1764 struct irdma_cq *iwcq = to_iwcq(ib_cq);
1765 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1766 struct irdma_sc_dev *dev = cq->dev;
1767 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1768 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1769 unsigned long flags;
1771 spin_lock_irqsave(&iwcq->lock, flags);
1772 if (!list_empty(&iwcq->cmpl_generated))
1773 irdma_remove_cmpls_list(iwcq);
1774 if (!list_empty(&iwcq->resize_list))
1775 irdma_process_resize_list(iwcq, iwdev, NULL);
1776 spin_unlock_irqrestore(&iwcq->lock, flags);
1778 irdma_cq_wq_destroy(iwdev->rf, cq);
1779 irdma_cq_free_rsrc(iwdev->rf, iwcq);
1781 spin_lock_irqsave(&iwceq->ce_lock, flags);
1782 irdma_sc_cleanup_ceqes(cq, ceq);
1783 spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1789 * irdma_resize_cq - resize cq
1790 * @ibcq: cq to be resized
1791 * @entries: desired cq size
1794 static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1795 struct ib_udata *udata)
1797 struct irdma_cq *iwcq = to_iwcq(ibcq);
1798 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1799 struct irdma_cqp_request *cqp_request;
1800 struct cqp_cmds_info *cqp_info;
1801 struct irdma_modify_cq_info *m_info;
1802 struct irdma_modify_cq_info info = {};
1803 struct irdma_dma_mem kmem_buf;
1804 struct irdma_cq_mr *cqmr_buf;
1805 struct irdma_pbl *iwpbl_buf;
1806 struct irdma_device *iwdev;
1807 struct irdma_pci_f *rf;
1808 struct irdma_cq_buf *cq_buf = NULL;
1809 unsigned long flags;
1812 iwdev = to_iwdev(ibcq->device);
1815 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1816 IRDMA_FEATURE_CQ_RESIZE))
1819 if (entries > rf->max_cqe)
1822 if (!iwcq->user_mode) {
1824 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1828 info.cq_size = max(entries, 4);
1830 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1834 struct irdma_resize_cq_req req = {};
1835 struct irdma_ucontext *ucontext =
1836 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1839 /* CQ resize not supported with legacy GEN_1 libi40iw */
1840 if (ucontext->legacy_mode)
1843 if (ib_copy_from_udata(&req, udata,
1844 min(sizeof(req), udata->inlen)))
1847 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1848 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1849 &ucontext->cq_reg_mem_list);
1850 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1855 cqmr_buf = &iwpbl_buf->cq_mr;
1856 if (iwpbl_buf->pbl_allocated) {
1857 info.virtual_map = true;
1858 info.pbl_chunk_size = 1;
1859 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1861 info.cq_pa = cqmr_buf->cq_pbl.addr;
1864 /* Kmode CQ resize */
1867 rsize = info.cq_size * sizeof(struct irdma_cqe);
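/* CQ memory is rounded up and aligned to a 256-byte boundary */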
1868 kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1869 kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1870 kmem_buf.size, &kmem_buf.pa,
1875 info.cq_base = kmem_buf.va;
1876 info.cq_pa = kmem_buf.pa;
1877 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1884 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1890 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1891 info.cq_resize = true;
1893 cqp_info = &cqp_request->info;
1894 m_info = &cqp_info->in.u.cq_modify.info;
1895 memcpy(m_info, &info, sizeof(*m_info));
1897 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1898 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1899 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1900 cqp_info->post_sq = 1;
1901 ret = irdma_handle_cqp_op(rf, cqp_request);
1902 irdma_put_cqp_request(&rf->cqp, cqp_request);
1906 spin_lock_irqsave(&iwcq->lock, flags);
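/* park the old CQ memory on resize_list; it is freed from a workqueue once its CQEs have been consumed */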
1908 cq_buf->kmem_buf = iwcq->kmem;
1909 cq_buf->hw = dev->hw;
1910 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1911 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1912 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1913 iwcq->kmem = kmem_buf;
1916 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1917 ibcq->cqe = info.cq_size - 1;
1918 spin_unlock_irqrestore(&iwcq->lock, flags);
1923 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
1932 static inline int cq_validate_flags(u32 flags, u8 hw_rev)
1934 /* GEN1 does not support CQ create flags */
1935 if (hw_rev == IRDMA_GEN_1)
1936 return flags ? -EOPNOTSUPP : 0;
1938 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
1942 * irdma_create_cq - create cq
1943 * @ibcq: CQ allocated
1944 * @attr: attributes for cq
1947 static int irdma_create_cq(struct ib_cq *ibcq,
1948 const struct ib_cq_init_attr *attr,
1949 struct ib_udata *udata)
1951 struct ib_device *ibdev = ibcq->device;
1952 struct irdma_device *iwdev = to_iwdev(ibdev);
1953 struct irdma_pci_f *rf = iwdev->rf;
1954 struct irdma_cq *iwcq = to_iwcq(ibcq);
1956 struct irdma_sc_cq *cq;
1957 struct irdma_sc_dev *dev = &rf->sc_dev;
1958 struct irdma_cq_init_info info = {};
1959 struct irdma_cqp_request *cqp_request;
1960 struct cqp_cmds_info *cqp_info;
1961 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1962 unsigned long flags;
1964 int entries = attr->cqe;
1966 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1969 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1976 spin_lock_init(&iwcq->lock);
1977 INIT_LIST_HEAD(&iwcq->resize_list);
1978 INIT_LIST_HEAD(&iwcq->cmpl_generated);
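/* enforce a minimum CQ size of 4 entries */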
1980 ukinfo->cq_size = max(entries, 4);
1981 ukinfo->cq_id = cq_num;
1982 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1983 if (attr->comp_vector < rf->ceqs_count)
1984 info.ceq_id = attr->comp_vector;
1985 info.ceq_id_valid = true;
1987 info.type = IRDMA_CQ_TYPE_IWARP;
1988 info.vsi = &iwdev->vsi;
1991 struct irdma_ucontext *ucontext;
1992 struct irdma_create_cq_req req = {};
1993 struct irdma_cq_mr *cqmr;
1994 struct irdma_pbl *iwpbl;
1995 struct irdma_pbl *iwpbl_shadow;
1996 struct irdma_cq_mr *cqmr_shadow;
1998 iwcq->user_mode = true;
2000 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2002 if (ib_copy_from_udata(&req, udata,
2003 min(sizeof(req), udata->inlen))) {
2008 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2009 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2010 &ucontext->cq_reg_mem_list);
2011 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2017 iwcq->iwpbl = iwpbl;
2018 iwcq->cq_mem_size = 0;
2019 cqmr = &iwpbl->cq_mr;
2021 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2022 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2023 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2024 iwpbl_shadow = irdma_get_pbl(
2025 (unsigned long)req.user_shadow_area,
2026 &ucontext->cq_reg_mem_list);
2027 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2029 if (!iwpbl_shadow) {
2033 iwcq->iwpbl_shadow = iwpbl_shadow;
2034 cqmr_shadow = &iwpbl_shadow->cq_mr;
2035 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2038 info.shadow_area_pa = cqmr->shadow;
2040 if (iwpbl->pbl_allocated) {
2041 info.virtual_map = true;
2042 info.pbl_chunk_size = 1;
2043 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2045 info.cq_base_pa = cqmr->cq_pbl.addr;
2048 /* Kmode allocations */
2051 if (entries < 1 || entries > rf->max_cqe) {
2057 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2059 ukinfo->cq_size = entries;
2061 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2062 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2063 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2065 &iwcq->kmem.pa, GFP_KERNEL);
2066 if (!iwcq->kmem.va) {
2071 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2073 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2074 iwcq->kmem_shadow.size,
2075 &iwcq->kmem_shadow.pa,
2077 if (!iwcq->kmem_shadow.va) {
2081 info.shadow_area_pa = iwcq->kmem_shadow.pa;
2082 ukinfo->shadow_area = iwcq->kmem_shadow.va;
2083 ukinfo->cq_base = iwcq->kmem.va;
2084 info.cq_base_pa = iwcq->kmem.pa;
2087 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2088 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2089 (u32)IRDMA_MAX_CQ_READ_THRESH);
2091 if (irdma_sc_cq_init(cq, &info)) {
2092 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2097 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2103 cqp_info = &cqp_request->info;
2104 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2105 cqp_info->post_sq = 1;
2106 cqp_info->in.u.cq_create.cq = cq;
2107 cqp_info->in.u.cq_create.check_overflow = true;
2108 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2109 err_code = irdma_handle_cqp_op(rf, cqp_request);
2110 irdma_put_cqp_request(&rf->cqp, cqp_request);
2115 struct irdma_create_cq_resp resp = {};
2117 resp.cq_id = info.cq_uk_init_info.cq_id;
2118 resp.cq_size = info.cq_uk_init_info.cq_size;
2119 if (ib_copy_to_udata(udata, &resp,
2120 min(sizeof(resp), udata->outlen))) {
2121 ibdev_dbg(&iwdev->ibdev,
2122 "VERBS: copy to user data\n");
2129 irdma_cq_wq_destroy(rf, cq);
2131 irdma_cq_free_rsrc(rf, iwcq);
2137 * irdma_get_mr_access - get hw MR access permissions from IB access flags
2138 * @access: IB access flags
2140 static inline u16 irdma_get_mr_access(int access)
2144 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2145 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2146 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2147 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2148 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2149 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2150 hw_access |= (access & IB_ACCESS_MW_BIND) ?
2151 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2152 hw_access |= (access & IB_ZERO_BASED) ?
2153 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
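/* Local read access is always granted on the resulting stag,
 * regardless of the IB access flags passed in.
 */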
2154 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2160 * irdma_free_stag - free stag resource
2161 * @iwdev: irdma device
2162 * @stag: stag to free
2164 static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2168 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2169 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2173 * irdma_create_stag - create random stag
2174 * @iwdev: irdma device
2176 static u32 irdma_create_stag(struct irdma_device *iwdev)
2180 u32 next_stag_index;
2186 get_random_bytes(&random, sizeof(random));
2187 consumer_key = (u8)random;
2189 driver_key = random & ~iwdev->rf->mr_stagmask;
2190 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2191 next_stag_index %= iwdev->rf->max_mr;
2193 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2194 iwdev->rf->max_mr, &stag_index,
2198 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2200 stag += (u32)consumer_key;
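/* The stag packs the allocated resource index at IRDMA_CQPSQ_STAG_IDX_S
 * with the random consumer key in the low byte; irdma_free_stag()
 * recovers the index by masking with mr_stagmask and shifting back.
 */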
2206 * irdma_next_pbl_addr - Get next pbl address
2207 * @pbl: pointer to a pble
2208 * @pinfo: info pointer
2211 static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2215 if (!(*pinfo) || *idx != (*pinfo)->cnt)
2220 return (*pinfo)->addr;
2224 * irdma_copy_user_pgaddrs - copy user page addresses into the pble's
2225 * @iwmr: iwmr for IB's user page addresses
2226 * @pbl: pble pointer to save level 1 or level 0 pble
2227 * @level: indicates level 0, 1 or 2
2229 static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2230 enum irdma_pble_level level)
2232 struct ib_umem *region = iwmr->region;
2233 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2234 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2235 struct irdma_pble_info *pinfo;
2236 struct ib_block_iter biter;
2240 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2242 if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2243 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2245 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2246 *pbl = rdma_block_iter_dma_address(&biter);
2247 if (++pbl_cnt == palloc->total_cnt)
2249 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2254 * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
2255 * @arr: lvl1 pbl array
2256 * @npages: page count
2257 * @pg_size: page size
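 *
 * Returns true only when every entry satisfies
 * arr[i] == arr[0] + i * pg_size, i.e. the pages form one physically
 * contiguous range.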
2260 static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2264 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2265 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2273 * irdma_check_mr_contiguous - check if MR is physically contiguous
2274 * @palloc: pbl allocation struct
2275 * @pg_size: page size
2277 static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2280 struct irdma_pble_level2 *lvl2 = &palloc->level2;
2281 struct irdma_pble_info *leaf = lvl2->leaf;
2283 u64 *start_addr = NULL;
2287 if (palloc->level == PBLE_LEVEL_1) {
2288 arr = palloc->level1.addr;
2289 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2294 start_addr = leaf->addr;
2296 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2298 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2300 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2309 * irdma_setup_pbles - copy user page addresses to pble's
2310 * @rf: RDMA PCI function
2311 * @iwmr: mr pointer for this memory registration
2312 * @use_pbles: flag to use pble's
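 *
 * When @use_pbles is set, a pble backing is taken from the pble resource
 * pool and the umem page addresses are copied into it with
 * irdma_copy_user_pgaddrs(); otherwise the single page address is kept
 * inline in iwmr->pgaddrmem[0].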
2314 static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2317 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2318 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2319 struct irdma_pble_info *pinfo;
2322 enum irdma_pble_level level = PBLE_LEVEL_1;
2325 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2330 iwpbl->pbl_allocated = true;
2331 level = palloc->level;
2332 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2333 palloc->level2.leaf;
2336 pbl = iwmr->pgaddrmem;
2339 irdma_copy_user_pgaddrs(iwmr, pbl, level);
2342 iwmr->pgaddrmem[0] = *pbl;
2348 * irdma_handle_q_mem - handle memory for qp and cq
2349 * @iwdev: irdma device
2350 * @req: information for q memory management
2351 * @iwpbl: pble struct
2352 * @use_pbles: flag to use pble
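 *
 * For QP registrations the region is laid out as SQ pages followed by RQ
 * pages, with the page after them used as the shadow area; CQ
 * registrations use cq_pages plus a trailing shadow page (see the switch
 * below).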
2354 static int irdma_handle_q_mem(struct irdma_device *iwdev,
2355 struct irdma_mem_reg_req *req,
2356 struct irdma_pbl *iwpbl, bool use_pbles)
2358 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2359 struct irdma_mr *iwmr = iwpbl->iwmr;
2360 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2361 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2362 struct irdma_hmc_pble *hmc_p;
2363 u64 *arr = iwmr->pgaddrmem;
2368 pg_size = iwmr->page_size;
2369 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2373 if (use_pbles && palloc->level != PBLE_LEVEL_1) {
2374 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2375 iwpbl->pbl_allocated = false;
2380 arr = palloc->level1.addr;
2382 switch (iwmr->type) {
2383 case IRDMA_MEMREG_TYPE_QP:
2384 total = req->sq_pages + req->rq_pages;
2385 hmc_p = &qpmr->sq_pbl;
2386 qpmr->shadow = (dma_addr_t)arr[total];
2389 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2392 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2398 hmc_p->idx = palloc->level1.idx;
2399 hmc_p = &qpmr->rq_pbl;
2400 hmc_p->idx = palloc->level1.idx + req->sq_pages;
2402 hmc_p->addr = arr[0];
2403 hmc_p = &qpmr->rq_pbl;
2404 hmc_p->addr = arr[req->sq_pages];
2407 case IRDMA_MEMREG_TYPE_CQ:
2408 hmc_p = &cqmr->cq_pbl;
2411 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2414 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2418 hmc_p->idx = palloc->level1.idx;
2420 hmc_p->addr = arr[0];
2423 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2427 if (use_pbles && ret) {
2428 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2429 iwpbl->pbl_allocated = false;
2436 * irdma_hw_alloc_mw - create the hw memory window
2437 * @iwdev: irdma device
2438 * @iwmr: pointer to memory window info
2440 static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2442 struct irdma_mw_alloc_info *info;
2443 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2444 struct irdma_cqp_request *cqp_request;
2445 struct cqp_cmds_info *cqp_info;
2448 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2452 cqp_info = &cqp_request->info;
2453 info = &cqp_info->in.u.mw_alloc.info;
2454 memset(info, 0, sizeof(*info));
2455 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2456 info->mw_wide = true;
2458 info->page_size = PAGE_SIZE;
2459 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2460 info->pd_id = iwpd->sc_pd.pd_id;
2461 info->remote_access = true;
2462 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2463 cqp_info->post_sq = 1;
2464 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2465 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2466 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2467 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2473 * irdma_alloc_mw - Allocate memory window
2474 * @ibmw: Memory Window
2475 * @udata: user data pointer
2477 static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2479 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2480 struct irdma_mr *iwmr = to_iwmw(ibmw);
2484 stag = irdma_create_stag(iwdev);
2491 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2493 irdma_free_stag(iwdev, stag);
2501 * irdma_dealloc_mw - Dealloc memory window
2502 * @ibmw: memory window structure.
2504 static int irdma_dealloc_mw(struct ib_mw *ibmw)
2506 struct ib_pd *ibpd = ibmw->pd;
2507 struct irdma_pd *iwpd = to_iwpd(ibpd);
2508 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2509 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2510 struct irdma_cqp_request *cqp_request;
2511 struct cqp_cmds_info *cqp_info;
2512 struct irdma_dealloc_stag_info *info;
2514 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2518 cqp_info = &cqp_request->info;
2519 info = &cqp_info->in.u.dealloc_stag.info;
2520 memset(info, 0, sizeof(*info));
2521 info->pd_id = iwpd->sc_pd.pd_id;
2522 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2524 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2525 cqp_info->post_sq = 1;
2526 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2527 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2528 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2529 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2530 irdma_free_stag(iwdev, iwmr->stag);
2536 * irdma_hw_alloc_stag - cqp command to allocate stag
2537 * @iwdev: irdma device
2538 * @iwmr: irdma mr pointer
2540 static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2541 struct irdma_mr *iwmr)
2543 struct irdma_allocate_stag_info *info;
2544 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2546 struct irdma_cqp_request *cqp_request;
2547 struct cqp_cmds_info *cqp_info;
2549 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2553 cqp_info = &cqp_request->info;
2554 info = &cqp_info->in.u.alloc_stag.info;
2555 memset(info, 0, sizeof(*info));
2556 info->page_size = PAGE_SIZE;
2557 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2558 info->pd_id = iwpd->sc_pd.pd_id;
2559 info->total_len = iwmr->len;
2560 info->remote_access = true;
2561 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2562 cqp_info->post_sq = 1;
2563 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2564 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2565 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2566 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2572 * irdma_alloc_mr - register stag for fast memory registration
2574 * @mr_type: memory for stag registration
2575 * @max_num_sg: max number of pages
2577 static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2580 struct irdma_device *iwdev = to_iwdev(pd->device);
2581 struct irdma_pble_alloc *palloc;
2582 struct irdma_pbl *iwpbl;
2583 struct irdma_mr *iwmr;
2587 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2589 return ERR_PTR(-ENOMEM);
2591 stag = irdma_create_stag(iwdev);
2598 iwmr->ibmr.rkey = stag;
2599 iwmr->ibmr.lkey = stag;
2601 iwmr->ibmr.device = pd->device;
2602 iwpbl = &iwmr->iwpbl;
2604 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2605 palloc = &iwpbl->pble_alloc;
2606 iwmr->page_cnt = max_num_sg;
2607 err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2612 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2614 goto err_alloc_stag;
2616 iwpbl->pbl_allocated = true;
2620 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2622 irdma_free_stag(iwdev, stag);
2626 return ERR_PTR(err_code);
2630 * irdma_set_page - populate pbl list for fmr
2631 * @ibmr: ib mem to access iwarp mr pointer
2632 * @addr: page dma address for pbl list
2634 static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2636 struct irdma_mr *iwmr = to_iwmr(ibmr);
2637 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2638 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2641 if (unlikely(iwmr->npages == iwmr->page_cnt))
2644 pbl = palloc->level1.addr;
2645 pbl[iwmr->npages++] = addr;
2651 * irdma_map_mr_sg - map sg list for fmr
2652 * @ibmr: ib mem to access iwarp mr pointer
2653 * @sg: scatter gather list
2654 * @sg_nents: number of sg pages
2655 * @sg_offset: byte offset into the first sg entry
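 *
 * Thin wrapper around ib_sg_to_pages(), which calls irdma_set_page() for
 * each page-sized block and fills the level-1 pble allocated in
 * irdma_alloc_mr().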
2657 static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2658 int sg_nents, unsigned int *sg_offset)
2660 struct irdma_mr *iwmr = to_iwmr(ibmr);
2664 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2668 * irdma_hwreg_mr - send cqp command for memory registration
2669 * @iwdev: irdma device
2670 * @iwmr: irdma mr pointer
2671 * @access: access for MR
2673 static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2676 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2677 struct irdma_reg_ns_stag_info *stag_info;
2678 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2679 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2680 struct irdma_cqp_request *cqp_request;
2681 struct cqp_cmds_info *cqp_info;
2684 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2688 cqp_info = &cqp_request->info;
2689 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2690 memset(stag_info, 0, sizeof(*stag_info));
2691 stag_info->va = iwpbl->user_base;
2692 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2693 stag_info->stag_key = (u8)iwmr->stag;
2694 stag_info->total_len = iwmr->len;
2695 stag_info->access_rights = irdma_get_mr_access(access);
2696 stag_info->pd_id = iwpd->sc_pd.pd_id;
2697 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2698 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2700 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2701 stag_info->page_size = iwmr->page_size;
2703 if (iwpbl->pbl_allocated) {
2704 if (palloc->level == PBLE_LEVEL_1) {
2705 stag_info->first_pm_pbl_index = palloc->level1.idx;
2706 stag_info->chunk_size = 1;
2708 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2709 stag_info->chunk_size = 3;
2712 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2715 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2716 cqp_info->post_sq = 1;
2717 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2718 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2719 ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2720 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2726 * irdma_reg_user_mr - Register a user memory region
2728 * @start: virtual start address
2729 * @len: length of mr
2730 * @virt: virtual address
2731 * @access: access of mr
2734 static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2735 u64 virt, int access,
2736 struct ib_udata *udata)
2738 struct irdma_device *iwdev = to_iwdev(pd->device);
2739 struct irdma_ucontext *ucontext;
2740 struct irdma_pble_alloc *palloc;
2741 struct irdma_pbl *iwpbl;
2742 struct irdma_mr *iwmr;
2743 struct ib_umem *region;
2744 struct irdma_mem_reg_req req;
2745 u32 total, stag = 0;
2746 u8 shadow_pgcnt = 1;
2747 bool use_pbles = false;
2748 unsigned long flags;
2752 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2753 return ERR_PTR(-EINVAL);
2755 region = ib_umem_get(pd->device, start, len, access);
2757 if (IS_ERR(region)) {
2758 ibdev_dbg(&iwdev->ibdev,
2759 "VERBS: Failed to create ib_umem region\n");
2760 return (struct ib_mr *)region;
2763 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2764 ib_umem_release(region);
2765 return ERR_PTR(-EFAULT);
2768 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2770 ib_umem_release(region);
2771 return ERR_PTR(-ENOMEM);
2774 iwpbl = &iwmr->iwpbl;
2776 iwmr->region = region;
2778 iwmr->ibmr.device = pd->device;
2779 iwmr->ibmr.iova = virt;
2780 iwmr->page_size = PAGE_SIZE;
2782 if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
2783 iwmr->page_size = ib_umem_find_best_pgsz(region,
2784 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
2786 if (unlikely(!iwmr->page_size)) {
2788 ib_umem_release(region);
2789 return ERR_PTR(-EOPNOTSUPP);
2792 iwmr->len = region->length;
2793 iwpbl->user_base = virt;
2794 palloc = &iwpbl->pble_alloc;
2795 iwmr->type = req.reg_type;
2796 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2798 switch (req.reg_type) {
2799 case IRDMA_MEMREG_TYPE_QP:
2800 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
2801 if (total > iwmr->page_cnt) {
2805 total = req.sq_pages + req.rq_pages;
2806 use_pbles = (total > 2);
2807 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2811 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2813 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2814 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2815 iwpbl->on_list = true;
2816 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2818 case IRDMA_MEMREG_TYPE_CQ:
2819 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2821 total = req.cq_pages + shadow_pgcnt;
2822 if (total > iwmr->page_cnt) {
2827 use_pbles = (req.cq_pages > 1);
2828 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2832 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2834 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2835 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2836 iwpbl->on_list = true;
2837 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2839 case IRDMA_MEMREG_TYPE_MEM:
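/* A single-page MR keeps its address inline in iwmr->pgaddrmem[0];
 * anything larger is backed by a pble allocation (see irdma_setup_pbles()).
 */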
2840 use_pbles = (iwmr->page_cnt != 1);
2842 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2847 ret = irdma_check_mr_contiguous(palloc,
2850 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2851 iwpbl->pbl_allocated = false;
2855 stag = irdma_create_stag(iwdev);
2862 iwmr->ibmr.rkey = stag;
2863 iwmr->ibmr.lkey = stag;
2864 err = irdma_hwreg_mr(iwdev, iwmr, access);
2866 irdma_free_stag(iwdev, stag);
2875 iwmr->type = req.reg_type;
2880 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2881 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2882 ib_umem_release(region);
2885 return ERR_PTR(err);
2889 * irdma_reg_phys_mr - register kernel physical memory
2891 * @addr: physical address of memory to register
2892 * @size: size of memory to register
2893 * @access: Access rights
2894 * @iova_start: start of virtual address for physical buffers
2896 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2899 struct irdma_device *iwdev = to_iwdev(pd->device);
2900 struct irdma_pbl *iwpbl;
2901 struct irdma_mr *iwmr;
2905 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2907 return ERR_PTR(-ENOMEM);
2910 iwmr->ibmr.device = pd->device;
2911 iwpbl = &iwmr->iwpbl;
2913 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2914 iwpbl->user_base = *iova_start;
2915 stag = irdma_create_stag(iwdev);
2922 iwmr->ibmr.iova = *iova_start;
2923 iwmr->ibmr.rkey = stag;
2924 iwmr->ibmr.lkey = stag;
2926 iwmr->pgaddrmem[0] = addr;
2928 iwmr->page_size = SZ_4K;
2929 ret = irdma_hwreg_mr(iwdev, iwmr, access);
2931 irdma_free_stag(iwdev, stag);
2940 return ERR_PTR(ret);
2944 * irdma_get_dma_mr - register physical mem
2946 * @acc: access for memory
2948 static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
2952 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
2956 * irdma_del_memlist - Delete pbl list entries for CQ/QP
2957 * @iwmr: iwmr for IB's user page addresses
2958 * @ucontext: ptr to user context
2960 static void irdma_del_memlist(struct irdma_mr *iwmr,
2961 struct irdma_ucontext *ucontext)
2963 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2964 unsigned long flags;
2966 switch (iwmr->type) {
2967 case IRDMA_MEMREG_TYPE_CQ:
2968 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2969 if (iwpbl->on_list) {
2970 iwpbl->on_list = false;
2971 list_del(&iwpbl->list);
2973 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2975 case IRDMA_MEMREG_TYPE_QP:
2976 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2977 if (iwpbl->on_list) {
2978 iwpbl->on_list = false;
2979 list_del(&iwpbl->list);
2981 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2989 * irdma_dereg_mr - deregister mr
2990 * @ib_mr: mr ptr for dereg
2993 static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
2995 struct ib_pd *ibpd = ib_mr->pd;
2996 struct irdma_pd *iwpd = to_iwpd(ibpd);
2997 struct irdma_mr *iwmr = to_iwmr(ib_mr);
2998 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
2999 struct irdma_dealloc_stag_info *info;
3000 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3001 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3002 struct irdma_cqp_request *cqp_request;
3003 struct cqp_cmds_info *cqp_info;
3005 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3007 struct irdma_ucontext *ucontext;
3009 ucontext = rdma_udata_to_drv_context(udata,
3010 struct irdma_ucontext,
3012 irdma_del_memlist(iwmr, ucontext);
3017 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3021 cqp_info = &cqp_request->info;
3022 info = &cqp_info->in.u.dealloc_stag.info;
3023 memset(info, 0, sizeof(*info));
3024 info->pd_id = iwpd->sc_pd.pd_id;
3025 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3027 if (iwpbl->pbl_allocated)
3028 info->dealloc_pbl = true;
3030 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3031 cqp_info->post_sq = 1;
3032 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3033 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3034 irdma_handle_cqp_op(iwdev->rf, cqp_request);
3035 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3036 irdma_free_stag(iwdev, iwmr->stag);
3038 if (iwpbl->pbl_allocated)
3039 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3040 ib_umem_release(iwmr->region);
3047 * irdma_post_send - post send wr for kernel application
3048 * @ibqp: qp ptr for wr
3049 * @ib_wr: work request ptr
3050 * @bad_wr: return of bad wr if err
3052 static int irdma_post_send(struct ib_qp *ibqp,
3053 const struct ib_send_wr *ib_wr,
3054 const struct ib_send_wr **bad_wr)
3056 struct irdma_qp *iwqp;
3057 struct irdma_qp_uk *ukqp;
3058 struct irdma_sc_dev *dev;
3059 struct irdma_post_sq_info info;
3061 unsigned long flags;
3063 struct irdma_ah *ah;
3065 iwqp = to_iwqp(ibqp);
3066 ukqp = &iwqp->sc_qp.qp_uk;
3067 dev = &iwqp->iwdev->rf->sc_dev;
3069 spin_lock_irqsave(&iwqp->lock, flags);
3071 memset(&info, 0, sizeof(info));
3073 info.wr_id = (ib_wr->wr_id);
3074 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3075 info.signaled = true;
3076 if (ib_wr->send_flags & IB_SEND_FENCE)
3077 info.read_fence = true;
3078 switch (ib_wr->opcode) {
3079 case IB_WR_SEND_WITH_IMM:
3080 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3081 info.imm_data_valid = true;
3082 info.imm_data = ntohl(ib_wr->ex.imm_data);
3089 case IB_WR_SEND_WITH_INV:
3090 if (ib_wr->opcode == IB_WR_SEND ||
3091 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3092 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3093 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3095 info.op_type = IRDMA_OP_TYPE_SEND;
3097 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3098 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3100 info.op_type = IRDMA_OP_TYPE_SEND_INV;
3101 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3104 if (ib_wr->send_flags & IB_SEND_INLINE) {
3105 info.op.inline_send.data = (void *)(unsigned long)
3106 ib_wr->sg_list[0].addr;
3107 info.op.inline_send.len = ib_wr->sg_list[0].length;
3108 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3109 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3110 ah = to_iwah(ud_wr(ib_wr)->ah);
3111 info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
3112 info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
3113 info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3115 err = irdma_uk_inline_send(ukqp, &info, false);
3117 info.op.send.num_sges = ib_wr->num_sge;
3118 info.op.send.sg_list = ib_wr->sg_list;
3119 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3120 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3121 ah = to_iwah(ud_wr(ib_wr)->ah);
3122 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3123 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3124 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3126 err = irdma_uk_send(ukqp, &info, false);
3129 case IB_WR_RDMA_WRITE_WITH_IMM:
3130 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3131 info.imm_data_valid = true;
3132 info.imm_data = ntohl(ib_wr->ex.imm_data);
3138 case IB_WR_RDMA_WRITE:
3139 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3140 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3142 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3144 if (ib_wr->send_flags & IB_SEND_INLINE) {
3145 info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
3146 info.op.inline_rdma_write.len =
3147 ib_wr->sg_list[0].length;
3148 info.op.inline_rdma_write.rem_addr.addr =
3149 rdma_wr(ib_wr)->remote_addr;
3150 info.op.inline_rdma_write.rem_addr.lkey =
3151 rdma_wr(ib_wr)->rkey;
3152 err = irdma_uk_inline_rdma_write(ukqp, &info, false);
3154 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
3155 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3156 info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
3157 info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3158 err = irdma_uk_rdma_write(ukqp, &info, false);
3161 case IB_WR_RDMA_READ_WITH_INV:
3164 case IB_WR_RDMA_READ:
3165 if (ib_wr->num_sge >
3166 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3170 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3171 info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
3172 info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
3173 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3174 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3175 err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3177 case IB_WR_LOCAL_INV:
3178 info.op_type = IRDMA_OP_TYPE_INV_STAG;
3179 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3180 err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3182 case IB_WR_REG_MR: {
3183 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3184 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3185 struct irdma_fast_reg_stag_info stag_info = {};
3187 stag_info.signaled = info.signaled;
3188 stag_info.read_fence = info.read_fence;
3189 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3190 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3191 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3192 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3193 stag_info.wr_id = ib_wr->wr_id;
3194 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3195 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3196 stag_info.total_len = iwmr->ibmr.length;
3197 stag_info.reg_addr_pa = *palloc->level1.addr;
3198 stag_info.first_pm_pbl_index = palloc->level1.idx;
3199 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
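/* When npages exceeds IRDMA_MIN_PAGES_PER_FMR the fast-reg stag is
 * programmed with chunk_size = 1 (a level-1 pble), mirroring the
 * level-1 path in irdma_hwreg_mr().
 */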
3200 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3201 stag_info.chunk_size = 1;
3202 err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3208 ibdev_dbg(&iwqp->iwdev->ibdev,
3209 "VERBS: upost_send bad opcode = 0x%x\n",
3216 ib_wr = ib_wr->next;
3219 if (!iwqp->flush_issued) {
3220 if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
3221 irdma_uk_qp_post_wr(ukqp);
3222 spin_unlock_irqrestore(&iwqp->lock, flags);
3224 spin_unlock_irqrestore(&iwqp->lock, flags);
3225 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3226 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3235 * irdma_post_recv - post receive wr for kernel application
3236 * @ibqp: ib qp pointer
3237 * @ib_wr: work request for receive
3238 * @bad_wr: bad wr caused an error
3240 static int irdma_post_recv(struct ib_qp *ibqp,
3241 const struct ib_recv_wr *ib_wr,
3242 const struct ib_recv_wr **bad_wr)
3244 struct irdma_qp *iwqp;
3245 struct irdma_qp_uk *ukqp;
3246 struct irdma_post_rq_info post_recv = {};
3247 unsigned long flags;
3250 iwqp = to_iwqp(ibqp);
3251 ukqp = &iwqp->sc_qp.qp_uk;
3253 spin_lock_irqsave(&iwqp->lock, flags);
3255 post_recv.num_sges = ib_wr->num_sge;
3256 post_recv.wr_id = ib_wr->wr_id;
3257 post_recv.sg_list = ib_wr->sg_list;
3258 err = irdma_uk_post_receive(ukqp, &post_recv);
3260 ibdev_dbg(&iwqp->iwdev->ibdev,
3261 "VERBS: post_recv err %d\n", err);
3265 ib_wr = ib_wr->next;
3269 spin_unlock_irqrestore(&iwqp->lock, flags);
3270 if (iwqp->flush_issued)
3271 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
3272 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
3281 * irdma_flush_err_to_ib_wc_status - convert flush error code to IB wc status
3282 * @opcode: iwarp flush code
3284 static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3287 case FLUSH_PROT_ERR:
3288 return IB_WC_LOC_PROT_ERR;
3289 case FLUSH_REM_ACCESS_ERR:
3290 return IB_WC_REM_ACCESS_ERR;
3291 case FLUSH_LOC_QP_OP_ERR:
3292 return IB_WC_LOC_QP_OP_ERR;
3293 case FLUSH_REM_OP_ERR:
3294 return IB_WC_REM_OP_ERR;
3295 case FLUSH_LOC_LEN_ERR:
3296 return IB_WC_LOC_LEN_ERR;
3297 case FLUSH_GENERAL_ERR:
3298 return IB_WC_WR_FLUSH_ERR;
3299 case FLUSH_RETRY_EXC_ERR:
3300 return IB_WC_RETRY_EXC_ERR;
3301 case FLUSH_MW_BIND_ERR:
3302 return IB_WC_MW_BIND_ERR;
3303 case FLUSH_FATAL_ERR:
3305 return IB_WC_FATAL_ERR;
3310 * irdma_process_cqe - process cqe info
3311 * @entry: processed cqe
3312 * @cq_poll_info: cqe info
3314 static void irdma_process_cqe(struct ib_wc *entry,
3315 struct irdma_cq_poll_info *cq_poll_info)
3317 struct irdma_qp *iwqp;
3318 struct irdma_sc_qp *qp;
3320 entry->wc_flags = 0;
3321 entry->pkey_index = 0;
3322 entry->wr_id = cq_poll_info->wr_id;
3324 qp = cq_poll_info->qp_handle;
3325 iwqp = qp->qp_uk.back_qp;
3326 entry->qp = qp->qp_uk.back_qp;
3328 if (cq_poll_info->error) {
3329 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3330 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3332 entry->vendor_err = cq_poll_info->major_err << 16 |
3333 cq_poll_info->minor_err;
3335 entry->status = IB_WC_SUCCESS;
3336 if (cq_poll_info->imm_valid) {
3337 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3338 entry->wc_flags |= IB_WC_WITH_IMM;
3340 if (cq_poll_info->ud_smac_valid) {
3341 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3342 entry->wc_flags |= IB_WC_WITH_SMAC;
3345 if (cq_poll_info->ud_vlan_valid) {
3346 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3348 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3350 entry->vlan_id = vlan;
3351 entry->wc_flags |= IB_WC_WITH_VLAN;
3358 switch (cq_poll_info->op_type) {
3359 case IRDMA_OP_TYPE_RDMA_WRITE:
3360 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
3361 entry->opcode = IB_WC_RDMA_WRITE;
3363 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
3364 case IRDMA_OP_TYPE_RDMA_READ:
3365 entry->opcode = IB_WC_RDMA_READ;
3367 case IRDMA_OP_TYPE_SEND_INV:
3368 case IRDMA_OP_TYPE_SEND_SOL:
3369 case IRDMA_OP_TYPE_SEND_SOL_INV:
3370 case IRDMA_OP_TYPE_SEND:
3371 entry->opcode = IB_WC_SEND;
3373 case IRDMA_OP_TYPE_FAST_REG_NSMR:
3374 entry->opcode = IB_WC_REG_MR;
3376 case IRDMA_OP_TYPE_INV_STAG:
3377 entry->opcode = IB_WC_LOCAL_INV;
3379 case IRDMA_OP_TYPE_REC_IMM:
3380 case IRDMA_OP_TYPE_REC:
3381 entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
3382 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
3383 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3384 cq_poll_info->stag_invalid_set) {
3385 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3386 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3390 ibdev_err(&iwqp->iwdev->ibdev,
3391 "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
3392 entry->status = IB_WC_GENERAL_ERR;
3396 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3397 entry->src_qp = cq_poll_info->ud_src_qpn;
3400 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3401 entry->network_hdr_type = cq_poll_info->ipv4 ?
3405 entry->src_qp = cq_poll_info->qp_id;
3408 entry->byte_len = cq_poll_info->bytes_xfered;
3412 * irdma_poll_one - poll one entry of the CQ
3413 * @ukcq: ukcq to poll
3414 * @cur_cqe: current CQE info to be filled in
3415 * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
3417 * Returns the internal irdma device error code or 0 on success
3419 static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3420 struct irdma_cq_poll_info *cur_cqe,
3421 struct ib_wc *entry)
3423 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3428 irdma_process_cqe(entry, cur_cqe);
3434 * __irdma_poll_cq - poll cq for completion (kernel apps)
3436 * @num_entries: number of entries to poll
3437 * @entry: array of ib_wc objects to fill with completed entries
3439 static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3441 struct list_head *tmp_node, *list_node;
3442 struct irdma_cq_buf *last_buf = NULL;
3443 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3444 struct irdma_cq_buf *cq_buf;
3446 struct irdma_device *iwdev;
3447 struct irdma_cq_uk *ukcq;
3448 bool cq_new_cqe = false;
3449 int resized_bufs = 0;
3452 iwdev = to_iwdev(iwcq->ibcq.device);
3453 ukcq = &iwcq->sc_cq.cq_uk;
3455 /* go through the list of previously resized CQ buffers */
3456 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3457 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3458 while (npolled < num_entries) {
3459 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3467 /* QP using the CQ is destroyed. Skip reporting this CQE */
3468 if (ret == -EFAULT) {
3475 /* save the resized CQ buffer which received the last cqe */
3481 /* check the current CQ for new cqes */
3482 while (npolled < num_entries) {
3483 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3484 if (ret == -ENOENT) {
3485 ret = irdma_generated_cmpls(iwcq, cur_cqe);
3487 irdma_process_cqe(entry + npolled, cur_cqe);
3497 /* QP using the CQ is destroyed. Skip reporting this CQE */
3498 if (ret == -EFAULT) {
3506 /* all previous CQ resizes are complete */
3507 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3509 /* only CQ resizes up to the last_buf are complete */
3510 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3512 /* report to the HW the number of complete CQ resizes */
3513 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3517 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3524 * irdma_poll_cq - poll cq for completion (kernel apps)
3526 * @num_entries: number of entries to poll
3527 * @entry: array of ib_wc objects to fill with completed entries
3529 static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3530 struct ib_wc *entry)
3532 struct irdma_cq *iwcq;
3533 unsigned long flags;
3536 iwcq = to_iwcq(ibcq);
3538 spin_lock_irqsave(&iwcq->lock, flags);
3539 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3540 spin_unlock_irqrestore(&iwcq->lock, flags);
3546 * irdma_req_notify_cq - arm cq kernel application
3548 * @notify_flags: notification flags
3550 static int irdma_req_notify_cq(struct ib_cq *ibcq,
3551 enum ib_cq_notify_flags notify_flags)
3553 struct irdma_cq *iwcq;
3554 struct irdma_cq_uk *ukcq;
3555 unsigned long flags;
3556 enum irdma_cmpl_notify cq_notify;
3557 bool promo_event = false;
3560 cq_notify = notify_flags == IB_CQ_SOLICITED ?
3561 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3562 iwcq = to_iwcq(ibcq);
3563 ukcq = &iwcq->sc_cq.cq_uk;
3565 spin_lock_irqsave(&iwcq->lock, flags);
3566 /* Only promote to arm the CQ for any event if the last arm event was solicited. */
3567 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3570 if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
3571 iwcq->last_notify = cq_notify;
3572 irdma_uk_cq_request_notification(ukcq, cq_notify);
3575 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3576 (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
3578 spin_unlock_irqrestore(&iwcq->lock, flags);
3583 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3584 struct ib_port_immutable *immutable)
3586 struct ib_port_attr attr;
3589 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3590 err = ib_query_port(ibdev, port_num, &attr);
3594 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3595 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3596 immutable->gid_tbl_len = attr.gid_tbl_len;
3601 static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3602 struct ib_port_immutable *immutable)
3604 struct ib_port_attr attr;
3607 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3608 err = ib_query_port(ibdev, port_num, &attr);
3611 immutable->gid_tbl_len = attr.gid_tbl_len;
3616 static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
3618 [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
3619 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
3620 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
3621 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
3622 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
3623 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
3624 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
3625 [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
3626 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
3627 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
3628 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
3629 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
3630 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
3633 [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3635 [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3637 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3639 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3641 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3643 [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3645 [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3647 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3649 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3650 "ip4OutMcastOctets",
3651 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3653 [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3655 [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3657 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3659 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3661 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3663 [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3665 [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3667 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3669 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3670 "ip6OutMcastOctets",
3671 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3673 [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3675 [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name =
3677 [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3679 [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3681 [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3683 [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3685 [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3687 [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3689 [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name =
3691 [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name =
3693 [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3695 [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
3697 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32]
3698 .name = "RxECNMrkd",
3701 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
3703 struct irdma_device *iwdev = to_iwdev(dev);
3705 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
3706 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3707 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3711 * irdma_alloc_hw_port_stats - Allocate a hw stats structure
3712 * @ibdev: device pointer from stack
3713 * @port_num: port number
3715 static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
3718 int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +
3719 IRDMA_HW_STAT_INDEX_MAX_64;
3720 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
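/* The descriptor table must cover both the 32-bit and 64-bit hardware
 * counter index spaces; the build-time check below enforces that.
 */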
3722 BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) !=
3723 (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
3725 return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
3730 * irdma_get_hw_stats - Populates the rdma_hw_stats structure
3731 * @ibdev: device pointer from stack
3732 * @stats: stats pointer from stack
3733 * @port_num: port number
3734 * @index: which hw counter the stack is requesting we update
3736 static int irdma_get_hw_stats(struct ib_device *ibdev,
3737 struct rdma_hw_stats *stats, u32 port_num,
3740 struct irdma_device *iwdev = to_iwdev(ibdev);
3741 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3743 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3744 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3746 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3748 memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
3750 return stats->num_counters;
3754 * irdma_query_gid - Query port GID
3755 * @ibdev: device pointer from stack
3756 * @port: port number
3757 * @index: Entry index
3760 static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
3763 struct irdma_device *iwdev = to_iwdev(ibdev);
3765 memset(gid->raw, 0, sizeof(gid->raw));
3766 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3772 * mcast_list_add - Add a new mcast item to list
3773 * @rf: RDMA PCI function
3774 * @new_elem: pointer to element to add
3776 static void mcast_list_add(struct irdma_pci_f *rf,
3777 struct mc_table_list *new_elem)
3779 list_add(&new_elem->list, &rf->mc_qht_list.list);
3783 * mcast_list_del - Remove an mcast item from list
3784 * @mc_qht_elem: pointer to mcast table list element
3786 static void mcast_list_del(struct mc_table_list *mc_qht_elem)
3789 list_del(&mc_qht_elem->list);
3793 * mcast_list_lookup_ip - Search mcast list for address
3794 * @rf: RDMA PCI function
3795 * @ip_mcast: pointer to mcast IP address
3797 static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
3800 struct mc_table_list *mc_qht_el;
3801 struct list_head *pos, *q;
3803 list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
3804 mc_qht_el = list_entry(pos, struct mc_table_list, list);
3805 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3806 sizeof(mc_qht_el->mc_info.dest_ip)))
3814 * irdma_mcast_cqp_op - perform a mcast cqp operation
3815 * @iwdev: irdma device
3816 * @mc_grp_ctx: mcast group info
3819 * returns error status
3821 static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3822 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3824 struct cqp_cmds_info *cqp_info;
3825 struct irdma_cqp_request *cqp_request;
3828 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3832 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3833 cqp_info = &cqp_request->info;
3834 cqp_info->cqp_cmd = op;
3835 cqp_info->post_sq = 1;
3836 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3837 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3838 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3839 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3845 * irdma_mcast_mac - Get the multicast MAC for an IP address
3846 * @ip_addr: IPv4 or IPv6 address
3847 * @mac: pointer to result MAC address
3848 * @ipv4: flag indicating IPv4 or IPv6
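 *
 * IPv4 groups use the standard 01:00:5e prefix plus the low 23 bits of
 * the address (e.g. 224.1.2.3 maps to 01:00:5e:01:02:03); IPv6 groups
 * use the 33:33 prefix plus the low 32 bits of the address, per the
 * usual RFC 1112 / RFC 2464 mappings.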
3851 void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
3853 u8 *ip = (u8 *)ip_addr;
3856 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
3859 mac4[3] = ip[2] & 0x7F;
3862 ether_addr_copy(mac, mac4);
3864 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
3871 ether_addr_copy(mac, mac6);
3876 * irdma_attach_mcast - attach a qp to a multicast group
3878 * @ibgid: pointer to global ID
3881 * returns error status
3883 static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3885 struct irdma_qp *iwqp = to_iwqp(ibqp);
3886 struct irdma_device *iwdev = iwqp->iwdev;
3887 struct irdma_pci_f *rf = iwdev->rf;
3888 struct mc_table_list *mc_qht_elem;
3889 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
3890 unsigned long flags;
3891 u32 ip_addr[4] = {};
3897 union irdma_sockaddr sgid_addr;
3898 unsigned char dmac[ETH_ALEN];
3900 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3902 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3903 irdma_copy_ip_ntohl(ip_addr,
3904 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
3905 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3907 ibdev_dbg(&iwdev->ibdev,
3908 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
3910 irdma_mcast_mac(ip_addr, dmac, false);
3912 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3914 vlan_id = irdma_get_vlan_ipv4(ip_addr);
3915 irdma_mcast_mac(ip_addr, dmac, true);
3916 ibdev_dbg(&iwdev->ibdev,
3917 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3918 ibqp->qp_num, ip_addr, dmac);
3921 spin_lock_irqsave(&rf->qh_list_lock, flags);
3922 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3924 struct irdma_dma_mem *dma_mem_mc;
3926 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3927 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3931 mc_qht_elem->mc_info.ipv4_valid = ipv4;
3932 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3933 sizeof(mc_qht_elem->mc_info.dest_ip));
3934 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3935 &mgn, &rf->next_mcg);
3941 mc_qht_elem->mc_info.mgn = mgn;
3942 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
3943 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
3944 IRDMA_HW_PAGE_SIZE);
3945 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
3949 if (!dma_mem_mc->va) {
3950 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
3955 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
3956 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
3957 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
3958 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
3959 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
3960 if (vlan_id < VLAN_N_VID)
3961 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
3962 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
3963 mc_qht_elem->mc_grp_ctx.qs_handle =
3964 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
3965 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
3967 spin_lock_irqsave(&rf->qh_list_lock, flags);
3968 mcast_list_add(rf, mc_qht_elem);
3970 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
3971 IRDMA_MAX_MGS_PER_CTX) {
3972 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3977 mcg_info.qp_id = iwqp->ibqp.qp_num;
3978 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
3979 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
3980 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3982 /* Only if there is a change do we need to modify or create */
3984 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3985 IRDMA_OP_MC_CREATE);
3986 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
3987 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
3988 IRDMA_OP_MC_MODIFY);
3999 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4000 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4001 mcast_list_del(mc_qht_elem);
4002 dma_free_coherent(rf->hw.device,
4003 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4004 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4005 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4006 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4007 irdma_free_rsrc(rf, rf->allocated_mcgs,
4008 mc_qht_elem->mc_grp_ctx.mg_id);
4016 * irdma_detach_mcast - detach a qp from a multicast group
4018 * @ibgid: pointer to global ID
4021 * returns error status
4023 static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4025 struct irdma_qp *iwqp = to_iwqp(ibqp);
4026 struct irdma_device *iwdev = iwqp->iwdev;
4027 struct irdma_pci_f *rf = iwdev->rf;
4028 u32 ip_addr[4] = {};
4029 struct mc_table_list *mc_qht_elem;
4030 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4032 unsigned long flags;
4033 union irdma_sockaddr sgid_addr;
4035 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4036 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4037 irdma_copy_ip_ntohl(ip_addr,
4038 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4040 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4042 spin_lock_irqsave(&rf->qh_list_lock, flags);
4043 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4045 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4046 ibdev_dbg(&iwdev->ibdev,
4047 "VERBS: address not found MCG\n");
4051 mcg_info.qp_id = iwqp->ibqp.qp_num;
4052 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4053 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4054 mcast_list_del(mc_qht_elem);
4055 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4056 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4057 IRDMA_OP_MC_DESTROY);
4059 ibdev_dbg(&iwdev->ibdev,
4060 "VERBS: failed MC_DESTROY MCG\n");
4061 spin_lock_irqsave(&rf->qh_list_lock, flags);
4062 mcast_list_add(rf, mc_qht_elem);
4063 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4067 dma_free_coherent(rf->hw.device,
4068 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4069 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4070 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4071 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4072 irdma_free_rsrc(rf, rf->allocated_mcgs,
4073 mc_qht_elem->mc_grp_ctx.mg_id);
4076 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4077 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4078 IRDMA_OP_MC_MODIFY);
4080 ibdev_dbg(&iwdev->ibdev,
4081 "VERBS: failed Modify MCG\n");
4089 static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
4091 struct irdma_pci_f *rf = iwdev->rf;
4094 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
4099 err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
4100 irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
4103 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
4108 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
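/* Poll the control CQ until the AH-create completion marks the AH valid
 * or the retry budget runs out (likely the non-sleepable path, given the
 * @sleep argument above).
 */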
4111 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4113 } while (!ah->sc_ah.ah_info.ah_valid && --cnt);
4116 ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
4124 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
4129 static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
4131 struct irdma_pd *pd = to_iwpd(ibah->pd);
4132 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4133 struct rdma_ah_attr *ah_attr = attr->ah_attr;
4134 const struct ib_gid_attr *sgid_attr;
4135 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4136 struct irdma_pci_f *rf = iwdev->rf;
4137 struct irdma_sc_ah *sc_ah;
4138 struct irdma_ah_info *ah_info;
4139 union irdma_sockaddr sgid_addr, dgid_addr;
4145 sc_ah->ah_info.vsi = &iwdev->vsi;
4146 irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4147 ah->sgid_index = ah_attr->grh.sgid_index;
4148 sgid_attr = ah_attr->grh.sgid_attr;
4149 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4150 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4151 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4152 ah->av.attrs = *ah_attr;
4153 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4154 ah_info = &sc_ah->ah_info;
4155 ah_info->pd_idx = pd->sc_pd.pd_id;
4156 if (ah_attr->ah_flags & IB_AH_GRH) {
4157 ah_info->flow_label = ah_attr->grh.flow_label;
4158 ah_info->hop_ttl = ah_attr->grh.hop_limit;
4159 ah_info->tc_tos = ah_attr->grh.traffic_class;
4162 ether_addr_copy(dmac, ah_attr->roce.dmac);
4163 if (ah->av.net_type == RDMA_NETWORK_IPV4) {
4164 ah_info->ipv4_valid = true;
4165 ah_info->dest_ip_addr[0] =
4166 ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4167 ah_info->src_ip_addr[0] =
4168 ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4169 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4170 ah_info->dest_ip_addr[0]);
4171 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4172 ah_info->do_lpbk = true;
4173 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4176 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4177 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4178 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4179 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4180 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4181 ah_info->dest_ip_addr);
4182 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4183 ah_info->do_lpbk = true;
4184 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4188 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4193 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4194 ah_info->ipv4_valid, dmac);
4196 if (ah_info->dst_arpindex == -1)
4199 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
4200 ah_info->vlan_tag = 0;
4202 if (ah_info->vlan_tag < VLAN_N_VID) {
4203 ah_info->insert_vlan_tag = true;
4204 ah_info->vlan_tag |=
4205 rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
4212 * irdma_ah_exists - Check for existing identical AH
4213 * @iwdev: irdma device
4214 * @new_ah: AH to check for
4216 * returns true if AH is found, false if not found.
4218 static bool irdma_ah_exists(struct irdma_device *iwdev,
4219 struct irdma_ah *new_ah)
4221 struct irdma_ah *ah;
4222 u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4223 new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4224 new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4225 new_ah->sc_ah.ah_info.dest_ip_addr[3];
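/* AHs are cached in a hash table keyed by the XOR of the destination IP
 * words; an exact ah_info match below lets callers share one HW AH via
 * the parent_ah refcount.
 */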
4227 hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
4228 /* Set ah_valid and ah_idx the same so memcmp can work */
4229 new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
4230 new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
4231 if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
4232 sizeof(ah->sc_ah.ah_info))) {
4233 refcount_inc(&ah->refcnt);
4234 new_ah->parent_ah = ah;
4243 * irdma_destroy_ah - Destroy address handle
4244 * @ibah: pointer to address handle
4245 * @ah_flags: flags for sleepable
4247 static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4249 struct irdma_device *iwdev = to_iwdev(ibah->device);
4250 struct irdma_ah *ah = to_iwah(ibah);
4252 if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
4253 mutex_lock(&iwdev->ah_tbl_lock);
4254 if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
4255 mutex_unlock(&iwdev->ah_tbl_lock);
4258 hash_del(&ah->parent_ah->list);
4259 kfree(ah->parent_ah);
4260 mutex_unlock(&iwdev->ah_tbl_lock);
4263 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4266 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4267 ah->sc_ah.ah_info.ah_idx);
4273 * irdma_create_user_ah - create user address handle
4274 * @ibah: address handle
4275 * @attr: address handle attributes
4278 * returns 0 on success, error otherwise
4280 static int irdma_create_user_ah(struct ib_ah *ibah,
4281 struct rdma_ah_init_attr *attr,
4282 struct ib_udata *udata)
4284 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4285 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4286 struct irdma_create_ah_resp uresp;
4287 struct irdma_ah *parent_ah;
4290 err = irdma_setup_ah(ibah, attr);
4293 mutex_lock(&iwdev->ah_tbl_lock);
4294 if (!irdma_ah_exists(iwdev, ah)) {
4295 err = irdma_create_hw_ah(iwdev, ah, true);
4297 mutex_unlock(&iwdev->ah_tbl_lock);
4300 /* Add new AH to list */
4301 parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
4303 u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
4304 parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
4305 parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
4306 parent_ah->sc_ah.ah_info.dest_ip_addr[3];
4308 ah->parent_ah = parent_ah;
4309 hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
4310 refcount_set(&parent_ah->refcnt, 1);
4313 mutex_unlock(&iwdev->ah_tbl_lock);
4315 uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4316 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
4318 irdma_destroy_ah(ibah, attr->flags);
4324 * irdma_create_ah - create address handle
4325 * @ibah: address handle
4326 * @attr: address handle attributes
4329 * returns 0 on success, error otherwise
4331 static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
4332 struct ib_udata *udata)
4334 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4335 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4338 err = irdma_setup_ah(ibah, attr);
4341 err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
4347 * irdma_query_ah - Query address handle
4348 * @ibah: pointer to address handle
4349 * @ah_attr: address handle attributes
4351 static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4353 struct irdma_ah *ah = to_iwah(ibah);
4355 memset(ah_attr, 0, sizeof(*ah_attr));
4356 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4357 ah_attr->ah_flags = IB_AH_GRH;
4358 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4359 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4360 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4361 ah_attr->grh.sgid_index = ah->sgid_index;
4363 memcpy(&ah_attr->grh.dgid, &ah->dgid,
4364 sizeof(ah_attr->grh.dgid));
static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
						 u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static const struct ib_device_ops irdma_roce_dev_ops = {
	.attach_mcast = irdma_attach_mcast,
	.create_ah = irdma_create_ah,
	.create_user_ah = irdma_create_user_ah,
	.destroy_ah = irdma_destroy_ah,
	.detach_mcast = irdma_detach_mcast,
	.get_link_layer = irdma_get_link_layer,
	.get_port_immutable = irdma_roce_port_immutable,
	.modify_qp = irdma_modify_qp_roce,
	.query_ah = irdma_query_ah,
	.query_pkey = irdma_query_pkey,
};

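/*
 * irdma always reports an Ethernet link layer. The RoCE-only verbs above
 * (AH management, multicast attach/detach, pkey query) are merged on top of
 * the common irdma_dev_ops table via ib_set_device_ops() when the device
 * runs in RoCE mode; see irdma_init_roce_device() below.
 */
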
static const struct ib_device_ops irdma_iw_dev_ops = {
	.modify_qp = irdma_modify_qp,
	.get_port_immutable = irdma_iw_port_immutable,
	.query_gid = irdma_query_gid,
};

static const struct ib_device_ops irdma_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_IRDMA,
	.uverbs_abi_ver = IRDMA_ABI_VER,

	.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
	.alloc_mr = irdma_alloc_mr,
	.alloc_mw = irdma_alloc_mw,
	.alloc_pd = irdma_alloc_pd,
	.alloc_ucontext = irdma_alloc_ucontext,
	.create_cq = irdma_create_cq,
	.create_qp = irdma_create_qp,
	.dealloc_driver = irdma_ib_dealloc_device,
	.dealloc_mw = irdma_dealloc_mw,
	.dealloc_pd = irdma_dealloc_pd,
	.dealloc_ucontext = irdma_dealloc_ucontext,
	.dereg_mr = irdma_dereg_mr,
	.destroy_cq = irdma_destroy_cq,
	.destroy_qp = irdma_destroy_qp,
	.disassociate_ucontext = irdma_disassociate_ucontext,
	.get_dev_fw_str = irdma_get_dev_fw_str,
	.get_dma_mr = irdma_get_dma_mr,
	.get_hw_stats = irdma_get_hw_stats,
	.map_mr_sg = irdma_map_mr_sg,
	.mmap = irdma_mmap,
	.mmap_free = irdma_mmap_free,
	.poll_cq = irdma_poll_cq,
	.post_recv = irdma_post_recv,
	.post_send = irdma_post_send,
	.query_device = irdma_query_device,
	.query_port = irdma_query_port,
	.query_qp = irdma_query_qp,
	.reg_user_mr = irdma_reg_user_mr,
	.req_notify_cq = irdma_req_notify_cq,
	.resize_cq = irdma_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
	INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
	INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
};

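/*
 * The INIT_RDMA_OBJ_SIZE() entries above tell the RDMA core how large the
 * driver-private objects are and where the embedded ib_* member lives, so
 * the core can allocate them and the driver can recover its own structure
 * with container_of(). Illustrative only (the driver's to_iwpd() helper is
 * assumed to look roughly like this):
 *
 *	static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
 *	{
 *		return container_of(ibpd, struct irdma_pd, ibpd);
 *	}
 */
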
/**
 * irdma_init_roce_device - initialization of roce rdma device
 * @iwdev: irdma device
 */
static void irdma_init_roce_device(struct irdma_device *iwdev)
{
	iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
			    iwdev->netdev->dev_addr);
	ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
}

/**
 * irdma_init_iw_device - initialization of iwarp rdma device
 * @iwdev: irdma device
 */
static int irdma_init_iw_device(struct irdma_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;

	iwdev->ibdev.node_type = RDMA_NODE_RNIC;
	addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
			    netdev->dev_addr);
	iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
	iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
	iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
	iwdev->ibdev.ops.iw_connect = irdma_connect;
	iwdev->ibdev.ops.iw_accept = irdma_accept;
	iwdev->ibdev.ops.iw_reject = irdma_reject;
	iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
	iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
	memcpy(iwdev->ibdev.iw_ifname, netdev->name,
	       sizeof(iwdev->ibdev.iw_ifname));
	ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);

	return 0;
}

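/*
 * In iWARP mode the iw_cm callbacks are wired directly into ibdev.ops and
 * the netdev name is copied into iw_ifname so the iWARP connection manager
 * can associate the RDMA device with its Ethernet interface; the RoCE path
 * has no equivalent and only layers irdma_roce_dev_ops on top of the common
 * table.
 */
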
/**
 * irdma_init_rdma_device - initialization of rdma device
 * @iwdev: irdma device
 */
static int irdma_init_rdma_device(struct irdma_device *iwdev)
{
	struct pci_dev *pcidev = iwdev->rf->pcidev;
	int ret;

	if (iwdev->roce_mode) {
		irdma_init_roce_device(iwdev);
	} else {
		ret = irdma_init_iw_device(iwdev);
		if (ret)
			return ret;
	}
	iwdev->ibdev.phys_port_cnt = 1;
	iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
	iwdev->ibdev.dev.parent = &pcidev->dev;
	ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);

	return 0;
}

/**
 * irdma_port_ibevent - indicate port event
 * @iwdev: irdma device
 */
void irdma_port_ibevent(struct irdma_device *iwdev)
{
	struct ib_event event;

	event.device = &iwdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE :
					 IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}

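/*
 * Callers update iwdev->iw_status before invoking irdma_port_ibevent(), so
 * the dispatched event (IB_EVENT_PORT_ACTIVE vs. IB_EVENT_PORT_ERR) always
 * reflects the new state; see irdma_ib_register_device() and
 * irdma_ib_unregister_device() below.
 */
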
/**
 * irdma_ib_unregister_device - unregister rdma device from IB core
 * @iwdev: irdma device
 */
void irdma_ib_unregister_device(struct irdma_device *iwdev)
{
	iwdev->iw_status = 0;
	irdma_port_ibevent(iwdev);
	ib_unregister_device(&iwdev->ibdev);
}

/**
 * irdma_ib_register_device - register irdma device to IB core
 * @iwdev: irdma device
 */
int irdma_ib_register_device(struct irdma_device *iwdev)
{
	int ret;

	ret = irdma_init_rdma_device(iwdev);
	if (ret)
		return ret;
	ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
	if (ret)
		goto error;
	dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
	ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
	if (ret)
		goto error;

	iwdev->iw_status = 1;
	irdma_port_ibevent(iwdev);

	return 0;

error:
	ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");

	return ret;
}

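/*
 * Sketch of the expected bring-up/tear-down ordering from the driver core
 * (illustrative only, error handling elided):
 *
 *	err = irdma_ib_register_device(iwdev);
 *	...
 *	irdma_ib_unregister_device(iwdev);
 *
 * ib_unregister_device() ends up in irdma_ib_dealloc_device() below via the
 * .dealloc_driver callback registered in irdma_dev_ops.
 */
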
/**
 * irdma_ib_dealloc_device
 * @ibdev: ib device
 *
 * callback from ibdev dealloc_driver to deallocate resources
 * under irdma device
 */
void irdma_ib_dealloc_device(struct ib_device *ibdev)
{
	struct irdma_device *iwdev = to_iwdev(ibdev);

	irdma_rt_deinit_hw(iwdev);
	irdma_ctrl_deinit_hw(iwdev->rf);
}