/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_verbs.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
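/*
 * Minimum vNIC resources (work queues, receive queues, completion
 * queues) needed to back a QP for each transport type, indexed by
 * enum usnic_transport_type.  Each list is terminated by an EOL entry
 * with .cnt = 0.
 */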
const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
	{ /*USNIC_TRANSPORT_UNKNOWN*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_ROCE_CUSTOM*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_IPV4_UDP*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
};
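/*
 * Pack the leading bytes of the firmware version string into a u64 so
 * it can be reported through ib_device_attr.fw_ver.
 */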
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = *((u64 *)fw_ver_str);
}
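/*
 * Fill the create_qp response returned to userspace with everything the
 * provider library needs to drive the device directly: the VF index,
 * BAR0 bus address and length, the vNIC indices of the allocated
 * RQ/WQ/CQ resources, and the transport of the QP group's default flow.
 */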
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
			  qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
			  qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s",
			  dev_name(&us_ibdev->ib_dev.dev));
		return err;
	}

	return 0;
}
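/*
 * Select a VF with room for @res_spec and create a QP group on it.
 * When VF sharing is enabled, VFs already associated with this PD are
 * tried first; otherwise an unused VF is taken from the device's list.
 * Must be called with usdev_lock held.
 */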
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		if (IS_ERR(dev_list))
			return ERR_CAST(dev_list);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
					  dev_name(&us_ibdev->ib_dev.dev),
					  pci_name(usnic_vnic_get_pdev(vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);
				spin_unlock(&vf->lock);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}

	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
		    usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);
			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n",
		   dev_name(&us_ibdev->ib_dev.dev));
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}
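/*
 * Release a QP group back to its VF.  The group is expected to be in
 * RESET before it is torn down.
 */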
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}
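/*
 * Sanity-check the transport type supplied by userspace before it is
 * used to index min_transport_spec[].
 */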
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
	    cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}
/* Start of ib callback functions */
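/* usNIC runs over Ethernet, so the link layer is fixed. */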
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			    &gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
	       sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by userspace: max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
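/*
 * Port state is derived from the underlying Ethernet interface: link
 * down maps to IB_PORT_DOWN, link up without an IP address to
 * IB_PORT_INIT, and link up with an address to IB_PORT_ACTIVE.
 */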
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	mutex_lock(&us_ibdev->usdev_lock);
	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
			     &props->active_width)) {
		mutex_unlock(&us_ibdev->usdev_lock);
		return -EINVAL;
	}

	/* props being zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;	/* Disabled */
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;	/* PortConfigurationTraining */
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* LinkUp */
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
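/*
 * Only the QP state is tracked in the kernel; the rest of the QP lives
 * in userspace, so everything else in the attrs is reported as zero.
 */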
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&gid->raw[0], 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			    &gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(device);

	if (us_ibdev->netdev)
		dev_hold(us_ibdev->netdev);

	return us_ibdev->netdev;
}
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
		   pd, context, dev_name(&ibdev->dev));
	return &pd->ibpd;
}
int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}
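/*
 * Create a UD QP for userspace.  The udata carries a
 * usnic_ib_create_qp_cmd naming the transport; a VF with room for that
 * transport's minimum resource spec is selected, a QP group is created
 * on it, and the VF/BAR/queue layout is returned through the response
 * so the provider library can mmap and drive the hardware directly.
 */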
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
			  dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec, &res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
			  qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}
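/*
 * Userspace drives the QP state machine through modify_qp; only
 * IB_QP_STATE (plus a one-port sanity check) is honored here.
 */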
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}
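/*
 * CQs are owned and polled entirely from userspace; the kernel only
 * allocates a placeholder ib_cq to satisfy the verbs API.
 */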
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-EBUSY);

	return cq;
}
int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	kfree(cq);
	return 0;
}
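/*
 * Register a userspace memory region by pinning its pages through
 * usnic_uiom.  lkey/rkey are left at zero; they are not used on the
 * usNIC data path.
 */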
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int access_flags,
				struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
		  virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
				      access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}
int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->uobject->context);
	kfree(mr);
	return 0;
}
struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}
int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}
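/*
 * Map a VF's BAR0 into userspace.  The VF index is passed in
 * vma->vm_pgoff, the mapping length must match the BAR length exactly,
 * and the VF must back one of this context's QP groups.
 */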
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
		  vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
					  bar->len,
					  vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
				  &bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);
			return remap_pfn_range(vma,
					       vma->vm_start,
					       bus_addr >> PAGE_SHIFT,
					       len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}
/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct rdma_ah_attr *ah_attr,
				 u32 flags,
				 struct ib_udata *udata)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		       const struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
		     struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
			   enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}

/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */