/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/uverbs_ioctl.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_verbs.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM

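/*
 * Minimum vNIC resources required for each supported transport type.
 * usnic_ib_create_qp() starts from this table and scales the CQ count
 * up to two when separate send and recv CQs are requested.
 */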
const struct usnic_vnic_res_spec min_transport_spec[USNIC_TRANSPORT_MAX] = {
	{ /*USNIC_TRANSPORT_UNKNOWN*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_ROCE_CUSTOM*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
	{ /*USNIC_TRANSPORT_IPV4_UDP*/
		.resources = {
			{.type = USNIC_VNIC_RES_TYPE_WQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_RQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_CQ,	.cnt = 1,},
			{.type = USNIC_VNIC_RES_TYPE_EOL,	.cnt = 0,},
		},
	},
};

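/*
 * Pack the leading 8 bytes of the ethtool firmware-version string into a
 * u64 so it can be reported as ib_device_attr.fw_ver. This assumes
 * fw_ver_str is at least 8 bytes long, which holds for ethtool_drvinfo's
 * 32-byte fw_version buffer.
 */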
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = *((u64 *)fw_ver_str);
}

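/*
 * Fill and copy to userspace the create_qp response: the VF index, bar0
 * bus address and length, the vNIC indices of the RQ/WQ/CQ resources
 * backing the QP group, and the transport of the group's default
 * (first) flow.
 */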
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
			  qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
			  qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			  usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			  qp_grp->grp_id,
			  PTR_ERR(chunk));
		return PTR_ERR(chunk);
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s",
			  dev_name(&us_ibdev->ib_dev.dev));
		return err;
	}

	return 0;
}

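/*
 * VF selection policy: with usnic_ib_share_vf set, first try VFs already
 * in use by this PD that still have room for res_spec; otherwise fall
 * back to a completely unused VF. The caller must hold usdev_lock;
 * per-VF state is protected by vf->lock.
 */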
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		if (IS_ERR(dev_list))
			return ERR_CAST(dev_list);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
					  dev_name(&us_ibdev->ib_dev.dev),
					  pci_name(usnic_vnic_get_pdev(vnic)));
				qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
								vf, pd,
								res_spec,
								trans_spec);
				spin_unlock(&vf->lock);
				/* Don't leak the dev list on the success path */
				usnic_uiom_free_dev_list(dev_list);
				goto qp_grp_check;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
	}

	/* Try to find resources on an unused vf */
	list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
		spin_lock(&vf->lock);
		vnic = vf->vnic;
		if (vf->qp_grp_ref_cnt == 0 &&
		    usnic_vnic_check_room(vnic, res_spec) == 0) {
			qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf,
							pd, res_spec,
							trans_spec);
			spin_unlock(&vf->lock);
			goto qp_grp_check;
		}
		spin_unlock(&vf->lock);
	}

	usnic_info("No free qp grp found on %s\n",
		   dev_name(&us_ibdev->ib_dev.dev));
	return ERR_PTR(-ENOMEM);

qp_grp_check:
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}
	return qp_grp;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}

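/* Reject transport types outside (USNIC_TRANSPORT_UNKNOWN, USNIC_TRANSPORT_MAX) */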
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
	    cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
					      u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

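/*
 * Device attributes are largely static. Limits that userspace manages
 * (max_qp_wr, max_sge, max_cqe) are intentionally left at zero, and the
 * per-VF WQ/RQ/CQ counts are scaled by the number of VFs.
 */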
int usnic_ib_query_device(struct ib_device *ibdev,
			  struct ib_device_attr *props,
			  struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	int qp_per_vf;

	usnic_dbg("\n");

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			    &gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
	       sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf * kref_read(&us_ibdev->vf_cnt);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		kref_read(&us_ibdev->vf_cnt);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

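/*
 * Port state is derived from the underlying ENIC: DOWN without link,
 * INIT with link but no IP address, ACTIVE once both are present.
 */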
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
			struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	usnic_dbg("\n");

	if (ib_get_eth_speed(ibdev, port, &props->active_speed,
			     &props->active_width))
		return -EINVAL;

	/*
	 * usdev_lock is acquired after (and not before) the ib_get_eth_speed
	 * call because acquiring rtnl_lock in ib_get_eth_speed, while holding
	 * usdev_lock, could lead to a deadlock.
	 */
	mutex_lock(&us_ibdev->usdev_lock);
	/* props is zeroed by the caller, avoid zeroing it here */

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;	/* Disabled */
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;	/* PortConfigurationTraining */
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* LinkUp */
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

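/*
 * Only UD QPs exist on usnic, and the data path lives in userspace, so
 * everything except the cached QP state is reported as zero.
 */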
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask,
		      struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

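/*
 * The single GID is synthesized from the interface MAC and IP address,
 * mirroring what usnic_ib_query_device reports as sys_image_guid.
 */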
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
		       union ib_gid *gid)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			    &gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

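/* A single full-membership pkey (0xffff) is exposed at index 0. */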
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct usnic_ib_pd *pd = to_upd(ibpd);
	void *umem_pd;

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
	}

	return 0;
}

void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
}

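/*
 * usnic QPs are userspace-managed: create_qp only reserves vNIC
 * resources on a VF and returns their indices through udata; the actual
 * queues are programmed and driven from userspace.
 */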
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct usnic_ib_ucontext, ibucontext);
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
			  dev_name(&us_ibdev->ib_dev.dev));
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
			  dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec, &res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

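/* Move the QP group to RESET (tearing down its flows) before freeing it. */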
int usnic_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
			  qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

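/* Only IB_QP_STATE changes are honored; usnic exposes a single port. */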
int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) {
		/* usnic devices only have one port */
		status = -EINVAL;
		goto out_unlock;
	}
	if (attr_mask & IB_QP_STATE) {
		status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL);
	} else {
		usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask);
		status = -EINVAL;
	}

out_unlock:
	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

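/*
 * Completion queues are also userspace-managed; the kernel only tracks
 * a stub ib_cq object.
 */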
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-EBUSY);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

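/*
 * Memory registration pins the user pages through usnic_uiom so the VIC
 * hardware can DMA to them; no lkey/rkey protection is provided (both
 * keys are reported as zero).
 */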
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int access_flags,
			      struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
		  virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
				      access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem);
	kfree(mr);
	return 0;
}

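/*
 * Track per-process contexts on the device's ctx_list so the QP groups
 * they own can be found at mmap time.
 */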
int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct usnic_ib_ucontext *context = to_ucontext(uctx);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	WARN_ON_ONCE(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
}

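/*
 * mmap maps a VF's bar0 into userspace. The VF id is encoded in
 * vma->vm_pgoff, and the request must cover exactly bar0's length.
 */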
int usnic_ib_mmap(struct ib_ucontext *context,
		  struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
		  vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
					  bar->len,
					  vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
				  &bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);
			return remap_pfn_range(vma,
					       vma->vm_start,
					       bus_addr >> PAGE_SHIFT,
					       len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EFAULT;
}