/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char *buf = kzalloc(1000, GFP_KERNEL);

	if (!buf)
		return;

	usnic_ib_dump_vf(vf, buf, 1000);
	usnic_dbg("%s\n", buf);

	kfree(buf);
}

/* Start of netdev section */
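/*
 * Move every QP group of this device that is in INIT, RTR, or RTS into the
 * error state.  Callers must hold usdev_lock.
 */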
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}

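/*
 * Translate netdev notifier events on the underlying netdev (PF reset,
 * carrier, MAC, and MTU changes) into forwarding-device updates and the
 * corresponding IB events on port 1.
 */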
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					netdev_cmd_to_name(event),
					dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct ib_device *ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_usdev_event(us_ibdev, event);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
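/*
 * Handle IPv4 address add/remove on the underlying netdev: update the
 * forwarding device's address and signal a GID change, since the usNIC
 * GID is derived from the MAC and IP address.
 */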
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				netdev_cmd_to_name(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				netdev_cmd_to_name(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_inet_event(us_ibdev, event, ptr);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}
static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */

static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

static const struct ib_device_ops usnic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_USNIC,
	.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION,

	.alloc_pd = usnic_ib_alloc_pd,
	.alloc_ucontext = usnic_ib_alloc_ucontext,
	.create_cq = usnic_ib_create_cq,
	.create_qp = usnic_ib_create_qp,
	.dealloc_pd = usnic_ib_dealloc_pd,
	.dealloc_ucontext = usnic_ib_dealloc_ucontext,
	.dereg_mr = usnic_ib_dereg_mr,
	.destroy_cq = usnic_ib_destroy_cq,
	.destroy_qp = usnic_ib_destroy_qp,
	.device_group = &usnic_attr_group,
	.get_dev_fw_str = usnic_get_dev_fw_str,
	.get_link_layer = usnic_ib_port_link_layer,
	.get_port_immutable = usnic_port_immutable,
	.mmap = usnic_ib_mmap,
	.modify_qp = usnic_ib_modify_qp,
	.query_device = usnic_ib_query_device,
	.query_gid = usnic_ib_query_gid,
	.query_port = usnic_ib_query_port,
	.query_qp = usnic_ib_query_qp,
	.reg_user_mr = usnic_ib_reg_mr,
	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};

/* Start of PF discovery section */
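/*
 * Allocate, initialize, and register the IB device backing the given PF
 * PCI device.  Returns the new usnic_ib_dev on success, or an ERR_PTR or
 * NULL on failure.
 */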
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;
	int ret;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;

	ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);

	ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
	if (ret)
		goto err_fwd_dealloc;

	dma_set_max_seg_size(&dev->dev, SZ_2G);
	if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	rcu_read_lock();
	ind = __in_dev_get_rcu(netdev);
	if (ind) {
		const struct in_ifaddr *ifa;

		ifa = rcu_dereference(ind->ifa_list);
		if (ifa)
			usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
	}
	rcu_read_unlock();

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
		   dev_name(&us_ibdev->ib_dev.dev),
		   netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
		   us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			found = true;
			break;
		}
	}

	mutex_unlock(&usnic_ib_ibdev_list_lock);
	if (found)
		usnic_ib_device_remove(us_ibdev);
	else
		WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}

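/*
 * Look up (or create on first use) the IB device for the PF that owns the
 * given VF vnic, taking a vf_cnt reference on it.
 */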
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

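/*
 * Bind a usNIC VF PCI function: enable the device, allocate its vnic,
 * attach it to the parent PF's IB device, and record the per-VF resource
 * counts on the PF.
 */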
static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (they are the same for each VF; easier to
	 * re-write than to say "if (!set) { set_values(); set = 1; }").
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
		   dev_name(&pf->ib_dev.dev));
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */