/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
#include <linux/ktime.h>
#include <linux/numa.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/crash_dump.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"
#include "enic_clsf.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
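
/* Worked example for the bound above, assuming WQ_ENET_LEN_BITS is 14
 * (its value in wq_enet_desc.h; verify against your tree):
 *
 *   WQ_ENET_MAX_DESC_LEN = 1 << 14 = 16384 bytes per descriptor
 *   MAX_TSO              = 1 << 16 = 65536 bytes per TSO send
 *   ENIC_DESC_MAX_SPLITS = 65536 / 16384 + 1 = 5
 *
 * so a maximally-sized TSO head can split into at most 5 descriptors,
 * which is why the xmit path reserves nr_frags + ENIC_DESC_MAX_SPLITS
 * ring slots before queuing a packet.
 */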

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF		0x0071	/* enet SRIOV VF */

#define RX_COPYBREAK_DEFAULT			256

/* Supported devices */
static const struct pci_device_id enic_id_table[] = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

#define ENIC_LARGE_PKT_THRESHOLD		1000
#define ENIC_MAX_COALESCE_TIMERS		10
/* Interrupt moderation table, which will be used to decide the
 * coalescing timer values
 * {rx_rate in Mbps, mapping percentage of the range}
 */
static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
	{4000,  0},
	{4400, 10},
	{5060, 20},
	{5230, 30},
	{5540, 40},
	{5820, 50},
	{6120, 60},
	{6435, 70},
	{6745, 80},
	{7000, 90},
	{0xFFFFFFFF, 100}
};

/* This table helps the driver to pick different ranges for rx coalescing
 * timer depending on the link speed.
 */
static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
	{0,  0}, /* 0  - 4  Gbps */
	{0,  3}, /* 4  - 10 Gbps */
	{3,  6}, /* 10 - 40 Gbps */
};
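
/* Example of how the two tables combine (see enic_calc_int_moderation()
 * below): on a 10 Gbps link, mod_range picks {0, 3}, i.e. the coalescing
 * timer range starts at 0 usec for small-packet traffic and at 3 usec for
 * large-packet traffic. The measured rx rate in Mbps is then looked up in
 * mod_table to get a range_percent, and the timer becomes
 *
 *   timer = range_start +
 *           (range_end - range_start) * range_percent / 100;
 */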

static void enic_init_affinity_hint(struct enic *enic)
{
	int numa_node = dev_to_node(&enic->pdev->dev);
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
		    (enic->msix[i].affinity_mask &&
		     !cpumask_empty(enic->msix[i].affinity_mask)))
			continue;
		if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
				       GFP_KERNEL))
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					enic->msix[i].affinity_mask);
	}
}

static void enic_free_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i))
			continue;
		free_cpumask_var(enic->msix[i].affinity_mask);
		enic->msix[i].affinity_mask = NULL;
	}
}

static void enic_set_affinity_hint(struct enic *enic)
{
	int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		if (enic_is_err_intr(enic, i)		||
		    enic_is_notify_intr(enic, i)	||
		    !enic->msix[i].affinity_mask	||
		    cpumask_empty(enic->msix[i].affinity_mask))
			continue;
		err = irq_set_affinity_hint(enic->msix_entry[i].vector,
					    enic->msix[i].affinity_mask);
		if (err)
			netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
				    err);
	}

	for (i = 0; i < enic->wq_count; i++) {
		int wq_intr = enic_msix_wq_intr(enic, i);

		if (enic->msix[wq_intr].affinity_mask &&
		    !cpumask_empty(enic->msix[wq_intr].affinity_mask))
			netif_set_xps_queue(enic->netdev,
					    enic->msix[wq_intr].affinity_mask,
					    i);
	}
}

static void enic_unset_affinity_hint(struct enic *enic)
{
	int i;

	for (i = 0; i < enic->intr_count; i++)
		irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
}

static void enic_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	__be16 port = ti->port;
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
		netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
		goto error;
	}

	switch (ti->sa_family) {
	case AF_INET6:
		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
			netdev_info(netdev, "vxlan: only IPv4 offload supported");
			goto error;
		}
		/* Fall through */
	case AF_INET:
		break;
	default:
		goto error;
	}

	if (enic->vxlan.vxlan_udp_port_number) {
		if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
			netdev_warn(netdev, "vxlan: udp port already offloaded");
		else
			netdev_info(netdev, "vxlan: offload supported for only one UDP port");

		goto error;
	}
	if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
	    !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
		netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
		goto error;
	}

	err = vnic_dev_overlay_offload_cfg(enic->vdev,
					   OVERLAY_CFG_VXLAN_PORT_UPDATE,
					   ntohs(port));
	if (err)
		goto error;

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    enic->vxlan.patch_level);
	if (err)
		goto error;

	enic->vxlan.vxlan_udp_port_number = ntohs(port);

	netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
		    (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);

	goto unlock;

error:
	netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
		    ntohs(port), ti->sa_family, ti->type);
unlock:
	spin_unlock_bh(&enic->devcmd_lock);
}

static void enic_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct enic *enic = netdev_priv(netdev);
	int err;

	spin_lock_bh(&enic->devcmd_lock);

	if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
	    ti->type != UDP_TUNNEL_TYPE_VXLAN) {
		netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
			    ntohs(ti->port), ti->sa_family, ti->type);
		goto unlock;
	}

	err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
					    OVERLAY_OFFLOAD_DISABLE);
	if (err) {
		netdev_err(netdev, "vxlan: del offload udp port: %d failed",
			   ntohs(ti->port));
		goto unlock;
	}

	enic->vxlan.vxlan_udp_port_number = 0;

	netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
		    ntohs(ti->port), ti->sa_family);

unlock:
	spin_unlock_bh(&enic->devcmd_lock);
}

static netdev_features_t enic_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
	struct enic *enic = netdev_priv(dev);
	struct udphdr *udph;
	u16 port = 0;
	u8 proto;

	if (!skb->encapsulation)
		return features;

	features = vxlan_features_check(skb, features);

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IPV6):
		if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
			goto out;
		proto = ipv6_hdr(skb)->nexthdr;
		break;
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	default:
		goto out;
	}

	switch (eth->h_proto) {
	case ntohs(ETH_P_IPV6):
		if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
			goto out;
		/* Fall through */
	case ntohs(ETH_P_IP):
		break;
	default:
		goto out;
	}

	if (proto == IPPROTO_UDP) {
		udph = udp_hdr(skb);
		port = be16_to_cpu(udph->dest);
	}

	/* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
	 * for other UDP port tunnels
	 */
	if (port != enic->vxlan.vxlan_udp_port_number)
		goto out;

	return features;

out:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
				 buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			       buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_subqueue(enic->netdev, q_number);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static bool enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;
	bool err = false;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				   i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		err |= error_status;
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				   i, error_status);
	}

	return err;
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			    enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				    min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					    "interface MTU (%d) set higher "
					    "than switch port MTU (%d)\n",
					    netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		enic_notify_check(enic);
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr))
		napi_schedule_irqoff(&enic->napi[0]);
	else
		vnic_intr_unmask(&enic->intr[io_intr]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule_irqoff(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	if (enic_log_q_error(enic))
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	enic_notify_check(enic);
	vnic_intr_return_all_credits(&enic->intr[intr]);

	return IRQ_HANDLED;
}

static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int len_left,
	int loopback)
{
	const skb_frag_t *frag;
	dma_addr_t dma_addr;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
					(len_left == 0),	/* EOP? */
					loopback);
	}

	return 0;
}

static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
			   vlan_tag, eop, loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, int vlan_tag_insert,
	unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
				   hdr_len, vlan_tag_insert, vlan_tag, eop,
				   loopback);

	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
	const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);

	switch (eth->h_proto) {
	case ntohs(ETH_P_IP):
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr, 0,
					   IPPROTO_TCP, 0);
		break;
	case ntohs(ETH_P_IPV6):
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr, 0,
					 IPPROTO_TCP, 0);
		break;
	default:
		WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
		break;
	}
}

static void enic_preload_tcp_csum(struct sk_buff *skb)
{
	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero. HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
}
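
/* Rough sketch of what the preload above gives the hardware: for each TSO
 * segment the NIC can finish the checksum incrementally, effectively ending
 * up with
 *
 *   check = csum_tcpudp_magic(saddr, daddr, tcp_hdrlen + seg_len,
 *                             IPPROTO_TCP, csum of TCP header and payload);
 *
 * Seeding the field with the complement of the zero-length pseudo-header
 * is what lets the per-segment length be folded in later.
 */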

static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
	struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag,
	int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	int eop = (len_left == 0);
	unsigned int offset = 0;
	unsigned int hdr_len;
	dma_addr_t dma_addr;
	unsigned int len;
	skb_frag_t *frag;

	if (skb->encapsulation) {
		hdr_len = skb_inner_transport_header(skb) - skb->data;
		hdr_len += inner_tcp_hdrlen(skb);
		enic_preload_tcp_csum_encap(skb);
	} else {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		enic_preload_tcp_csum(skb);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
					  PCI_DMA_TODEVICE);
		if (unlikely(enic_dma_map_check(enic, dma_addr)))
			return -ENOMEM;
		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
				       vlan_tag_insert, vlan_tag,
				       eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return 0;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				  (unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			if (unlikely(enic_dma_map_check(enic, dma_addr)))
				return -ENOMEM;
			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						(len == frag_len_left),/*EOP*/
						loopback);
			frag_len_left -= len;
			offset += len;
		}
	}

	return 0;
}

static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
					  struct sk_buff *skb,
					  int vlan_tag_insert,
					  unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	/* Hardware will overwrite the checksum fields, calculating from
	 * scratch and ignoring the value placed by software.
	 * Offload mode = 00
	 * mss[2], mss[1], mss[0] bits are set
	 */
	unsigned int mss_or_csum = 7;
	int eop = (len_left == 0);
	dma_addr_t dma_addr;
	int err = 0;

	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
				  PCI_DMA_TODEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr)))
		return -ENOMEM;

	enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
			      vlan_tag_insert, vlan_tag,
			      WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
			      loopback);
	if (!eop)
		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);

	return err;
}

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;
	int err;

	if (skb_vlan_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = skb_vlan_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
					    vlan_tag_insert, vlan_tag,
					    loopback);
	else if (skb->encapsulation)
		err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
					      vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
						vlan_tag, loopback);
	else
		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
					     vlan_tag, loopback);
	if (unlikely(err)) {
		struct vnic_wq_buf *buf;

		buf = wq->to_use->prev;
		/* while not EOP of previous pkt && queue not empty.
		 * For all non EOP bufs, os_buf is NULL.
		 */
		while (!buf->os_buf && (buf->next != wq->to_clean)) {
			enic_free_wq_buf(wq, buf);
			wq->ring.desc_avail++;
			buf = buf->prev;
		}
		wq->to_use = buf->next;
		dev_kfree_skb(skb);
	}
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq;
	unsigned int txq_map;
	struct netdev_queue *txq;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
	wq = &enic->wq[txq_map];
	txq = netdev_get_tx_queue(netdev, txq_map);

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock(&enic->wq_lock[txq_map]);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_tx_stop_queue(txq);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock(&enic->wq_lock[txq_map]);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_tx_stop_queue(txq);
	skb_tx_timestamp(skb);
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		vnic_wq_doorbell(wq);

	spin_unlock(&enic->wq_lock[txq_map]);

	return NETDEV_TX_OK;
}
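
/* Worked example for the linearize check above, assuming
 * ENIC_NON_TSO_MAX_DESC is 16 (see enic_res.h; an assumption for this
 * example): a non-TSO skb with a linear head plus 17 fragments would need
 * 18 descriptors, exceeds the limit, and is therefore linearized into a
 * single buffer before being queued.
 */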

/* dev_base_lock rwlock held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
			   struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;
	int err;

	err = enic_dev_stats_dump(enic, &stats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
}

static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
		unsigned int mc_count = netdev_mc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
			    ENIC_MULTICAST_PERFECT_FILTERS, mc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, mc_addr);
	enic->mc_count++;

	return 0;
}

static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, mc_addr);
	enic->mc_count--;

	return 0;
}

static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
		unsigned int uc_count = netdev_uc_count(netdev);

		netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
			    ENIC_UNICAST_PERFECT_FILTERS, uc_count);

		return -ENOSPC;
	}

	enic_dev_add_addr(enic, uc_addr);
	enic->uc_count++;

	return 0;
}

static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
{
	struct enic *enic = netdev_priv(netdev);

	enic_dev_del_addr(enic, uc_addr);
	enic->uc_count--;

	return 0;
}

void enic_reset_addr_lists(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;

	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);

	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		__dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
		if (!allmulti)
			__dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	schedule_work(&enic->tx_hang_reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			eth_zero_addr(pp->mac_addr);
			if (vf == PORT_SELF_VF)
				eth_zero_addr(netdev->dev_addr);
		}
	}

	if (vf == PORT_SELF_VF)
		eth_zero_addr(pp->vf_mac);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;
	struct vnic_rq_buf *buf = rq->to_use;

	if (buf->os_buf) {
		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
				   buf->len);

		return 0;
	}
	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data, len,
				  PCI_DMA_FROMDEVICE);
	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
				      u32 pkt_len)
{
	if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
		pkt_size->large_pkt_bytes_cnt += pkt_len;
	else
		pkt_size->small_pkt_bytes_cnt += pkt_len;
}

static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
			     struct vnic_rq_buf *buf, u16 len)
{
	struct enic *enic = netdev_priv(netdev);
	struct sk_buff *new_skb;

	if (len > enic->rx_copybreak)
		return false;
	new_skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!new_skb)
		return false;
	pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
				    DMA_FROM_DEVICE);
	memcpy(new_skb->data, (*skb)->data, len);
	*skb = new_skb;

	return true;
}

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;
	bool outer_csum_ok = true, encap = false;

	if (skipped)
		return;

	skb = buf->os_buf;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
			buf->os_buf = NULL;
			pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data - NET_IP_ALIGN);

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, q_number);
		if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
		    (type == 3)) {
			switch (rss_type) {
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4);
				break;
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6:
			case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX:
				skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3);
				break;
			}
		}
		if (enic->vxlan.vxlan_udp_port_number) {
			switch (enic->vxlan.patch_level) {
			case 0:
				if (fcoe) {
					encap = true;
					outer_csum_ok = fcoe_fc_crc_ok;
				}
				break;
			case 2:
				if ((type == 7) &&
				    (rss_hash & BIT(0))) {
					encap = true;
					outer_csum_ok = (rss_hash & BIT(1)) &&
							(rss_hash & BIT(2));
				}
				break;
			}
		}

		/* Hardware does not provide whole packet checksum. It only
		 * provides pseudo checksum. Since hw validates the packet
		 * checksum but does not provide us the checksum value, use
		 * CHECKSUM_UNNECESSARY.
		 *
		 * In case of an encapsulated packet, tcp_udp_csum_ok is the
		 * inner csum_ok. outer_csum_ok is set by hw when the outer
		 * udp csum is correct or is zero.
		 */
		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
		    tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = encap;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);

		skb_mark_napi_id(skb, &enic->napi[rq->index]);
		if (!(netdev->features & NETIF_F_GRO))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&enic->napi[q_number], skb);
		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_intr_update_pkt_size(&cq->pkt_size_counter,
						  bytes_written);
	} else {

		/* Buffer overflow
		 */

		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		buf->os_buf = NULL;
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	u32 timer = cq->tobe_rx_coal_timeval;

	if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
		cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
	}
}

static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
	struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
	struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
	int index;
	u32 timer;
	u32 range_start;
	u32 traffic;
	u64 delta;
	ktime_t now = ktime_get();

	delta = ktime_us_delta(now, cq->prev_ts);
	if (delta < ENIC_AIC_TS_BREAK)
		return;
	cq->prev_ts = now;

	traffic = pkt_size_counter->large_pkt_bytes_cnt +
		  pkt_size_counter->small_pkt_bytes_cnt;
	/* The table takes Mbps
	 * traffic *= 8    => bits
	 * traffic *= (10^6 / delta)    => bps
	 * traffic /= 10^6     => Mbps
	 *
	 * Combining, traffic *= (8 / delta)
	 */

	traffic <<= 3;
	traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;

	for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
		if (traffic < mod_table[index].rx_rate)
			break;
	range_start = (pkt_size_counter->small_pkt_bytes_cnt >
		       pkt_size_counter->large_pkt_bytes_cnt << 1) ?
		      rx_coal->small_pkt_range_start :
		      rx_coal->large_pkt_range_start;
	timer = range_start + ((rx_coal->range_end - range_start) *
			       mod_table[index].range_percent / 100);

	cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;

	pkt_size_counter->large_pkt_bytes_cnt = 0;
	pkt_size_counter->small_pkt_bytes_cnt = 0;
}
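
/* Numeric example for the rate math above: 1,250,000 bytes observed over
 * delta = 10,000 usec gives traffic <<= 3 -> 10,000,000 bits, and
 * 10,000,000 / 10,000 = 1,000 Mbps, which is then looked up in mod_table
 * to choose the interpolation percentage for the coalescing timer.
 */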

static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
	unsigned int work_done, rq_work_done = 0, wq_work_done;
	int err;

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
				       enic_wq_service, NULL);

	if (budget > 0)
		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
			rq_work_to_do, enic_rq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[0]);

	if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[0]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

#ifdef CONFIG_RFS_ACCEL
static void enic_free_rx_cpu_rmap(struct enic *enic)
{
	free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
	enic->netdev->rx_cpu_rmap = NULL;
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
	int i, res;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
		if (unlikely(!enic->netdev->rx_cpu_rmap))
			return;
		for (i = 0; i < enic->rq_count; i++) {
			res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
					       enic->msix_entry[i].vector);
			if (unlikely(res)) {
				enic_free_rx_cpu_rmap(enic);
				return;
			}
		}
	}
}

#else

static void enic_free_rx_cpu_rmap(struct enic *enic)
{
}

static void enic_set_rx_cpu_rmap(struct enic *enic)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
	struct vnic_wq *wq = &enic->wq[wq_index];
	unsigned int cq;
	unsigned int intr;
	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
	unsigned int wq_work_done;
	unsigned int wq_irq;

	wq_irq = wq->index;
	cq = enic_cq_wq(enic, wq_irq);
	intr = enic_msix_wq_intr(enic, wq_irq);
	wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
				 0 /* don't unmask intr */,
				 1 /* reset intr timer */);
	if (!wq_work_done) {
		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
		return 0;
	}

	return budget;
}

static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done = 0;
	int err;

	/* Service RQ
	 */

	if (budget > 0)
		work_done = vnic_cq_service(&enic->cq[cq],
			work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed. Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;
	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
		/* Call the function which refreshes the intr coalescing timer
		 * value based on the traffic.
		 */
		enic_calc_int_moderation(enic, &enic->rq[rq]);

	if ((work_done < budget) && napi_complete_done(napi, work_done)) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
			enic_set_int_moderation(enic, &enic->rq[rq]);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, notify_timer);

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	enic_free_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	enic_set_rx_cpu_rmap(enic);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%s-rx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			int wq = enic_cq_wq(enic, i);

			intr = enic_msix_wq_intr(enic, i);
			snprintf(enic->msix[intr].devname,
				sizeof(enic->msix[intr].devname),
				"%s-tx-%u", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix;
			enic->msix[intr].devid = &enic->napi[wq];
		}

		intr = enic_msix_err_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		snprintf(enic->msix[intr].devname,
			sizeof(enic->msix[intr].devname),
			"%s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

static void enic_set_rx_coal_setting(struct enic *enic)
{
	unsigned int speed;
	int index = -1;
	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;

	/* 1. Read the link speed from fw
	 * 2. Pick the default range for the speed
	 * 3. Update it in enic->rx_coalesce_setting
	 */
	speed = vnic_dev_port_speed(enic->vdev);
	if (ENIC_LINK_SPEED_10G < speed)
		index = ENIC_LINK_40G_INDEX;
	else if (ENIC_LINK_SPEED_4G < speed)
		index = ENIC_LINK_10G_INDEX;
	else
		index = ENIC_LINK_4G_INDEX;

	rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
	rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
	rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;

	/* Start with the value provided by UCSM */
	for (index = 0; index < enic->rq_count; index++)
		enic->cq[index].cur_rx_coal_timeval =
			enic->config.intr_timer_usec;

	rx_coal->use_adaptive_rx_coalesce = 1;
}

static int enic_dev_notify_set(struct enic *enic)
{
	int err;

	spin_lock_bh(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_legacy_notify_intr());
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev,
			enic_msix_notify_intr(enic));
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}

/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err, ret;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}
	enic_init_affinity_hint(enic);
	enic_set_affinity_hint(enic);

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		/* enable rq before updating rq desc */
		vnic_rq_enable(&enic->rq[i]);
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_free_rq;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_add_station_addr(enic);

	enic_set_rx_mode(netdev);

	netif_tx_wake_all_queues(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);
	enic_rfs_timer_start(enic);

	return 0;

err_out_free_rq:
	for (i = 0; i < enic->rq_count; i++) {
		ret = vnic_rq_disable(&enic->rq[i]);
		if (!ret)
			vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	}
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	return err;
}

/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);
	enic_rfs_flw_tbl_free(enic);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);

	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_unset_affinity_hint(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix(enic->msix_entry[intr].vector,
				      &enic->napi[enic_cq_wq(enic, i)]);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;
	u32 flags = CMD_OPENF_IG_DESCCACHE;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, flags);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_soft_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
		vnic_dev_soft_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

int __enic_set_rsskey(struct enic *enic)
{
	union vnic_rss_key *rss_key_buf_va;
	dma_addr_t rss_key_buf_pa;
	int i, kidx, bidx, err;

	rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
					       sizeof(union vnic_rss_key),
					       &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	for (i = 0; i < ENIC_RSS_LEN; i++) {
		kidx = i / ENIC_RSS_BYTES_PER_KEY;
		bidx = i % ENIC_RSS_BYTES_PER_KEY;
		rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i];
	}
	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

static int enic_set_rsskey(struct enic *enic)
{
	netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock_bh(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}
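
/* Example of the indirection table built above, assuming rq_count == 4 and
 * rss_hash_bits == 7 (128 entries): buckets 0, 1, 2, 3, 4, 5, ... map to
 * RQs 0, 1, 2, 3, 0, 1, ..., a plain round-robin spread of RSS hash
 * buckets across the receive queues.
 */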

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock_bh(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock_bh(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_hash_type;
	int res;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
	spin_unlock_bh(&enic->devcmd_lock);
	if (res) {
		/* defaults for old adapters
		 */
		rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4	|
				NIC_CFG_RSS_HASH_TYPE_TCP_IPV4	|
				NIC_CFG_RSS_HASH_TYPE_IPV6	|
				NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	}

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, "
					"Failed to set RSS cpu indirection table.");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, Failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
			       rss_hash_bits, rss_base_cpu, rss_enable);
}

static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}

static void enic_tx_hang_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, tx_hang_reset);

	rtnl_lock();

	spin_lock(&enic->enic_api_lock);
	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);
	spin_unlock(&enic->enic_api_lock);
	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);

	rtnl_unlock();
}
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  n + m + 2, n + m + 2) > 0) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}
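	/* Next, try MSI-X scaled back to a single RQ (no RSS fan-out)
	 * when the full multi-queue vector count can't be allocated.
	 */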
	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {

		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
					  1 + m + 2, 1 + m + 2) > 0) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}
	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}
	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
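/* Two net_device_ops tables: the "dynamic" variant is used for dynamic
 * (VM-FEX) vNICs and SR-IOV VFs, and differs from the standard table
 * only in its ndo_set_mac_address handler.
 */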
static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
	.ndo_features_check	= enic_features_check,
};
static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= enic_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= enic_udp_tunnel_add,
	.ndo_udp_tunnel_del	= enic_udp_tunnel_del,
	.ndo_features_check	= enic_features_check,
};
static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++) {
		napi_hash_del(&enic->napi[i]);
		netif_napi_del(&enic->napi[i]);
	}
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
	enic_free_affinity_hint(enic);
}
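/* When running as a kdump capture kernel, shrink queue and descriptor
 * counts to the minimum so the crash kernel's limited memory suffices.
 */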
static void enic_kdump_kernel_config(struct enic *enic)
{
	if (is_kdump_kernel()) {
		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
	}
}
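/* Per-device init: read the coalesce timer info and vNIC config, size
 * the queues, choose an interrupt mode, allocate and initialize vNIC
 * resources, program RSS, and register the NAPI pollers that match the
 * chosen interrupt mode.
 */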
static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* modify resource count if we are in kdump_kernel
	 */
	enic_kdump_kernel_config(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			netif_napi_add(netdev, &enic->napi[i],
				       enic_poll_msix_rq, NAPI_POLL_WEIGHT);
		}
		for (i = 0; i < enic->wq_count; i++)
			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
				       enic_poll_msix_wq, NAPI_POLL_WEIGHT);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_affinity_hint(enic);
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}
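/* PCI probe: allocate the net_device, set up PCI/DMA/BAR resources,
 * register with the vNIC device layer, optionally enable SR-IOV, open
 * and initialize the device, then register the net_device. Each
 * failure path unwinds exactly the steps completed so far.
 */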
static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;
#ifdef CONFIG_PCI_IOV
	int pos = 0;
#endif
	int num_pps = 1;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev_mqs(sizeof(struct enic),
				    ENIC_RQ_MAX, ENIC_WQ_MAX);
	if (!netdev)
		return -ENOMEM;

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);
	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 47-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA for consistent allocations, aborting\n",
				32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA for consistent allocations, aborting\n",
				47);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}
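	/* A successful 47-bit mask means the device can DMA to high host
	 * addresses; using_dac records this so NETIF_F_HIGHDMA can be
	 * advertised on the net_device below.
	 */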
	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = vnic_devcmd_init(enic->vdev);
	if (err)
		goto err_out_vnic_unregister;
#ifdef CONFIG_PCI_IOV
	/* Get number of subvnics */
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
			&enic->num_vfs);
		if (enic->num_vfs) {
			err = pci_enable_sriov(pdev, enic->num_vfs);
			if (err) {
				dev_err(dev, "SRIOV enable failed, aborting. pci_enable_sriov() returned %d\n",
					err);
				goto err_out_vnic_unregister;
			}
			enic->priv_flags |= ENIC_SRIOV_ENABLED;
			num_pps = enic->num_vfs;
		}
	}
#endif

	/* Allocate structure for port profiles */
	enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
	if (!enic->pp) {
		err = -ENOMEM;
		goto err_out_disable_sriov_pp;
	}
	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_disable_sriov;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);
	spin_lock_init(&enic->enic_api_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}
	netif_set_real_num_tx_queues(netdev, enic->wq_count);
	netif_set_real_num_rx_queues(netdev, enic->rq_count);

	/* Setup notification timer, HW reset task, and wq locks
	 */

	timer_setup(&enic->notify_timer, enic_notify_timer, 0);

	enic_rfs_flw_tbl_init(enic);
	enic_set_rx_coal_setting(enic);
	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	/* rx coalesce time already got initialized. This gets used
	 * if adaptive coal is turned off
	 */
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	enic_set_ethtool_ops(netdev);

	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RSS))
		netdev->hw_features |= NETIF_F_RXHASH;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (ENIC_SETTING(enic, VXLAN)) {
		u64 patch_level;
		u64 a1 = 0;

		netdev->hw_enc_features |= NETIF_F_RXCSUM		|
					   NETIF_F_TSO			|
					   NETIF_F_TSO6			|
					   NETIF_F_TSO_ECN		|
					   NETIF_F_GSO_UDP_TUNNEL	|
					   NETIF_F_HW_CSUM		|
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_features |= netdev->hw_enc_features;
		/* get bit mask from hw about supported offload bit level
		 * BIT(0) = fw supports patch_level 0
		 *	    fcoe_fc_crc_ok = outer csum ok
		 * BIT(1) = always set by fw
		 * BIT(2) = fw supports patch_level 2
		 *	    BIT(0) in rss_hash = encap
		 *	    BIT(1,2) in rss_hash = outer_ip_csum_ok/
		 *				   outer_tcp_udp_csum_ok
		 * used in enic_rq_indicate_buf
		 */
		err = vnic_dev_get_supported_feature_ver(enic->vdev,
							 VIC_FEATURE_VXLAN,
							 &patch_level, &a1);
		if (err)
			patch_level = 0;
		enic->vxlan.flags = (u8)a1;
		/* mask bits that are supported by driver
		 */
		patch_level &= BIT_ULL(0) | BIT_ULL(2);
		patch_level = fls(patch_level);
		patch_level = patch_level ? patch_level - 1 : 0;
		enic->vxlan.patch_level = patch_level;
	}
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features;

#ifdef CONFIG_RFS_ACCEL
	netdev->hw_features |= NETIF_F_NTUPLE;
#endif

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9000 */
	netdev->min_mtu = ENIC_MIN_MTU;
	netdev->max_mtu = ENIC_MAX_MTU;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}
	enic->rx_copybreak = RX_COPYBREAK_DEFAULT;

	return 0;
err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_disable_sriov:
	kfree(enic->pp);
err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
	if (enic_sriov_enabled(enic)) {
		pci_disable_sriov(pdev);
		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
	}
#endif
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	free_netdev(netdev);

	return err;
}
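/* PCI remove: quiesce any outstanding reset/MTU work, then undo the
 * probe steps in reverse order.
 */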
static void enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
#ifdef CONFIG_PCI_IOV
		if (enic_sriov_enabled(enic)) {
			pci_disable_sriov(pdev);
			enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
		}
#endif
		kfree(enic->pp);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		free_netdev(netdev);
	}
}
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = enic_remove,
};
static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);