Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 22 Feb 2013 01:40:58 +0000 (17:40 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 22 Feb 2013 01:40:58 +0000 (17:40 -0800)
Pull trivial tree from Jiri Kosina:
 "Assorted tiny fixes queued in trivial tree"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial: (22 commits)
  DocBook: update EXPORT_SYMBOL entry to point at export.h
  Documentation: update top level 00-INDEX file with new additions
  ARM: at91/ide: remove unused at91-ide Kconfig entry
  percpu_counter.h: comment code for better readability
  x86, efi: fix comment typo in head_32.S
  IB: cxgb3: delay freeing mem until entirely done with it
  net: mvneta: remove unneeded version.h include
  time: x86: report_lost_ticks doesn't exist any more
  pcmcia: avoid static analysis complaint about use-after-free
  fs/jfs: Fix typo in comment : 'how may' -> 'how many'
  of: add missing documentation for of_platform_populate()
  btrfs: remove unnecessary cur_trans set before goto loop in join_transaction
  sound: soc: Fix typo in sound/codecs
  treewide: Fix typo in various drivers
  btrfs: fix comment typos
  Update ibmvscsi module name in Kconfig.
  powerpc: fix typo (utilties -> utilities)
  of: fix spelling mistake in comment
  h8300: Fix home page URL in h8300/README
  xtensa: Fix home page URL in Kconfig
  ...

18 files changed:
Documentation/DocBook/kernel-hacking.tmpl
arch/xtensa/Kconfig
drivers/gpu/drm/i915/i915_debugfs.c
drivers/ide/Kconfig
drivers/media/platform/soc_camera/mx2_camera.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/scsi/Kconfig
drivers/staging/android/binder.h
drivers/staging/octeon/ethernet.c
fs/btrfs/extent-tree.c
fs/btrfs/transaction.c
fs/ocfs2/cluster/tcp.c
net/bluetooth/hci_core.c
net/sctp/sm_make_chunk.c
sound/soc/codecs/wm8983.c
sound/soc/codecs/wm8985.c

diff --combined Documentation/DocBook/kernel-hacking.tmpl
@@@ -945,7 -945,7 +945,7 @@@ printk(KERN_INFO "my ip: %pI4\n", &
  
    <sect1 id="sym-exportsymbols">
     <title><function>EXPORT_SYMBOL()</function>
-     <filename class="headerfile">include/linux/module.h</filename></title>
+     <filename class="headerfile">include/linux/export.h</filename></title>
  
     <para>
      This is the classic method of exporting a symbol: dynamically
  
    <sect1 id="sym-exportsymbols-gpl">
     <title><function>EXPORT_SYMBOL_GPL()</function>
-     <filename class="headerfile">include/linux/module.h</filename></title>
+     <filename class="headerfile">include/linux/export.h</filename></title>
  
     <para>
      Similar to <function>EXPORT_SYMBOL()</function> except that the
@@@ -1184,6 -1184,13 +1184,6 @@@ static struct block_device_operations o
       <filename>Documentation/kbuild/kconfig-language.txt</filename>.
      </para>
  
 -    <para>
 -     You may well want to make your CONFIG option only visible if
 -     <symbol>CONFIG_EXPERIMENTAL</symbol> is enabled: this serves as a
 -     warning to users.  There many other fancy things you can do: see
 -     the various <filename>Kconfig</filename> files for ideas.
 -    </para>
 -
      <para>
       In your description of the option, make sure you address both the
       expert user and the user who knows nothing about your feature.  Mention
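For context, the macro the updated DocBook entry now points at is used like this; a minimal sketch of a module exporting one helper (module and function names are invented for illustration):

    #include <linux/export.h>   /* EXPORT_SYMBOL()/EXPORT_SYMBOL_GPL() live here */
    #include <linux/module.h>

    int myhelper_twice(int x)
    {
            return 2 * x;
    }
    /* make the symbol visible to all modules */
    EXPORT_SYMBOL(myhelper_twice);

    MODULE_LICENSE("GPL");

EXPORT_SYMBOL_GPL() is used the same way but limits the symbol to GPL-compatible modules.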
diff --combined arch/xtensa/Kconfig
@@@ -22,7 -22,7 +22,7 @@@ config XTENS
          configurable and extensible.  The Linux port to the Xtensa
          architecture supports all processor configurations and extensions,
          with reasonable minimum requirements.  The Xtensa Linux project has
-         a home page at <http://xtensa.sourceforge.net/>.
+         a home page at <http://www.linux-xtensa.org/>.
  
  config RWSEM_XCHGADD_ALGORITHM
        def_bool y
@@@ -132,7 -132,6 +132,7 @@@ choic
  
  config XTENSA_PLATFORM_ISS
        bool "ISS"
 +      depends on TTY
        select XTENSA_CALIBRATE_CCOUNT
        select SERIAL_CONSOLE
        select XTENSA_ISS_NETWORK
diff --combined drivers/gpu/drm/i915/i915_debugfs.c
@@@ -30,7 -30,6 +30,7 @@@
  #include <linux/debugfs.h>
  #include <linux/slab.h>
  #include <linux/export.h>
 +#include <generated/utsrelease.h>
  #include <drm/drmP.h>
  #include "intel_drv.h"
  #include "intel_ringbuffer.h"
@@@ -691,7 -690,6 +691,7 @@@ static int i915_error_state(struct seq_
  
        seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
 +      seq_printf(m, "Kernel: " UTS_RELEASE);
        seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
        seq_printf(m, "EIR: 0x%08x\n", error->eir);
        seq_printf(m, "IER: 0x%08x\n", error->ier);
@@@ -1460,7 -1458,7 +1460,7 @@@ static const char *swizzle_string(unsig
        case I915_BIT_6_SWIZZLE_9_10_17:
                return "bit9/bit10/bit17";
        case I915_BIT_6_SWIZZLE_UNKNOWN:
-               return "unkown";
+               return "unknown";
        }
  
        return "bug";
diff --combined drivers/ide/Kconfig
@@@ -322,7 -322,8 +322,7 @@@ config BLK_DEV_GENERI
            which otherwise might not be supported.
  
  config BLK_DEV_OPTI621
 -      tristate "OPTi 82C621 chipset enhanced support (EXPERIMENTAL)"
 -      depends on EXPERIMENTAL
 +      tristate "OPTi 82C621 chipset enhanced support"
        select BLK_DEV_IDEPCI
        help
          This is a driver for the OPTi 82C621 EIDE controller.
@@@ -416,6 -417,7 +416,6 @@@ config BLK_DEV_CY82C69
  
  config BLK_DEV_CS5520
        tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
 -      depends on EXPERIMENTAL
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@@ -700,11 -702,6 +700,6 @@@ config BLK_DEV_IDE_TX493
        depends on SOC_TX4939
        select BLK_DEV_IDEDMA_SFF
  
- config BLK_DEV_IDE_AT91
-       tristate "Atmel AT91 (SAM9, CAP9, AT572D940HF) IDE support"
-       depends on ARM && ARCH_AT91 && !ARCH_AT91RM9200 && !ARCH_AT91X40
-       select IDE_TIMINGS
  config BLK_DEV_IDE_ICSIDE
        tristate "ICS IDE interface support"
        depends on ARM && ARCH_ACORN
@@@ -759,8 -756,8 +754,8 @@@ config BLK_DEV_GAYL
          use Gayle IDE interfaces on the Zorro expansion bus.
  
  config BLK_DEV_BUDDHA
 -      tristate "Buddha/Catweasel/X-Surf IDE interface support (EXPERIMENTAL)"
 -      depends on ZORRO && EXPERIMENTAL
 +      tristate "Buddha/Catweasel/X-Surf IDE interface support"
 +      depends on ZORRO
        help
          This is the IDE driver for the IDE interfaces on the Buddha, Catweasel
          and X-Surf expansion boards.  It supports up to two interfaces on the
diff --combined drivers/media/platform/soc_camera/mx2_camera.c
@@@ -345,7 -345,7 +345,7 @@@ static struct mx2_fmt_cfg mx27_emma_prp
        /*
         * This is a generic configuration which is valid for most
         * prp input-output format combinations.
-        * We set the incomming and outgoing pixelformat to a
+        * We set the incoming and outgoing pixelformat to a
         * 16 Bit wide format and adjust the bytesperline
         * accordingly. With this configuration the inputdata
         * will not be changed by the emma and could be any type
@@@ -1707,9 -1707,9 +1707,9 @@@ static int mx27_camera_emma_init(struc
                goto out;
        }
  
 -      pcdev->base_emma = devm_request_and_ioremap(pcdev->dev, res_emma);
 -      if (!pcdev->base_emma) {
 -              err = -EADDRNOTAVAIL;
 +      pcdev->base_emma = devm_ioremap_resource(pcdev->dev, res_emma);
 +      if (IS_ERR(pcdev->base_emma)) {
 +              err = PTR_ERR(pcdev->base_emma);
                goto out;
        }
  
@@@ -1824,9 -1824,9 +1824,9 @@@ static int mx2_camera_probe(struct plat
        INIT_LIST_HEAD(&pcdev->discard);
        spin_lock_init(&pcdev->lock);
  
 -      pcdev->base_csi = devm_request_and_ioremap(&pdev->dev, res_csi);
 -      if (!pcdev->base_csi) {
 -              err = -EADDRNOTAVAIL;
 +      pcdev->base_csi = devm_ioremap_resource(&pdev->dev, res_csi);
 +      if (IS_ERR(pcdev->base_csi)) {
 +              err = PTR_ERR(pcdev->base_csi);
                goto exit;
        }
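The two hunks above follow the standard conversion from devm_request_and_ioremap(), which returned NULL on failure, to devm_ioremap_resource(), which returns an ERR_PTR() carrying the real error code. A sketch of the resulting probe-time pattern (driver name and resource index invented for illustration):

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* propagate the precise error */

            /* ... use base ... */
            return 0;
    }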
  
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@@ -1,6 -1,6 +1,6 @@@
  /* bnx2x_main.c: Broadcom Everest network driver.
   *
 - * Copyright (c) 2007-2012 Broadcom Corporation
 + * Copyright (c) 2007-2013 Broadcom Corporation
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
@@@ -59,7 -59,6 +59,7 @@@
  #include "bnx2x_init.h"
  #include "bnx2x_init_ops.h"
  #include "bnx2x_cmn.h"
 +#include "bnx2x_vfpf.h"
  #include "bnx2x_dcb.h"
  #include "bnx2x_sp.h"
  
@@@ -145,49 -144,39 +145,49 @@@ enum bnx2x_board_type 
        BCM57711E,
        BCM57712,
        BCM57712_MF,
 +      BCM57712_VF,
        BCM57800,
        BCM57800_MF,
 +      BCM57800_VF,
        BCM57810,
        BCM57810_MF,
 -      BCM57840_O,
 +      BCM57810_VF,
        BCM57840_4_10,
        BCM57840_2_20,
 -      BCM57840_MFO,
        BCM57840_MF,
 +      BCM57840_VF,
        BCM57811,
 -      BCM57811_MF
 +      BCM57811_MF,
 +      BCM57840_O,
 +      BCM57840_MFO,
 +      BCM57811_VF
  };
  
  /* indexed by board_type, above */
  static struct {
        char *name;
  } board_info[] = {
 -      { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
 -      { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
 -      { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
 -      { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
 -      { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
 -      { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
 -      { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
 -      { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
 -      { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
 -      { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
 -      { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
 -      { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
 -      { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
 -      { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
 -      { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
 -      { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
 +      [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
 +      [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
 +      [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
 +      [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
 +      [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
 +      [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
 +      [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
 +      [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
 +      [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
 +      [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
 +      [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
 +      [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
 +      [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
 +      [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
 +      [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
 +      [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
 +      [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
 +      [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
 +      [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
 +      [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
 +      [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
  };
  
  #ifndef PCI_DEVICE_ID_NX2_57710
  #ifndef PCI_DEVICE_ID_NX2_57712_MF
  #define PCI_DEVICE_ID_NX2_57712_MF    CHIP_NUM_57712_MF
  #endif
 +#ifndef PCI_DEVICE_ID_NX2_57712_VF
 +#define PCI_DEVICE_ID_NX2_57712_VF    CHIP_NUM_57712_VF
 +#endif
  #ifndef PCI_DEVICE_ID_NX2_57800
  #define PCI_DEVICE_ID_NX2_57800               CHIP_NUM_57800
  #endif
  #ifndef PCI_DEVICE_ID_NX2_57800_MF
  #define PCI_DEVICE_ID_NX2_57800_MF    CHIP_NUM_57800_MF
  #endif
 +#ifndef PCI_DEVICE_ID_NX2_57800_VF
 +#define PCI_DEVICE_ID_NX2_57800_VF    CHIP_NUM_57800_VF
 +#endif
  #ifndef PCI_DEVICE_ID_NX2_57810
  #define PCI_DEVICE_ID_NX2_57810               CHIP_NUM_57810
  #endif
  #ifndef PCI_DEVICE_ID_NX2_57840_O
  #define PCI_DEVICE_ID_NX2_57840_O     CHIP_NUM_57840_OBSOLETE
  #endif
 +#ifndef PCI_DEVICE_ID_NX2_57810_VF
 +#define PCI_DEVICE_ID_NX2_57810_VF    CHIP_NUM_57810_VF
 +#endif
  #ifndef PCI_DEVICE_ID_NX2_57840_4_10
  #define PCI_DEVICE_ID_NX2_57840_4_10  CHIP_NUM_57840_4_10
  #endif
  #ifndef PCI_DEVICE_ID_NX2_57840_MF
  #define PCI_DEVICE_ID_NX2_57840_MF    CHIP_NUM_57840_MF
  #endif
 +#ifndef PCI_DEVICE_ID_NX2_57840_VF
 +#define PCI_DEVICE_ID_NX2_57840_VF    CHIP_NUM_57840_VF
 +#endif
  #ifndef PCI_DEVICE_ID_NX2_57811
  #define PCI_DEVICE_ID_NX2_57811               CHIP_NUM_57811
  #endif
  #ifndef PCI_DEVICE_ID_NX2_57811_MF
  #define PCI_DEVICE_ID_NX2_57811_MF    CHIP_NUM_57811_MF
  #endif
 +#ifndef PCI_DEVICE_ID_NX2_57811_VF
 +#define PCI_DEVICE_ID_NX2_57811_VF    CHIP_NUM_57811_VF
 +#endif
 +
  static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
 +      { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
        { 0 }
  };
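The board_info[] rework above switches to designated initializers so that each name stays attached to its enum value even as the new *_VF entries are added to enum bnx2x_board_type. A generic sketch of the idiom (enum and strings invented for illustration):

    enum board { BOARD_A, BOARD_B, BOARD_C };

    static const struct {
            const char *name;
    } boards[] = {
            [BOARD_A] = { "Board A" },
            [BOARD_C] = { "Board C" },      /* order no longer has to match the enum */
            [BOARD_B] = { "Board B" },
    };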
  
@@@ -378,65 -346,6 +378,65 @@@ static u32 bnx2x_reg_rd_ind(struct bnx2
  #define DMAE_DP_DST_PCI               "pci dst_addr [%x:%08x]"
  #define DMAE_DP_DST_NONE      "dst_addr [none]"
  
 +void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
 +{
 +      u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
 +
 +      switch (dmae->opcode & DMAE_COMMAND_DST) {
 +      case DMAE_CMD_DST_PCI:
 +              if (src_type == DMAE_CMD_SRC_PCI)
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +                         dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              else
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%08x], len [%d*4], dst [%x:%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_lo >> 2,
 +                         dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              break;
 +      case DMAE_CMD_DST_GRC:
 +              if (src_type == DMAE_CMD_SRC_PCI)
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +                         dmae->len, dmae->dst_addr_lo >> 2,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              else
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src [%08x], len [%d*4], dst [%08x]\n"
 +                         "comp_addr [%x:%08x], comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_lo >> 2,
 +                         dmae->len, dmae->dst_addr_lo >> 2,
 +                         dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              break;
 +      default:
 +              if (src_type == DMAE_CMD_SRC_PCI)
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
 +                         "comp_addr [%x:%08x]  comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
 +                         dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              else
 +                      DP(msglvl, "DMAE: opcode 0x%08x\n"
 +                         "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
 +                         "comp_addr [%x:%08x]  comp_val 0x%08x\n",
 +                         dmae->opcode, dmae->src_addr_lo >> 2,
 +                         dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
 +                         dmae->comp_val);
 +              break;
 +      }
 +}
  
  /* copy command into DMAE command memory and set DMAE command go */
  void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@@ -487,7 -396,7 +487,7 @@@ u32 bnx2x_dmae_opcode(struct bnx2x *bp
        return opcode;
  }
  
 -static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 +void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                                      struct dmae_command *dmae,
                                      u8 src_type, u8 dst_type)
  {
        dmae->comp_val = DMAE_COMP_VAL;
  }
  
 -/* issue a dmae command over the init-channel and wailt for completion */
 -static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
 -                                    struct dmae_command *dmae)
 +/* issue a dmae command over the init-channel and wait for completion */
 +int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
  {
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
@@@ -782,16 -692,12 +782,16 @@@ void bnx2x_fw_dump_lvl(struct bnx2x *bp
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
  
        printk("%s", lvl);
 +
 +      /* dump buffer after the mark */
        for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
 +
 +      /* dump buffer before the mark */
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
@@@ -806,71 -712,7 +806,71 @@@ static void bnx2x_fw_dump(struct bnx2x 
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
  }
  
 -void bnx2x_panic_dump(struct bnx2x *bp)
 +static void bnx2x_hc_int_disable(struct bnx2x *bp)
 +{
 +      int port = BP_PORT(bp);
 +      u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 +      u32 val = REG_RD(bp, addr);
 +
 +      /* in E1 we must use only PCI configuration space to disable
 +       * MSI/MSIX capablility
 +       * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
 +       */
 +      if (CHIP_IS_E1(bp)) {
 +              /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
 +               * Use mask register to prevent from HC sending interrupts
 +               * after we exit the function
 +               */
 +              REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
 +
 +              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                       HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +                       HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +      } else
 +              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 +                       HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 +                       HC_CONFIG_0_REG_INT_LINE_EN_0 |
 +                       HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 +
 +      DP(NETIF_MSG_IFDOWN,
 +         "write %x to HC %d (addr 0x%x)\n",
 +         val, port, addr);
 +
 +      /* flush all outstanding writes */
 +      mmiowb();
 +
 +      REG_WR(bp, addr, val);
 +      if (REG_RD(bp, addr) != val)
 +              BNX2X_ERR("BUG! proper val not read from IGU!\n");
 +}
 +
 +static void bnx2x_igu_int_disable(struct bnx2x *bp)
 +{
 +      u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 +
 +      val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
 +               IGU_PF_CONF_INT_LINE_EN |
 +               IGU_PF_CONF_ATTN_BIT_EN);
 +
 +      DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
 +
 +      /* flush all outstanding writes */
 +      mmiowb();
 +
 +      REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +      if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
 +              BNX2X_ERR("BUG! proper val not read from IGU!\n");
 +}
 +
 +static void bnx2x_int_disable(struct bnx2x *bp)
 +{
 +      if (bp->common.int_block == INT_BLOCK_HC)
 +              bnx2x_hc_int_disable(bp);
 +      else
 +              bnx2x_igu_int_disable(bp);
 +}
 +
 +void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
  {
        int i;
        u16 j;
        u16 start = 0, end = 0;
        u8 cos;
  #endif
 +      if (disable_int)
 +              bnx2x_int_disable(bp);
  
        bp->stats_state = STATS_STATE_DISABLED;
        bp->eth_stats.unrecoverable_error++;
        }
  
  #ifdef BNX2X_STOP_ON_ERROR
 +
 +      /* event queue */
 +      for (i = 0; i < NUM_EQ_DESC; i++) {
 +              u32 *data = (u32 *)&bp->eq_ring[i].message.data;
 +
 +              BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
 +                        i, bp->eq_ring[i].message.opcode,
 +                        bp->eq_ring[i].message.error);
 +              BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
 +      }
 +
        /* Rings */
        /* Rx */
        for_each_valid_rx_queue(bp, i) {
@@@ -1209,8 -1038,8 +1209,8 @@@ static u32 bnx2x_flr_clnup_reg_poll(str
        return val;
  }
  
 -static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
 -                                         char *msg, u32 poll_cnt)
 +int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
 +                                  char *msg, u32 poll_cnt)
  {
        u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
        if (val != 0) {
        return 0;
  }
  
 -static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
 +/* Common routines with VF FLR cleanup */
 +u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
  {
        /* adjust polling timeout */
        if (CHIP_REV_IS_EMUL(bp))
        return FLR_POLL_CNT;
  }
  
 -static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
 +void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
  {
        struct pbf_pN_cmd_regs cmd_regs[] = {
                {0, (CHIP_IS_E3B0(bp)) ?
        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
  
  
 -static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
 -                                       u32 poll_cnt)
 +int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
  {
 -      struct sdm_op_gen op_gen = {0};
 +      u32 op_gen_command = 0;
  
        u32 comp_addr = BAR_CSTRORM_INTMEM +
                        CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
                return 1;
        }
  
 -      op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
 -      op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
 -      op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
 -      op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
 +      op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
 +      op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
 +      op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
 +      op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
  
        DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
 -      REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
 +      REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
  
        if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
                BNX2X_ERR("FW final cleanup did not succeed\n");
                DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
                   (REG_RD(bp, comp_addr)));
 -              ret = 1;
 +              bnx2x_panic();
 +              return 1;
        }
        /* Zero completion for nxt FLR */
        REG_WR(bp, comp_addr, 0);
        return ret;
  }
  
 -static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
 +u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
  {
        u16 status;
  
@@@ -1554,31 -1382,26 +1554,31 @@@ static void bnx2x_igu_int_enable(struc
        if (msix) {
                val &= ~(IGU_PF_CONF_INT_LINE_EN |
                         IGU_PF_CONF_SINGLE_ISR_EN);
 -              val |= (IGU_PF_CONF_FUNC_EN |
 -                      IGU_PF_CONF_MSI_MSIX_EN |
 +              val |= (IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN);
  
                if (single_msix)
                        val |= IGU_PF_CONF_SINGLE_ISR_EN;
        } else if (msi) {
                val &= ~IGU_PF_CONF_INT_LINE_EN;
 -              val |= (IGU_PF_CONF_FUNC_EN |
 -                      IGU_PF_CONF_MSI_MSIX_EN |
 +              val |= (IGU_PF_CONF_MSI_MSIX_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        } else {
                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
 -              val |= (IGU_PF_CONF_FUNC_EN |
 -                      IGU_PF_CONF_INT_LINE_EN |
 +              val |= (IGU_PF_CONF_INT_LINE_EN |
                        IGU_PF_CONF_ATTN_BIT_EN |
                        IGU_PF_CONF_SINGLE_ISR_EN);
        }
  
 +      /* Clean previous status - need to configure igu prior to ack*/
 +      if ((!msix) || single_msix) {
 +              REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 +              bnx2x_ack_int(bp);
 +      }
 +
 +      val |= IGU_PF_CONF_FUNC_EN;
 +
        DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
  
@@@ -1613,6 -1436,71 +1613,6 @@@ void bnx2x_int_enable(struct bnx2x *bp
                bnx2x_igu_int_enable(bp);
  }
  
 -static void bnx2x_hc_int_disable(struct bnx2x *bp)
 -{
 -      int port = BP_PORT(bp);
 -      u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
 -      u32 val = REG_RD(bp, addr);
 -
 -      /*
 -       * in E1 we must use only PCI configuration space to disable
 -       * MSI/MSIX capablility
 -       * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
 -       */
 -      if (CHIP_IS_E1(bp)) {
 -              /*  Since IGU_PF_CONF_MSI_MSIX_EN still always on
 -               *  Use mask register to prevent from HC sending interrupts
 -               *  after we exit the function
 -               */
 -              REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
 -
 -              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 -                       HC_CONFIG_0_REG_INT_LINE_EN_0 |
 -                       HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 -      } else
 -              val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
 -                       HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
 -                       HC_CONFIG_0_REG_INT_LINE_EN_0 |
 -                       HC_CONFIG_0_REG_ATTN_BIT_EN_0);
 -
 -      DP(NETIF_MSG_IFDOWN,
 -         "write %x to HC %d (addr 0x%x)\n",
 -         val, port, addr);
 -
 -      /* flush all outstanding writes */
 -      mmiowb();
 -
 -      REG_WR(bp, addr, val);
 -      if (REG_RD(bp, addr) != val)
 -              BNX2X_ERR("BUG! proper val not read from IGU!\n");
 -}
 -
 -static void bnx2x_igu_int_disable(struct bnx2x *bp)
 -{
 -      u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
 -
 -      val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
 -               IGU_PF_CONF_INT_LINE_EN |
 -               IGU_PF_CONF_ATTN_BIT_EN);
 -
 -      DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
 -
 -      /* flush all outstanding writes */
 -      mmiowb();
 -
 -      REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
 -      if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
 -              BNX2X_ERR("BUG! proper val not read from IGU!\n");
 -}
 -
 -static void bnx2x_int_disable(struct bnx2x *bp)
 -{
 -      if (bp->common.int_block == INT_BLOCK_HC)
 -              bnx2x_hc_int_disable(bp);
 -      else
 -              bnx2x_igu_int_disable(bp);
 -}
 -
  void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  {
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@@ -1698,11 -1586,11 +1698,11 @@@ static int bnx2x_get_leader_lock_resour
  }
  
  /**
 - * bnx2x_trylock_leader_lock- try to aquire a leader lock.
 + * bnx2x_trylock_leader_lock- try to acquire a leader lock.
   *
   * @bp: driver handle
   *
 - * Tries to aquire a leader lock for current engine.
 + * Tries to acquire a leader lock for current engine.
   */
  static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
  {
  
  static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
  
 +/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
 +static int bnx2x_schedule_sp_task(struct bnx2x *bp)
 +{
 +      /* Set the interrupt occurred bit for the sp-task to recognize it
 +       * must ack the interrupt and transition according to the IGU
 +       * state machine.
 +       */
 +      atomic_set(&bp->interrupt_occurred, 1);
 +
 +      /* The sp_task must execute only after this bit
 +       * is set, otherwise we will get out of sync and miss all
 +       * further interrupts. Hence, the barrier.
 +       */
 +      smp_wmb();
 +
 +      /* schedule sp_task to workqueue */
 +      return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +}
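bnx2x_schedule_sp_task() pairs with the reader in bnx2x_sp_task() later in this file: the flag is set and published with smp_wmb() before the work is queued, and the work handler issues smp_rmb() before atomic_read(). A stripped-down sketch of that publish/consume ordering (names invented, not bnx2x code):

    static atomic_t event_pending;

    static void publisher(void)             /* e.g. called from an ISR */
    {
            atomic_set(&event_pending, 1);
            smp_wmb();                      /* make the flag visible before the work runs */
            /* queue the deferred work here */
    }

    static void worker(void)                /* the deferred work handler */
    {
            smp_rmb();                      /* pairs with the publisher's smp_wmb() */
            if (atomic_read(&event_pending)) {
                    atomic_set(&event_pending, 0);
                    /* handle the event */
            }
    }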
  
  void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
  {
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);
  
 +      /* If cid is within VF range, replace the slowpath object with the
 +       * one corresponding to this VF
 +       */
 +      if (cid >= BNX2X_FIRST_VF_CID  &&
 +          cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
 +              bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
 +
        switch (command) {
        case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
                DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
  #else
                return;
  #endif
 +      /* SRIOV: reschedule any 'in_progress' operations */
 +      bnx2x_iov_sp_event(bp, cid, true);
  
        smp_mb__before_atomic_inc();
        atomic_inc(&bp->cq_spq_left);
                 * mark pending ACK to MCP bit.
                 * prevent case that both bits are cleared.
                 * At the end of load/unload driver checks that
 -               * sp_state is cleaerd, and this order prevents
 +               * sp_state is cleared, and this order prevents
                 * races
                 */
                smp_mb__before_clear_bit();
                clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
                smp_mb__after_clear_bit();
  
 -              /* schedule workqueue to send ack to MCP */
 -              queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +              /* schedule the sp task as mcp ack is required */
 +              bnx2x_schedule_sp_task(bp);
        }
  
        return;
  }
  
 -void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 -                      u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
 -{
 -      u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
 -
 -      bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
 -                               start);
 -}
 -
  irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
  {
        struct bnx2x *bp = netdev_priv(dev_instance);
                if (status & (mask | 0x1)) {
                        struct cnic_ops *c_ops = NULL;
  
 -                      if (likely(bp->state == BNX2X_STATE_OPEN)) {
 -                              rcu_read_lock();
 -                              c_ops = rcu_dereference(bp->cnic_ops);
 -                              if (c_ops)
 -                                      c_ops->cnic_handler(bp->cnic_data,
 -                                                          NULL);
 -                              rcu_read_unlock();
 -                      }
 +                      rcu_read_lock();
 +                      c_ops = rcu_dereference(bp->cnic_ops);
 +                      if (c_ops && (bp->cnic_eth_dev.drv_state &
 +                                    CNIC_DRV_STATE_HANDLES_IRQ))
 +                              c_ops->cnic_handler(bp->cnic_data, NULL);
 +                      rcu_read_unlock();
  
                        status &= ~mask;
                }
        }
  
        if (unlikely(status & 0x1)) {
 -              queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +
 +              /* schedule sp task to perform default status block work, ack
 +               * attentions and enable interrupts.
 +               */
 +              bnx2x_schedule_sp_task(bp);
  
                status &= ~0x1;
                if (!status)
@@@ -2591,55 -2459,23 +2591,55 @@@ void bnx2x__link_status_update(struct b
                return;
  
        /* read updated dcb configuration */
 -      bnx2x_dcbx_pmf_update(bp);
 -
 -      bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 +      if (IS_PF(bp)) {
 +              bnx2x_dcbx_pmf_update(bp);
 +              bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
 +              if (bp->link_vars.link_up)
 +                      bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 +              else
 +                      bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 +                      /* indicate link status */
 +              bnx2x_link_report(bp);
  
 -      if (bp->link_vars.link_up)
 +      } else { /* VF */
 +              bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
 +                                        SUPPORTED_10baseT_Full |
 +                                        SUPPORTED_100baseT_Half |
 +                                        SUPPORTED_100baseT_Full |
 +                                        SUPPORTED_1000baseT_Full |
 +                                        SUPPORTED_2500baseX_Full |
 +                                        SUPPORTED_10000baseT_Full |
 +                                        SUPPORTED_TP |
 +                                        SUPPORTED_FIBRE |
 +                                        SUPPORTED_Autoneg |
 +                                        SUPPORTED_Pause |
 +                                        SUPPORTED_Asym_Pause);
 +              bp->port.advertising[0] = bp->port.supported[0];
 +
 +              bp->link_params.bp = bp;
 +              bp->link_params.port = BP_PORT(bp);
 +              bp->link_params.req_duplex[0] = DUPLEX_FULL;
 +              bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
 +              bp->link_params.req_line_speed[0] = SPEED_10000;
 +              bp->link_params.speed_cap_mask[0] = 0x7f0000;
 +              bp->link_params.switch_cfg = SWITCH_CFG_10G;
 +              bp->link_vars.mac_type = MAC_TYPE_BMAC;
 +              bp->link_vars.line_speed = SPEED_10000;
 +              bp->link_vars.link_status =
 +                      (LINK_STATUS_LINK_UP |
 +                       LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
 +              bp->link_vars.link_up = 1;
 +              bp->link_vars.duplex = DUPLEX_FULL;
 +              bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 +              __bnx2x_link_report(bp);
                bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 -      else
 -              bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 -
 -      /* indicate link status */
 -      bnx2x_link_report(bp);
 +      }
  }
  
  static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
                                  u16 vlan_val, u8 allowed_prio)
  {
 -      struct bnx2x_func_state_params func_params = {0};
 +      struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_afex_update_params *f_update_params =
                &func_params.params.afex_update;
  
  static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
                                          u16 vif_index, u8 func_bit_map)
  {
 -      struct bnx2x_func_state_params func_params = {0};
 +      struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_afex_viflists_params *update_params =
                &func_params.params.afex_viflists;
        int rc;
  
        /* set parameters according to cmd_type */
        update_params->afex_vif_list_command = cmd_type;
 -      update_params->vif_list_index = cpu_to_le16(vif_index);
 +      update_params->vif_list_index = vif_index;
        update_params->func_bit_map =
                (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
        update_params->func_to_clear = 0;
@@@ -2964,10 -2800,6 +2964,10 @@@ static unsigned long bnx2x_get_common_f
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
  
  
 +#ifdef BNX2X_STOP_ON_ERROR
 +      __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
 +#endif
 +
        return flags;
  }
  
@@@ -3043,12 -2875,15 +3043,12 @@@ static void bnx2x_pf_rx_q_prep(struct b
                                pause->sge_th_hi + FW_PREFETCH_CNT >
                                MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
  
 -              tpa_agg_size = min_t(u32,
 -                      (min_t(u32, 8, MAX_SKB_FRAGS) *
 -                      SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
 +              tpa_agg_size = TPA_AGG_SIZE;
                max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
                        SGE_PAGE_SHIFT;
                max_sge = ((max_sge + PAGES_PER_SGE - 1) &
                          (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
 -              sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
 -                                  0xffff);
 +              sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
        }
  
        /* pause - not for e1 */
  
        /* Maximum number or simultaneous TPA aggregation for this Queue.
         *
 -       * For PF Clients it should be the maximum avaliable number.
 +       * For PF Clients it should be the maximum available number.
         * VF driver(s) may want to define it to a smaller value.
         */
        rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
@@@ -3187,7 -3022,7 +3187,7 @@@ static void bnx2x_pf_init(struct bnx2x 
        if (bp->port.pmf)
                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  
 -      /* init Event Queue */
 +      /* init Event Queue - PCI bus guarantees correct endianity*/
        eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
        eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
        eq_data.producer = bp->eq_prod;
@@@ -3277,75 -3112,65 +3277,75 @@@ static void bnx2x_drv_info_fcoe_stat(st
                struct fcoe_statistics_params *fw_fcoe_stat =
                        &bp->fw_stats_data->fcoe;
  
 -              ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
 -                     fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
 +              ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
 +                        fcoe_stat->rx_bytes_lo,
 +                        fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
  
 -              ADD_64(fcoe_stat->rx_bytes_hi,
 -                     fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
 -                     fcoe_stat->rx_bytes_lo,
 -                     fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
 +              ADD_64_LE(fcoe_stat->rx_bytes_hi,
 +                        fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
 +                        fcoe_stat->rx_bytes_lo,
 +                        fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
  
 -              ADD_64(fcoe_stat->rx_bytes_hi,
 -                     fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
 -                     fcoe_stat->rx_bytes_lo,
 -                     fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
 +              ADD_64_LE(fcoe_stat->rx_bytes_hi,
 +                        fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
 +                        fcoe_stat->rx_bytes_lo,
 +                        fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
  
 -              ADD_64(fcoe_stat->rx_bytes_hi,
 -                     fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
 -                     fcoe_stat->rx_bytes_lo,
 -                     fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
 +              ADD_64_LE(fcoe_stat->rx_bytes_hi,
 +                        fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
 +                        fcoe_stat->rx_bytes_lo,
 +                        fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
  
 -              ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
 -                     fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
 +              ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
 +                        fcoe_stat->rx_frames_lo,
 +                        fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
  
 -              ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
 -                     fcoe_q_tstorm_stats->rcv_ucast_pkts);
 +              ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
 +                        fcoe_stat->rx_frames_lo,
 +                        fcoe_q_tstorm_stats->rcv_ucast_pkts);
  
 -              ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
 -                     fcoe_q_tstorm_stats->rcv_bcast_pkts);
 +              ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
 +                        fcoe_stat->rx_frames_lo,
 +                        fcoe_q_tstorm_stats->rcv_bcast_pkts);
  
 -              ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
 -                     fcoe_q_tstorm_stats->rcv_mcast_pkts);
 +              ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
 +                        fcoe_stat->rx_frames_lo,
 +                        fcoe_q_tstorm_stats->rcv_mcast_pkts);
  
 -              ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
 -                     fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
 +              ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
 +                        fcoe_stat->tx_bytes_lo,
 +                        fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
  
 -              ADD_64(fcoe_stat->tx_bytes_hi,
 -                     fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
 -                     fcoe_stat->tx_bytes_lo,
 -                     fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
 +              ADD_64_LE(fcoe_stat->tx_bytes_hi,
 +                        fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
 +                        fcoe_stat->tx_bytes_lo,
 +                        fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
  
 -              ADD_64(fcoe_stat->tx_bytes_hi,
 -                     fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
 -                     fcoe_stat->tx_bytes_lo,
 -                     fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
 +              ADD_64_LE(fcoe_stat->tx_bytes_hi,
 +                        fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
 +                        fcoe_stat->tx_bytes_lo,
 +                        fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
  
 -              ADD_64(fcoe_stat->tx_bytes_hi,
 -                     fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
 -                     fcoe_stat->tx_bytes_lo,
 -                     fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
 +              ADD_64_LE(fcoe_stat->tx_bytes_hi,
 +                        fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
 +                        fcoe_stat->tx_bytes_lo,
 +                        fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
  
 -              ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
 -                     fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
 +              ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
 +                        fcoe_stat->tx_frames_lo,
 +                        fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
  
 -              ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
 -                     fcoe_q_xstorm_stats->ucast_pkts_sent);
 +              ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
 +                        fcoe_stat->tx_frames_lo,
 +                        fcoe_q_xstorm_stats->ucast_pkts_sent);
  
 -              ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
 -                     fcoe_q_xstorm_stats->bcast_pkts_sent);
 +              ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
 +                        fcoe_stat->tx_frames_lo,
 +                        fcoe_q_xstorm_stats->bcast_pkts_sent);
  
 -              ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
 -                     fcoe_q_xstorm_stats->mcast_pkts_sent);
 +              ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
 +                        fcoe_stat->tx_frames_lo,
 +                        fcoe_q_xstorm_stats->mcast_pkts_sent);
        }
  
        /* ask L5 driver to add data to the struct */
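The ADD_64 -> ADD_64_LE conversion above accumulates little-endian firmware counters into split hi/lo 32-bit statistics fields. Roughly, such a helper adds the low halves, carries into the high half, and the _LE flavour converts the little-endian addend first; the exact macro bodies in bnx2x_stats.h may differ, so treat this as a sketch:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static inline void add_64(u32 *s_hi, u32 a_hi, u32 *s_lo, u32 a_lo)
    {
            *s_lo += a_lo;
            *s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);  /* carry out of the low half */
    }

    static inline void add_64_le(u32 *s_hi, __le32 a_hi, u32 *s_lo, __le32 a_lo)
    {
            add_64(s_hi, le32_to_cpu(a_hi), s_lo, le32_to_cpu(a_lo));
    }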
@@@ -3816,7 -3641,7 +3816,7 @@@ static void bnx2x_fan_failure(struct bn
                            "Please contact OEM Support for assistance\n");
  
        /*
 -       * Scheudle device reset (unload)
 +       * Schedule device reset (unload)
         * This is due to some boards consuming sufficient power when driver is
         * up to overheat if fan fails.
         */
@@@ -3966,10 -3791,6 +3966,10 @@@ static void bnx2x_attn_int_deasserted3(
  
                        if (val & DRV_STATUS_DRV_INFO_REQ)
                                bnx2x_handle_drv_info_req(bp);
 +
 +                      if (val & DRV_STATUS_VF_DISABLED)
 +                              bnx2x_vf_handle_flr_event(bp);
 +
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
  
@@@ -4766,8 -4587,8 +4766,8 @@@ static void bnx2x_attn_int(struct bnx2
  void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
                      u16 index, u8 op, u8 update)
  {
 -      u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
 -
 +      u32 igu_addr = bp->igu_base_addr;
 +      igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
        bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
                             igu_addr);
  }
@@@ -4795,7 -4616,7 +4795,7 @@@ static int  bnx2x_cnic_handle_cfc_del(s
  
                BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
                          cid);
 -              bnx2x_panic_dump(bp);
 +              bnx2x_panic_dump(bp, false);
        }
        bnx2x_cnic_cfc_comp(bp, cid, err);
        return 0;
@@@ -4837,8 -4658,7 +4837,8 @@@ static void bnx2x_handle_classification
        /* Always push next commands out, don't wait here */
        __set_bit(RAMROD_CONT, &ramrod_flags);
  
 -      switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
 +      switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
 +                          >> BNX2X_SWCID_SHIFT) {
        case BNX2X_FILTER_MAC_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
                if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@@ -4915,7 -4735,7 +4915,7 @@@ static void bnx2x_after_function_update
        struct bnx2x_queue_update_params *q_update_params =
                &queue_params.params.update;
  
 -      /* Send Q update command with afex vlan removal values  for all Qs */
 +      /* Send Q update command with afex vlan removal values for all Qs */
        queue_params.cmd = BNX2X_Q_CMD_UPDATE;
  
        /* set silent vlan removal values according to vlan mode */
@@@ -4989,7 -4809,7 +4989,7 @@@ static void bnx2x_eq_int(struct bnx2x *
        u8 echo;
        u32 cid;
        u8 opcode;
 -      int spqe_cnt = 0;
 +      int rc, spqe_cnt = 0;
        struct bnx2x_queue_sp_obj *q_obj;
        struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
        struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
        for (; sw_cons != hw_cons;
              sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
  
 -
                elem = &bp->eq_ring[EQ_DESC(sw_cons)];
  
 -              cid = SW_CID(elem->message.data.cfc_del_event.cid);
 -              opcode = elem->message.opcode;
 +              rc = bnx2x_iov_eq_sp_event(bp, elem);
 +              if (!rc) {
 +                      DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
 +                         rc);
 +                      goto next_spqe;
 +              }
  
 +              /* elem CID originates from FW; actually LE */
 +              cid = SW_CID((__force __le32)
 +                           elem->message.data.cfc_del_event.cid);
 +              opcode = elem->message.opcode;
  
                /* handle eq element */
                switch (opcode) {
 +              case EVENT_RING_OPCODE_VF_PF_CHANNEL:
 +                      DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
 +                      bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
 +                      continue;
 +
                case EVENT_RING_OPCODE_STAT_QUERY:
                        DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
                           "got statistics comp event %d\n",
@@@ -5203,65 -5011,50 +5203,65 @@@ next_spqe
  static void bnx2x_sp_task(struct work_struct *work)
  {
        struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 -      u16 status;
  
 -      status = bnx2x_update_dsb_idx(bp);
 -/*    if (status == 0)                                     */
 -/*            BNX2X_ERR("spurious slowpath interrupt!\n"); */
 +      DP(BNX2X_MSG_SP, "sp task invoked\n");
  
 -      DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
 +      /* make sure the atomic interupt_occurred has been written */
 +      smp_rmb();
 +      if (atomic_read(&bp->interrupt_occurred)) {
  
 -      /* HW attentions */
 -      if (status & BNX2X_DEF_SB_ATT_IDX) {
 -              bnx2x_attn_int(bp);
 -              status &= ~BNX2X_DEF_SB_ATT_IDX;
 -      }
 +              /* what work needs to be performed? */
 +              u16 status = bnx2x_update_dsb_idx(bp);
 +
 +              DP(BNX2X_MSG_SP, "status %x\n", status);
 +              DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
 +              atomic_set(&bp->interrupt_occurred, 0);
  
 -      /* SP events: STAT_QUERY and others */
 -      if (status & BNX2X_DEF_SB_IDX) {
 -              struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
 +              /* HW attentions */
 +              if (status & BNX2X_DEF_SB_ATT_IDX) {
 +                      bnx2x_attn_int(bp);
 +                      status &= ~BNX2X_DEF_SB_ATT_IDX;
 +              }
 +
 +              /* SP events: STAT_QUERY and others */
 +              if (status & BNX2X_DEF_SB_IDX) {
 +                      struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
  
                if (FCOE_INIT(bp) &&
 -                  (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 -                      /*
 -                       * Prevent local bottom-halves from running as
 -                       * we are going to change the local NAPI list.
 -                       */
 -                      local_bh_disable();
 -                      napi_schedule(&bnx2x_fcoe(bp, napi));
 -                      local_bh_enable();
 +                          (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
 +                              /* Prevent local bottom-halves from running as
 +                               * we are going to change the local NAPI list.
 +                               */
 +                              local_bh_disable();
 +                              napi_schedule(&bnx2x_fcoe(bp, napi));
 +                              local_bh_enable();
 +                      }
 +
 +                      /* Handle EQ completions */
 +                      bnx2x_eq_int(bp);
 +                      bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
 +                                   le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
 +
 +                      status &= ~BNX2X_DEF_SB_IDX;
                }
  
 -              /* Handle EQ completions */
 -              bnx2x_eq_int(bp);
 +              /* if status is non zero then perhaps something went wrong */
 +              if (unlikely(status))
 +                      DP(BNX2X_MSG_SP,
 +                         "got an unknown interrupt! (status 0x%x)\n", status);
  
 -              bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
 -                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
 +              /* ack status block only if something was actually handled */
 +              bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 +                           le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
  
 -              status &= ~BNX2X_DEF_SB_IDX;
        }
  
 -      if (unlikely(status))
 -              DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
 -                 status);
 -
 -      bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
 -           le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
 +      /* must be called after the EQ processing (since eq leads to sriov
 +       * ramrod completion flows).
 +       * This flow may have been scheduled by the arrival of a ramrod
 +       * completion, or by the sriov code rescheduling itself.
 +       */
 +      bnx2x_iov_sp_task(bp);
  
        /* afex - poll to check if VIFSET_ACK should be sent to MFW */
        if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@@ -5294,10 -5087,7 +5294,10 @@@ irqreturn_t bnx2x_msix_sp_int(int irq, 
                rcu_read_unlock();
        }
  
 -      queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
 +      /* schedule sp task to perform default status block work, ack
 +       * attentions and enable interrupts.
 +       */
 +      bnx2x_schedule_sp_task(bp);
  
        return IRQ_HANDLED;
  }
@@@ -5311,6 -5101,7 +5311,6 @@@ void bnx2x_drv_pulse(struct bnx2x *bp
                 bp->fw_drv_pulse_wr_seq);
  }
  
 -
  static void bnx2x_timer(unsigned long data)
  {
        struct bnx2x *bp = (struct bnx2x *) data;
        if (!netif_running(bp->dev))
                return;
  
 -      if (!BP_NOMCP(bp)) {
 +      if (IS_PF(bp) &&
 +          !BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);
                u32 drv_pulse;
                u32 mcp_pulse;
        if (bp->state == BNX2X_STATE_OPEN)
                bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
  
 +      /* sample pf vf bulletin board for new posts from pf */
 +      if (IS_VF(bp))
 +              bnx2x_sample_bulletin(bp);
 +
        mod_timer(&bp->timer, jiffies + bp->current_interval);
  }
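
As a side note, bnx2x_timer() above is a self-rearming periodic timer. A minimal sketch of that pattern, using the same old-style callback signature; the context struct and names are illustrative.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical periodic context; names are illustrative. */
struct ex_timer_ctx {
	struct timer_list	timer;
	unsigned long		interval;	/* in jiffies */
};

/* A timer fires once, so a periodic task must re-arm itself. */
static void ex_timer_fn(unsigned long data)
{
	struct ex_timer_ctx *ctx = (struct ex_timer_ctx *)data;

	/* ... periodic work would go here ... */

	mod_timer(&ctx->timer, jiffies + ctx->interval);
}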
  
@@@ -5492,7 -5278,7 +5492,7 @@@ static void bnx2x_map_sb_state_machines
                SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
  }
  
 -static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
 +void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
                          u8 vf_valid, int fw_sb_id, int igu_sb_id)
  {
        int igu_seg_id;
  
        DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
  
 -      /* write indecies to HW */
 +      /* write indices to HW - PCI guarantees endianness of regpairs */
        bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
  }
  
@@@ -5636,7 -5422,6 +5636,7 @@@ static void bnx2x_init_def_sb(struct bn
  
        bnx2x_zero_sp_sb(bp);
  
 +      /* PCI guarantees endianness of regpairs */
        sp_sb_data.state                = SB_ENABLED;
        sp_sb_data.host_sb_addr.lo      = U64_LO(section);
        sp_sb_data.host_sb_addr.hi      = U64_HI(section);
@@@ -5693,12 -5478,13 +5693,12 @@@ static void bnx2x_init_eq_ring(struct b
                min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
  }
  
 -
  /* called with netif_addr_lock_bh() */
 -void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
 -                       unsigned long rx_mode_flags,
 -                       unsigned long rx_accept_flags,
 -                       unsigned long tx_accept_flags,
 -                       unsigned long ramrod_flags)
 +int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
 +                      unsigned long rx_mode_flags,
 +                      unsigned long rx_accept_flags,
 +                      unsigned long tx_accept_flags,
 +                      unsigned long ramrod_flags)
  {
        struct bnx2x_rx_mode_ramrod_params ramrod_param;
        int rc;
        rc = bnx2x_config_rx_mode(bp, &ramrod_param);
        if (rc < 0) {
                BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
 -              return;
 +              return rc;
        }
 +
 +      return 0;
  }
  
 -/* called with netif_addr_lock_bh() */
 -void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 +static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
 +                                 unsigned long *rx_accept_flags,
 +                                 unsigned long *tx_accept_flags)
  {
 -      unsigned long rx_mode_flags = 0, ramrod_flags = 0;
 -      unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
 -
 -      if (!NO_FCOE(bp))
 -
 -              /* Configure rx_mode of FCoE Queue */
 -              __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
 +      /* Clear the flags first */
 +      *rx_accept_flags = 0;
 +      *tx_accept_flags = 0;
  
 -      switch (bp->rx_mode) {
 +      switch (rx_mode) {
        case BNX2X_RX_MODE_NONE:
                /*
                 * 'drop all' supersedes any accept flags that may have been
                 */
                break;
        case BNX2X_RX_MODE_NORMAL:
 -              __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
  
                /* internal switching mode */
 -              __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
  
                break;
        case BNX2X_RX_MODE_ALLMULTI:
 -              __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
  
                /* internal switching mode */
 -              __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
  
                break;
        case BNX2X_RX_MODE_PROMISC:
                 * should receive matched and unmatched (in resolution of port)
                 * unicast packets.
                 */
 -              __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
  
                /* internal switching mode */
 -              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
  
                if (IS_MF_SI(bp))
 -                      __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
 +                      __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
                else
 -                      __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
 +                      __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
  
                break;
        default:
 -              BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
 -              return;
 +              BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
 +              return -EINVAL;
        }
  
 +      /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
        if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
 -              __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
 -              __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
 +              __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
        }
  
 +      return 0;
 +}
 +
 +/* called with netif_addr_lock_bh() */
 +int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 +{
 +      unsigned long rx_mode_flags = 0, ramrod_flags = 0;
 +      unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
 +      int rc;
 +
 +      if (!NO_FCOE(bp))
 +              /* Configure rx_mode of FCoE Queue */
 +              __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
 +
 +      rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
 +                                   &tx_accept_flags);
 +      if (rc)
 +              return rc;
 +
        __set_bit(RAMROD_RX, &ramrod_flags);
        __set_bit(RAMROD_TX, &ramrod_flags);
  
 -      bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
 -                          tx_accept_flags, ramrod_flags);
 +      return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
 +                                 rx_accept_flags, tx_accept_flags,
 +                                 ramrod_flags);
  }
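
For illustration (not driver code): the refactor above moves the accept-flag computation into a helper that fills caller-provided bitmaps and returns an error code instead of writing to locals. A generic sketch of that out-parameter pattern, with made-up flag names.

#include <linux/bitops.h>
#include <linux/errno.h>

/* Illustrative bit numbers only -- not the driver's real accept flags. */
enum { EX_ACCEPT_UNICAST, EX_ACCEPT_MCAST, EX_ACCEPT_BCAST };

/* Fill caller-owned bitmaps and report failures, so one helper can serve
 * both the PF path (which sends the ramrod) and a VF path that forwards
 * the flags over the VF-PF channel.
 */
static int ex_fill_accept_flags(int mode, unsigned long *rx, unsigned long *tx)
{
	*rx = 0;
	*tx = 0;

	switch (mode) {
	case 0:		/* "normal": accept UC/MC/BC on both rx and tx */
		__set_bit(EX_ACCEPT_UNICAST, rx);
		__set_bit(EX_ACCEPT_MCAST, rx);
		__set_bit(EX_ACCEPT_BCAST, rx);
		__set_bit(EX_ACCEPT_UNICAST, tx);
		__set_bit(EX_ACCEPT_MCAST, tx);
		__set_bit(EX_ACCEPT_BCAST, tx);
		break;
	default:
		return -EINVAL;	/* caller can now propagate the failure */
	}

	return 0;
}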
  
  static void bnx2x_init_internal_common(struct bnx2x *bp)
@@@ -5933,13 -5699,6 +5933,13 @@@ static void bnx2x_init_eth_fp(struct bn
                cids[cos] = fp->txdata_ptr[cos]->cid;
        }
  
 +      /* nothing more for vf to do here */
 +      if (IS_VF(bp))
 +              return;
 +
 +      bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
 +                    fp->fw_sb_id, fp->igu_sb_id);
 +      bnx2x_update_fpsb_idx(fp);
        bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
                             fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
                             bnx2x_sp_mapping(bp, q_rdata), q_type);
         */
        bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
  
 -      DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
 -                 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 -                 fp->igu_sb_id);
 -      bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
 -                    fp->fw_sb_id, fp->igu_sb_id);
 -
 -      bnx2x_update_fpsb_idx(fp);
 +      DP(NETIF_MSG_IFUP,
 +         "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
 +         fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
 +         fp->igu_sb_id);
  }
  
  static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
@@@ -6024,22 -5786,17 +6024,22 @@@ void bnx2x_nic_init(struct bnx2x *bp, u
  
        for_each_eth_queue(bp, i)
                bnx2x_init_eth_fp(bp, i);
 -      /* Initialize MOD_ABS interrupts */
 -      bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 -                             bp->common.shmem_base, bp->common.shmem2_base,
 -                             BP_PORT(bp));
 +
        /* ensure status block indices were read */
        rmb();
 +      bnx2x_init_rx_rings(bp);
 +      bnx2x_init_tx_rings(bp);
 +
 +      if (IS_VF(bp))
 +              return;
 +
 +      /* Initialize MOD_ABS interrupts */
 +      bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
 +                             bp->common.shmem_base, bp->common.shmem2_base,
 +                             BP_PORT(bp));
  
        bnx2x_init_def_sb(bp);
        bnx2x_update_dsb_idx(bp);
 -      bnx2x_init_rx_rings(bp);
 -      bnx2x_init_tx_rings(bp);
        bnx2x_init_sp_ring(bp);
        bnx2x_init_eq_ring(bp);
        bnx2x_init_internal(bp, load_code);
@@@ -6479,6 -6236,49 +6479,6 @@@ static void bnx2x_setup_fan_failure_det
        REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
  }
  
 -static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
 -{
 -      u32 offset = 0;
 -
 -      if (CHIP_IS_E1(bp))
 -              return;
 -      if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
 -              return;
 -
 -      switch (BP_ABS_FUNC(bp)) {
 -      case 0:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
 -              break;
 -      case 1:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
 -              break;
 -      case 2:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
 -              break;
 -      case 3:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
 -              break;
 -      case 4:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
 -              break;
 -      case 5:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
 -              break;
 -      case 6:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
 -              break;
 -      case 7:
 -              offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
 -              break;
 -      default:
 -              return;
 -      }
 -
 -      REG_WR(bp, offset, pretend_func_num);
 -      REG_RD(bp, offset);
 -      DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
 -}
 -
  void bnx2x_pf_disable(struct bnx2x *bp)
  {
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
@@@ -6522,7 -6322,7 +6522,7 @@@ static int bnx2x_init_hw_common(struct 
        DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
  
        /*
 -       * take the UNDI lock to protect undi_unload flow from accessing
 +       * take the RESET lock to protect undi_unload flow from accessing
         * registers while we're resetting the chip
         */
        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
   *                queues with "old" ILT addresses.
   *            c.  PF enable in the PGLC.
   *            d.  Clear the was_error of the PF in the PGLC. (could have
 - *                occured while driver was down)
 + *                occurred while driver was down)
   *            e.  PF enable in the CFC (WEAK + STRONG)
   *            f.  Timers scan enable
   *    3.  PF driver unload flow:
                /* Step 1: set zeroes to all ilt page entries with valid bit on
                 * Step 2: set the timers first/last ilt entry to point
                 * to the entire range to prevent ILT range error for 3rd/4th
 -               * vnic (this code assumes existance of the vnic)
 +               * vnic (this code assumes existence of the vnic)
                 *
                 * both steps performed by call to bnx2x_ilt_client_init_op()
                 * with dummy TM client
                REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
        }
  
 -
        REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
        REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
  
  
        bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
  
 +      bnx2x_iov_init_dmae(bp);
 +
        /* clean the DMAE memory */
        bp->dmae_ready = 1;
        bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
@@@ -7192,6 -6991,7 +7192,6 @@@ static int bnx2x_init_hw_port(struct bn
                }
        }
  
 -
        /* If SPIO5 is set to generate interrupts, enable it for this port */
        val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
        if (val & MISC_SPIO_SPIO5) {
@@@ -7220,14 -7020,15 +7220,14 @@@ static void bnx2x_ilt_wr(struct bnx2x *
        REG_WR_DMAE(bp, reg, wb_write, 2);
  }
  
 -static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
 -                                 u8 idu_sb_id, bool is_Pf)
 +void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
  {
        u32 data, ctl, cnt = 100;
        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
        u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
        u32 sb_bit =  1 << (idu_sb_id%32);
 -      u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
 +      u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
  
        /* Not supported in BC mode */
@@@ -7418,10 -7219,8 +7418,10 @@@ static int bnx2x_init_hw_func(struct bn
        /* FLR cleanup - hmmm */
        if (!CHIP_IS_E1x(bp)) {
                rc = bnx2x_pf_flr_clnup(bp);
 -              if (rc)
 +              if (rc) {
 +                      bnx2x_fw_dump(bp);
                        return rc;
 +              }
        }
  
        /* set MSI reconfigure capability */
        ilt = BP_ILT(bp);
        cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
  
 +      if (IS_SRIOV(bp))
 +              cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
 +      cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
 +
 +      /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
 +       * those of the VFs, so the start line should be reset
 +       */
 +      cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
        for (i = 0; i < L2_ILT_LINES(bp); i++) {
                ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
                ilt->lines[cdu_ilt_start + i].page_mapping =
                        bp->context[i].cxt_mapping;
                ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
        }
 +
        bnx2x_ilt_init_op(bp, INITOP_SET);
  
        if (!CONFIGURE_NIC_MODE(bp)) {
  
        bnx2x_init_block(bp, BLOCK_TM, init_phase);
        bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 +
 +      bnx2x_iov_init_dq(bp);
 +
        bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
        bnx2x_init_block(bp, BLOCK_PRS, init_phase);
        bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
@@@ -7736,6 -7523,10 +7736,6 @@@ void bnx2x_free_mem(struct bnx2x *bp
  {
        int i;
  
 -      /* fastpath */
 -      bnx2x_free_fp_mem(bp);
 -      /* end of fastpath */
 -
        BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
                       sizeof(struct host_sp_status_block));
  
  
        BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 -}
 -
 -static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 -{
 -      int num_groups;
 -      int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
  
 -      /* number of queues for statistics is number of eth queues + FCoE */
 -      u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
 -
 -      /* Total number of FW statistics requests =
 -       * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
 -       * num of queues
 -       */
 -      bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 -
 -
 -      /* Request is built from stats_query_header and an array of
 -       * stats_query_cmd_group each of which contains
 -       * STATS_QUERY_CMD_COUNT rules. The real number or requests is
 -       * configured in the stats_query_header.
 -       */
 -      num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
 -                   (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
 -
 -      bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
 -                      num_groups * sizeof(struct stats_query_cmd_group);
 -
 -      /* Data for statistics requests + stats_conter
 -       *
 -       * stats_counter holds per-STORM counters that are incremented
 -       * when STORM has finished with the current request.
 -       *
 -       * memory for FCoE offloaded statistics are counted anyway,
 -       * even if they will not be sent.
 -       */
 -      bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
 -              sizeof(struct per_pf_stats) +
 -              sizeof(struct fcoe_statistics_params) +
 -              sizeof(struct per_queue_stats) * num_queue_stats +
 -              sizeof(struct stats_counter);
 -
 -      BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
 -                      bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 -
 -      /* Set shortcuts */
 -      bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
 -      bp->fw_stats_req_mapping = bp->fw_stats_mapping;
 -
 -      bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
 -              ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
 -
 -      bp->fw_stats_data_mapping = bp->fw_stats_mapping +
 -                                 bp->fw_stats_req_sz;
 -      return 0;
 -
 -alloc_mem_err:
 -      BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 -                     bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 -      BNX2X_ERR("Can't allocate memory\n");
 -      return -ENOMEM;
 +      bnx2x_iov_free_mem(bp);
  }
  
 +
  int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
  {
        if (!CHIP_IS_E1x(bp))
@@@ -7806,6 -7655,10 +7806,6 @@@ int bnx2x_alloc_mem(struct bnx2x *bp
        BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
                        sizeof(struct bnx2x_slowpath));
  
 -      /* Allocated memory for FW statistics  */
 -      if (bnx2x_alloc_fw_stats_mem(bp))
 -              goto alloc_mem_err;
 -
        /* Allocate memory for CDU context:
         * This memory is allocated separately and not in the generic ILT
         * functions because CDU differs in few aspects:
        if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
                goto alloc_mem_err;
  
 +      if (bnx2x_iov_alloc_mem(bp))
 +              goto alloc_mem_err;
 +
        /* Slow path ring */
        BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
  
        BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
  
 -
 -      /* fastpath */
 -      /* need to be done at the end, since it's self adjusting to amount
 -       * of memory available for RSS queues
 -       */
 -      if (bnx2x_alloc_fp_mem(bp))
 -              goto alloc_mem_err;
        return 0;
  
  alloc_mem_err:
@@@ -7946,53 -7803,43 +7946,53 @@@ int bnx2x_setup_leading(struct bnx2x *b
   *
   * In case of MSI-X it will also try to enable MSI-X.
   */
 -void bnx2x_set_int_mode(struct bnx2x *bp)
 +int bnx2x_set_int_mode(struct bnx2x *bp)
  {
 +      int rc = 0;
 +
 +      if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
 +              return -EINVAL;
 +
        switch (int_mode) {
 -      case INT_MODE_MSI:
 +      case BNX2X_INT_MODE_MSIX:
 +              /* attempt to enable msix */
 +              rc = bnx2x_enable_msix(bp);
 +
 +              /* msix attained */
 +              if (!rc)
 +                      return 0;
 +
 +              /* vfs use only msix */
 +              if (rc && IS_VF(bp))
 +                      return rc;
 +
 +              /* failed to enable multiple MSI-X */
 +              BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
 +                             bp->num_queues,
 +                             1 + bp->num_cnic_queues);
 +
 +              /* falling through... */
 +      case BNX2X_INT_MODE_MSI:
                bnx2x_enable_msi(bp);
 +
                /* falling through... */
 -      case INT_MODE_INTx:
 +      case BNX2X_INT_MODE_INTX:
                bp->num_ethernet_queues = 1;
                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
                BNX2X_DEV_INFO("set number of queues to 1\n");
                break;
        default:
 -              /* if we can't use MSI-X we only need one fp,
 -               * so try to enable MSI-X with the requested number of fp's
 -               * and fallback to MSI or legacy INTx with one fp
 -               */
 -              if (bnx2x_enable_msix(bp) ||
 -                  bp->flags & USING_SINGLE_MSIX_FLAG) {
 -                      /* failed to enable multiple MSI-X */
 -                      BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
 -                                     bp->num_queues,
 -                                     1 + bp->num_cnic_queues);
 -
 -                      bp->num_queues = 1 + bp->num_cnic_queues;
 -
 -                      /* Try to enable MSI */
 -                      if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
 -                          !(bp->flags & DISABLE_MSI_FLAG))
 -                              bnx2x_enable_msi(bp);
 -              }
 -              break;
 +              BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
 +              return -EINVAL;
        }
 +      return 0;
  }
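
For illustration (not driver code): a compact sketch of the MSI-X -> MSI -> INTx fallback that the rewritten bnx2x_set_int_mode() above implements with switch fall-through; the ex_try_*() stubs are stand-ins for the driver's own enable helpers.

#include <linux/errno.h>

static int ex_try_msix(void) { return -ENOSYS; }	/* pretend MSI-X failed */
static int ex_try_msi(void)  { return 0; }

enum ex_int_mode { EX_INT_MSIX, EX_INT_MSI, EX_INT_INTX };

static int ex_set_int_mode(enum ex_int_mode requested)
{
	switch (requested) {
	case EX_INT_MSIX:
		if (!ex_try_msix())
			return 0;		/* MSI-X attained */
		/* fall through - degrade to MSI */
	case EX_INT_MSI:
		if (!ex_try_msi())
			return 0;
		/* fall through - degrade to legacy INTx */
	case EX_INT_INTX:
		return 0;			/* single queue, always available */
	default:
		return -EINVAL;
	}
}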
  
 -/* must be called prioir to any HW initializations */
 +/* must be called prior to any HW initializations */
  static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
  {
 +      if (IS_SRIOV(bp))
 +              return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
        return L2_ILT_LINES(bp);
  }
  
@@@ -8375,8 -8222,8 +8375,8 @@@ static void bnx2x_reset_func(struct bnx
  
        /* SP SB */
        REG_WR8(bp, BAR_CSTRORM_INTMEM +
 -                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
 -                 SB_DISABLED);
 +              CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
 +              SB_DISABLED);
  
        for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
                REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@@ -8677,7 -8524,7 +8677,7 @@@ void bnx2x_chip_cleanup(struct bnx2x *b
        }
  
        /* Give HW time to discard old tx messages */
 -      usleep_range(1000, 1000);
 +      usleep_range(1000, 2000);
  
        /* Clean all ETH MACs */
        rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
  
        netif_addr_unlock_bh(bp->dev);
  
 +      bnx2x_iov_chip_cleanup(bp);
  
  
        /*
@@@ -8843,7 -8689,7 +8843,7 @@@ static void bnx2x_set_234_gates(struct 
                       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
                       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
        } else {
-               /* Prevent incomming interrupts in IGU */
+               /* Prevent incoming interrupts in IGU */
                val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
  
                REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
@@@ -9101,7 -8947,7 +9101,7 @@@ static int bnx2x_er_poll_igu_vq(struct 
                if (pend_bits == 0)
                        break;
  
 -              usleep_range(1000, 1000);
 +              usleep_range(1000, 2000);
        } while (cnt-- > 0);
  
        if (cnt <= 0) {
@@@ -9118,7 -8964,8 +9118,7 @@@ static int bnx2x_process_kill(struct bn
        int cnt = 1000;
        u32 val = 0;
        u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
 -              u32 tags_63_32 = 0;
 -
 +      u32 tags_63_32 = 0;
  
        /* Empty the Tetris buffer, wait for 1s */
        do {
                    (pgl_exp_rom2 == 0xffffffff) &&
                    (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
                        break;
 -              usleep_range(1000, 1000);
 +              usleep_range(1000, 2000);
        } while (cnt-- > 0);
  
        if (cnt <= 0) {
        /* Wait for 1ms to empty GLUE and PCI-E core queues,
         * PSWHST, GRC and PSWRD Tetris buffer.
         */
 -      usleep_range(1000, 1000);
 +      usleep_range(1000, 2000);
  
        /* Prepare to chip reset: */
        /* MCP */
@@@ -9452,10 -9299,8 +9452,10 @@@ static void bnx2x_sp_rtnl_task(struct w
  
        rtnl_lock();
  
 -      if (!netif_running(bp->dev))
 -              goto sp_rtnl_exit;
 +      if (!netif_running(bp->dev)) {
 +              rtnl_unlock();
 +              return;
 +      }
  
        /* if stop on error is defined no recovery flows should be executed */
  #ifdef BNX2X_STOP_ON_ERROR
  
                bnx2x_parity_recover(bp);
  
 -              goto sp_rtnl_exit;
 +              rtnl_unlock();
 +              return;
        }
  
        if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                bnx2x_nic_load(bp, LOAD_NORMAL);
  
 -              goto sp_rtnl_exit;
 +              rtnl_unlock();
 +              return;
        }
  #ifdef BNX2X_STOP_ON_ERROR
  sp_rtnl_not_reset:
                DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
                netif_device_detach(bp->dev);
                bnx2x_close(bp->dev);
 +              rtnl_unlock();
 +              return;
 +      }
 +
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
 +              DP(BNX2X_MSG_SP,
 +                 "sending set mcast vf pf channel message from rtnl sp-task\n");
 +              bnx2x_vfpf_set_mcast(bp->dev);
 +      }
 +
 +      if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
 +                             &bp->sp_rtnl_state)) {
 +              DP(BNX2X_MSG_SP,
 +                 "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
 +              bnx2x_vfpf_storm_rx_mode(bp);
        }
  
 -sp_rtnl_exit:
 +      /* work that must run with the rtnl lock not held (as it takes the
 +       * lock itself and can be called from other contexts as well)
 +       */
        rtnl_unlock();
 -}
  
 -/* end of nic load/unload */
 +      /* enable SR-IOV if applicable */
 +      if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
 +                                             &bp->sp_rtnl_state))
 +              bnx2x_enable_sriov(bp);
 +}
  
  static void bnx2x_period_task(struct work_struct *work)
  {
@@@ -9571,13 -9394,43 +9571,13 @@@ period_task_exit
   * Init service functions
   */
  
 -static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 +u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
  {
        u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
        u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
        return base + (BP_ABS_FUNC(bp)) * stride;
  }
  
 -static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
 -{
 -      u32 reg = bnx2x_get_pretend_reg(bp);
 -
 -      /* Flush all outstanding writes */
 -      mmiowb();
 -
 -      /* Pretend to be function 0 */
 -      REG_WR(bp, reg, 0);
 -      REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */
 -
 -      /* From now we are in the "like-E1" mode */
 -      bnx2x_int_disable(bp);
 -
 -      /* Flush all outstanding writes */
 -      mmiowb();
 -
 -      /* Restore the original function */
 -      REG_WR(bp, reg, BP_ABS_FUNC(bp));
 -      REG_RD(bp, reg);
 -}
 -
 -static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
 -{
 -      if (CHIP_IS_E1(bp))
 -              bnx2x_int_disable(bp);
 -      else
 -              bnx2x_undi_int_disable_e1h(bp);
 -}
 -
  static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
                                        struct bnx2x_mac_vals *vals)
  {
@@@ -9805,13 -9658,11 +9805,13 @@@ static int bnx2x_prev_unload_uncommon(s
        if (bnx2x_prev_is_path_marked(bp))
                return bnx2x_prev_mcp_done(bp);
  
 +      BNX2X_DEV_INFO("Path is unmarked\n");
 +
        /* If function has FLR capabilities, and existing FW version matches
         * the one required, then FLR will be sufficient to clean any residue
         * left by previous driver
         */
 -      rc = bnx2x_test_firmware_version(bp, false);
 +      rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
  
        if (!rc) {
                /* fw version is good */
@@@ -9867,6 -9718,7 +9867,6 @@@ static int bnx2x_prev_unload_common(str
                /* Check if the UNDI driver was previously loaded
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
 -              reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
                if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
                        tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                        if (tmp_reg == 0x7) {
                                prev_undi = true;
                                /* clear the UNDI indication */
                                REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
 +                              /* clear possible idle check errors */
 +                              REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
                        }
                }
                /* wait until BRB is empty */
@@@ -9942,8 -9792,7 +9942,8 @@@ static void bnx2x_prev_interrupted_dmae
        if (!CHIP_IS_E1x(bp)) {
                u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
                if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
 -                      BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
 +                      DP(BNX2X_MSG_SP,
 +                         "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
                        REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
                               1 << BP_FUNC(bp));
                }
@@@ -9985,6 -9834,7 +9985,6 @@@ static int bnx2x_prev_unload(struct bnx
                REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
        }
  
 -
        do {
                /* Lock MCP using an unload request */
                fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
@@@ -10551,10 -10401,10 +10551,10 @@@ static void bnx2x_link_settings_request
  
  static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
  {
 -      mac_hi = cpu_to_be16(mac_hi);
 -      mac_lo = cpu_to_be32(mac_lo);
 -      memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
 -      memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
 +      __be16 mac_hi_be = cpu_to_be16(mac_hi);
 +      __be32 mac_lo_be = cpu_to_be32(mac_lo);
 +      memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
 +      memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
  }
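
A worked example (values made up) of the byte layout produced by the typed big-endian locals above:

/* With mac_hi = 0x0010 and mac_lo = 0x18a1b2c3:
 *
 *   memcpy of cpu_to_be16(0x0010)     -> mac_buf[0..1] = 00 10
 *   memcpy of cpu_to_be32(0x18a1b2c3) -> mac_buf[2..5] = 18 a1 b2 c3
 *
 * i.e. the MAC address 00:10:18:a1:b2:c3 on both little- and big-endian
 * hosts; keeping the intermediates in __be16/__be32 also documents the
 * byte order to sparse.
 */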
  
  static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
        bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
                   (config & PORT_FEATURE_WOL_ENABLED));
  
 +      if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
 +          PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
 +              bp->flags |= NO_ISCSI_FLAG;
 +      if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
 +          PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
 +              bp->flags |= NO_FCOE_FLAG;
 +
        BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
                       bp->link_params.lane_config,
                       bp->link_params.speed_cap_mask[0],
@@@ -10704,21 -10547,21 +10704,21 @@@ static void bnx2x_get_fcoe_info(struct 
                /* Port info */
                bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
                        SHMEM_RD(bp,
 -                              dev_info.port_hw_config[port].
 +                               dev_info.port_hw_config[port].
                                 fcoe_wwn_port_name_upper);
                bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
                        SHMEM_RD(bp,
 -                              dev_info.port_hw_config[port].
 +                               dev_info.port_hw_config[port].
                                 fcoe_wwn_port_name_lower);
  
                /* Node info */
                bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
                        SHMEM_RD(bp,
 -                              dev_info.port_hw_config[port].
 +                               dev_info.port_hw_config[port].
                                 fcoe_wwn_node_name_upper);
                bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
                        SHMEM_RD(bp,
 -                              dev_info.port_hw_config[port].
 +                               dev_info.port_hw_config[port].
                                 fcoe_wwn_node_name_lower);
        } else if (!IS_MF_SD(bp)) {
                /*
@@@ -10816,7 -10659,7 +10816,7 @@@ static void bnx2x_get_cnic_mac_hwinfo(s
                        /* Zero primary MAC configuration */
                        memset(bp->dev->dev_addr, 0, ETH_ALEN);
  
 -              if (IS_MF_FCOE_AFEX(bp))
 +              if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
                        /* use FIP MAC as primary MAC */
                        memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
  
@@@ -10879,6 -10722,7 +10879,6 @@@ static void bnx2x_get_mac_hwinfo(struc
        }
  
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 -      memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  
        if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
                dev_err(&bp->pdev->dev,
@@@ -10943,7 -10787,7 +10943,7 @@@ static int bnx2x_get_hwinfo(struct bnx2
  
                        while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
                                tout--;
 -                              usleep_range(1000, 1000);
 +                              usleep_range(1000, 2000);
                        }
  
                        if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
@@@ -11281,13 -11125,9 +11281,13 @@@ static int bnx2x_init_bp(struct bnx2x *
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
        INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
 -      rc = bnx2x_get_hwinfo(bp);
 -      if (rc)
 -              return rc;
 +      if (IS_PF(bp)) {
 +              rc = bnx2x_get_hwinfo(bp);
 +              if (rc)
 +                      return rc;
 +      } else {
 +              random_ether_addr(bp->dev->dev_addr);
 +      }
  
        bnx2x_set_modes_bitmap(bp);
  
        func = BP_FUNC(bp);
  
        /* need to reset chip if undi was active */
 -      if (!BP_NOMCP(bp)) {
 +      if (IS_PF(bp) && !BP_NOMCP(bp)) {
                /* init fw_seq */
                bp->fw_seq =
                        SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
        bp->mrrs = mrrs;
  
        bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
 +      if (IS_VF(bp))
 +              bp->rx_ring_size = MAX_RX_AVAIL;
  
        /* make sure that the numbers are in the right granularity */
        bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
                bp->cnic_base_cl_id = FP_SB_MAX_E2;
  
        /* multiple tx priority */
 -      if (CHIP_IS_E1x(bp))
 +      if (IS_VF(bp))
 +              bp->max_cos = 1;
 +      else if (CHIP_IS_E1x(bp))
                bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
 -      if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
 +      else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
                bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
 -      if (CHIP_IS_E3B0(bp))
 +      else if (CHIP_IS_E3B0(bp))
                bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
 +      else
 +              BNX2X_ERR("unknown chip %x revision %x\n",
 +                        CHIP_NUM(bp), CHIP_REV(bp));
 +      BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
  
        /* We need at least one default status block for slow-path events,
         * second status block for the L2 queue, and a third status block for
   * net_device service functions
   */
  
 +static int bnx2x_open_epilog(struct bnx2x *bp)
 +{
 +      /* Enable sriov via delayed work. This must be done via delayed work
 +       * because it causes the probe of the vf devices to be run, which invokes
 +       * register_netdevice which must have rtnl lock taken. As we are holding
 +       * the lock right now, that could only work if the probe would not take
 +       * the lock. However, as the probe of the vf may be called from other
 +       * contexts as well (such as when passthrough to a vm fails) it can't assume
 +       * the lock is being held for it. Using delayed work here allows the
 +       * probe code to simply take the lock (i.e. wait for it to be released
 +       * if it is being held).
 +       */
 +      smp_mb__before_clear_bit();
 +      set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
 +      smp_mb__after_clear_bit();
 +      schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +
 +      return 0;
 +}
 +
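
For illustration (not driver code): the deferral pattern bnx2x_open_epilog() uses, shown with both the producer and the workqueue consumer. All names are hypothetical, and the barrier calls simply mirror the driver's own set_bit() usage above.

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>

/* Illustrative request bit and context; the names are not the driver's. */
#define EX_REQ_ENABLE_SRIOV	0

struct ex_rtnl_ctx {
	unsigned long		sp_rtnl_state;
	struct delayed_work	sp_rtnl_task;
};

/* Producer side, called with rtnl held: only record the request and kick
 * the work item; never call code here that takes rtnl itself.
 */
static void ex_request_sriov_enable(struct ex_rtnl_ctx *ctx)
{
	smp_mb__before_clear_bit();
	set_bit(EX_REQ_ENABLE_SRIOV, &ctx->sp_rtnl_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&ctx->sp_rtnl_task, 0);
}

/* Consumer side, workqueue context with rtnl NOT held: now it is safe to
 * run code that acquires rtnl on its own, such as probing the new VFs.
 */
static void ex_sp_rtnl_task(struct work_struct *work)
{
	struct ex_rtnl_ctx *ctx = container_of(to_delayed_work(work),
					       struct ex_rtnl_ctx, sp_rtnl_task);

	if (test_and_clear_bit(EX_REQ_ENABLE_SRIOV, &ctx->sp_rtnl_state))
		pr_info("would enable SR-IOV here (may take rtnl internally)\n");
}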
  /* called with rtnl_lock */
  static int bnx2x_open(struct net_device *dev)
  {
        bool global = false;
        int other_engine = BP_PATH(bp) ? 0 : 1;
        bool other_load_status, load_status;
 +      int rc;
  
        bp->stats_init = true;
  
  
        bnx2x_set_power_state(bp, PCI_D0);
  
 -      other_load_status = bnx2x_get_load_status(bp, other_engine);
 -      load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
 -
 -      /*
 -       * If parity had happen during the unload, then attentions
 +      /* If parity had happened during the unload, then attentions
         * and/or RECOVERY_IN_PROGRES may still be set. In this case we
         * want the first function loaded on the current engine to
         * complete the recovery.
 +       * Parity recovery is only relevant for PF driver.
         */
 -      if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
 -          bnx2x_chk_parity_attn(bp, &global, true))
 -              do {
 -                      /*
 -                       * If there are attentions and they are in a global
 -                       * blocks, set the GLOBAL_RESET bit regardless whether
 -                       * it will be this function that will complete the
 -                       * recovery or not.
 -                       */
 -                      if (global)
 -                              bnx2x_set_reset_global(bp);
 +      if (IS_PF(bp)) {
 +              other_load_status = bnx2x_get_load_status(bp, other_engine);
 +              load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
 +              if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
 +                  bnx2x_chk_parity_attn(bp, &global, true)) {
 +                      do {
 +                              /* If there are attentions and they are in a
 +                               * global blocks, set the GLOBAL_RESET bit
 +                               * regardless whether it will be this function
 +                               * that will complete the recovery or not.
 +                               */
 +                              if (global)
 +                                      bnx2x_set_reset_global(bp);
  
 -                      /*
 -                       * Only the first function on the current engine should
 -                       * try to recover in open. In case of attentions in
 -                       * global blocks only the first in the chip should try
 -                       * to recover.
 -                       */
 -                      if ((!load_status &&
 -                           (!global || !other_load_status)) &&
 -                          bnx2x_trylock_leader_lock(bp) &&
 -                          !bnx2x_leader_reset(bp)) {
 -                              netdev_info(bp->dev, "Recovered in open\n");
 -                              break;
 -                      }
 +                              /* Only the first function on the current
 +                               * engine should try to recover in open. In case
 +                               * of attentions in global blocks only the first
 +                               * in the chip should try to recover.
 +                               */
 +                              if ((!load_status &&
 +                                   (!global || !other_load_status)) &&
 +                                    bnx2x_trylock_leader_lock(bp) &&
 +                                    !bnx2x_leader_reset(bp)) {
 +                                      netdev_info(bp->dev,
 +                                                  "Recovered in open\n");
 +                                      break;
 +                              }
  
 -                      /* recovery has failed... */
 -                      bnx2x_set_power_state(bp, PCI_D3hot);
 -                      bp->recovery_state = BNX2X_RECOVERY_FAILED;
 +                              /* recovery has failed... */
 +                              bnx2x_set_power_state(bp, PCI_D3hot);
 +                              bp->recovery_state = BNX2X_RECOVERY_FAILED;
  
 -                      BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
 -                                "If you still see this message after a few retries then power cycle is required.\n");
 +                              BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
 +                                        "If you still see this message after a few retries then power cycle is required.\n");
  
 -                      return -EAGAIN;
 -              } while (0);
 +                              return -EAGAIN;
 +                      } while (0);
 +              }
 +      }
  
        bp->recovery_state = BNX2X_RECOVERY_DONE;
 -      return bnx2x_nic_load(bp, LOAD_OPEN);
 +      rc = bnx2x_nic_load(bp, LOAD_OPEN);
 +      if (rc)
 +              return rc;
 +      return bnx2x_open_epilog(bp);
  }
  
  /* called with rtnl_lock */
@@@ -11621,6 -11428,7 +11621,6 @@@ static int bnx2x_set_mc_list(struct bnx
        return rc;
  }
  
 -
  /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
  void bnx2x_set_rx_mode(struct net_device *dev)
  {
                  CHIP_IS_E1(bp)))
                rx_mode = BNX2X_RX_MODE_ALLMULTI;
        else {
 -              /* some multicasts */
 -              if (bnx2x_set_mc_list(bp) < 0)
 -                      rx_mode = BNX2X_RX_MODE_ALLMULTI;
 +              if (IS_PF(bp)) {
 +                      /* some multicasts */
 +                      if (bnx2x_set_mc_list(bp) < 0)
 +                              rx_mode = BNX2X_RX_MODE_ALLMULTI;
  
 -              if (bnx2x_set_uc_list(bp) < 0)
 -                      rx_mode = BNX2X_RX_MODE_PROMISC;
 +                      if (bnx2x_set_uc_list(bp) < 0)
 +                              rx_mode = BNX2X_RX_MODE_PROMISC;
 +              } else {
 +                      /* configuring mcast to a vf involves sleeping (when we
 +                       * wait for the pf's response). Since this function is
 +                       * called from a non-sleepable context we must schedule
 +                       * a work item for this purpose
 +                       */
 +                      smp_mb__before_clear_bit();
 +                      set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
 +                              &bp->sp_rtnl_state);
 +                      smp_mb__after_clear_bit();
 +                      schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +              }
        }
  
        bp->rx_mode = rx_mode;
                return;
        }
  
 -      bnx2x_set_storm_rx_mode(bp);
 +      if (IS_PF(bp)) {
 +              bnx2x_set_storm_rx_mode(bp);
 +      } else {
 +              /* configuring rx mode to storms in a vf involves sleeping (when
 +               * we wait for the pf's response). Since this function is
 +               * called from a non-sleepable context we must schedule
 +               * a work item for this purpose
 +               */
 +              smp_mb__before_clear_bit();
 +              set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
 +                      &bp->sp_rtnl_state);
 +              smp_mb__after_clear_bit();
 +              schedule_delayed_work(&bp->sp_rtnl_task, 0);
 +      }
  }
  
  /* called with rtnl_lock */
@@@ -11789,9 -11571,7 +11789,9 @@@ static const struct net_device_ops bnx2
        .ndo_poll_controller    = poll_bnx2x,
  #endif
        .ndo_setup_tc           = bnx2x_setup_tc,
 -
 +#ifdef CONFIG_BNX2X_SRIOV
 +      .ndo_set_vf_mac         = bnx2x_set_vf_mac,
 +#endif
  #ifdef NETDEV_FCOE_WWNN
        .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
  #endif
@@@ -11815,9 -11595,10 +11815,9 @@@ static int bnx2x_set_coherency_mask(str
        return 0;
  }
  
 -static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
 -                        unsigned long board_type)
 +static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 +                        struct net_device *dev, unsigned long board_type)
  {
 -      struct bnx2x *bp;
        int rc;
        u32 pci_cfg_dword;
        bool chip_is_e1x = (board_type == BCM57710 ||
                            board_type == BCM57711E);
  
        SET_NETDEV_DEV(dev, &pdev->dev);
 -      bp = netdev_priv(dev);
  
        bp->dev = dev;
        bp->pdev = pdev;
 -      bp->flags = 0;
  
        rc = pci_enable_device(pdev);
        if (rc) {
                goto err_out_disable;
        }
  
 -      if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 -              dev_err(&bp->pdev->dev, "Cannot find second PCI device"
 -                     " base address, aborting\n");
 +      if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
 +              dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
                rc = -ENODEV;
                goto err_out_disable;
        }
                pci_save_state(pdev);
        }
  
 -      bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 -      if (bp->pm_cap == 0) {
 -              dev_err(&bp->pdev->dev,
 -                      "Cannot find power management capability, aborting\n");
 -              rc = -EIO;
 -              goto err_out_release;
 +      if (IS_PF(bp)) {
 +              bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
 +              if (bp->pm_cap == 0) {
 +                      dev_err(&bp->pdev->dev,
 +                              "Cannot find power management capability, aborting\n");
 +                      rc = -EIO;
 +                      goto err_out_release;
 +              }
        }
  
        if (!pci_is_pcie(pdev)) {
         * support Physical Device Assignment where kernel BDF maybe arbitrary
         * (depending on hypervisor).
         */
 -      if (chip_is_e1x)
 +      if (chip_is_e1x) {
                bp->pf_num = PCI_FUNC(pdev->devfn);
 -      else {/* chip is E2/3*/
 +      } else {
 +              /* chip is E2/3*/
                pci_read_config_dword(bp->pdev,
                                      PCICFG_ME_REGISTER, &pci_cfg_dword);
                bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
 -                  ME_REG_ABS_PF_NUM_SHIFT);
 +                                ME_REG_ABS_PF_NUM_SHIFT);
        }
        BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
  
         * Clean the following indirect addresses for all functions since it
         * is not used by the driver.
         */
 -      REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
 -      REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
 -      REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 -      REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 +      if (IS_PF(bp)) {
 +              REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
 +              REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
 +              REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
 +              REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 +
 +              if (chip_is_e1x) {
 +                      REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
 +                      REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
 +                      REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
 +                      REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
 +              }
  
 -      if (chip_is_e1x) {
 -              REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
 -              REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
 -              REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
 -              REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
 +              /* Enable internal target-read (in case we are probed after PF
 +               * FLR). Must be done prior to any BAR read access. Only for
 +               * 57712 and up
 +               */
 +              if (!chip_is_e1x)
 +                      REG_WR(bp,
 +                             PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
        }
  
 -      /*
 -       * Enable internal target-read (in case we are probed after PF FLR).
 -       * Must be done prior to any BAR read access. Only for 57712 and up
 -       */
 -      if (!chip_is_e1x)
 -              REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 -
        dev->watchdog_timeo = TX_TIMEOUT;
  
        dev->netdev_ops = &bnx2x_netdev_ops;
@@@ -12000,9 -11778,8 +12000,9 @@@ err_out
  
  static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
  {
 -      u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
 +      u32 val = 0;
  
 +      pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
        *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
  
        /* return value of 1=2.5GHz 2=5GHz */
@@@ -12015,7 -11792,7 +12015,7 @@@ static int bnx2x_check_firmware(struct 
        struct bnx2x_fw_file_hdr *fw_hdr;
        struct bnx2x_fw_file_section *sections;
        u32 offset, len, num_ops;
 -      u16 *ops_offsets;
 +      __be16 *ops_offsets;
        int i;
        const u8 *fw_ver;
  
  
        /* Likewise for the init_ops offsets */
        offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
 -      ops_offsets = (u16 *)(firmware->data + offset);
 +      ops_offsets = (__force __be16 *)(firmware->data + offset);
        num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
  
        for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
@@@ -12267,12 -12044,8 +12267,12 @@@ static int bnx2x_set_qm_cid_count(struc
  {
        int cid_count = BNX2X_L2_MAX_CID(bp);
  
 +      if (IS_SRIOV(bp))
 +              cid_count += BNX2X_VF_CIDS;
 +
        if (CNIC_SUPPORT(bp))
                cid_count += CNIC_CID_MAX;
 +
        return roundup(cid_count, QM_CID_ROUND);
  }
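
A quick note on the rounding above, with made-up numbers:

/* roundup(x, y) (include/linux/kernel.h) rounds x up to the next multiple
 * of y, e.g. roundup(37, 16) == 48 and roundup(48, 16) == 48.  The values
 * 37 and 16 are illustrative; QM_CID_ROUND is the driver's real rounding
 * unit but its value is not shown in this hunk.
 */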
  
   *
   */
  static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
 -                                   int cnic_cnt)
 +                                   int cnic_cnt, bool is_vf)
  {
 -      int pos;
 -      u16 control;
 +      int pos, index;
 +      u16 control = 0;
  
        pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
  
         * If MSI-X is not supported - return number of SBs needed to support
         * one fast path queue: one FP queue + SB for CNIC
         */
 -      if (!pos)
 +      if (!pos) {
 +              dev_info(&pdev->dev, "no msix capability found\n");
                return 1 + cnic_cnt;
 +      }
 +      dev_info(&pdev->dev, "msix capability found\n");
  
        /*
         * The value in the PCI configuration space is the index of the last
         * entry, namely one less than the actual size of the table, which is
         * exactly what we want to return from this function: number of all SBs
         * without the default SB.
 +       * For VFs there is no default SB, so we return (index + 1).
         */
        pci_read_config_word(pdev, pos  + PCI_MSI_FLAGS, &control);
 -      return control & PCI_MSIX_FLAGS_QSIZE;
 -}
  
 -struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
 +      index = control & PCI_MSIX_FLAGS_QSIZE;
  
 -static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 -{
 -      struct net_device *dev = NULL;
 -      struct bnx2x *bp;
 -      int pcie_width, pcie_speed;
 -      int rc, max_non_def_sbs;
 -      int rx_count, tx_count, rss_count, doorbell_size;
 -      int cnic_cnt;
 -      /*
 -       * An estimated maximum supported CoS number according to the chip
 -       * version.
 -       * We will try to roughly estimate the maximum number of CoSes this chip
 -       * may support in order to minimize the memory allocated for Tx
 -       * netdev_queue's. This number will be accurately calculated during the
 -       * initialization of bp->max_cos based on the chip versions AND chip
 -       * revision in the bnx2x_init_bp().
 -       */
 -      u8 max_cos_est = 0;
 +      return is_vf ? index + 1 : index;
 +}
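
For illustration (not the driver's exact code): how an MSI-X table size is typically read from PCI config space, which is why the VF case above returns index + 1.

#include <linux/pci.h>

/* The QSIZE field in the MSI-X control word holds the index of the last
 * table entry, i.e. (number of vectors - 1).
 */
static int ex_msix_table_size(struct pci_dev *pdev)
{
	int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	u16 control;

	if (!pos)
		return 0;	/* no MSI-X capability at all */

	pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control);
	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}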
  
 -      switch (ent->driver_data) {
 +static int set_max_cos_est(int chip_id)
 +{
 +      switch (chip_id) {
        case BCM57710:
        case BCM57711:
        case BCM57711E:
 -              max_cos_est = BNX2X_MULTI_TX_COS_E1X;
 -              break;
 -
 +              return BNX2X_MULTI_TX_COS_E1X;
        case BCM57712:
        case BCM57712_MF:
 -              max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
 -              break;
 -
 +      case BCM57712_VF:
 +              return BNX2X_MULTI_TX_COS_E2_E3A0;
        case BCM57800:
        case BCM57800_MF:
 +      case BCM57800_VF:
        case BCM57810:
        case BCM57810_MF:
 -      case BCM57840_O:
        case BCM57840_4_10:
        case BCM57840_2_20:
 +      case BCM57840_O:
        case BCM57840_MFO:
 +      case BCM57810_VF:
        case BCM57840_MF:
 +      case BCM57840_VF:
        case BCM57811:
        case BCM57811_MF:
 -              max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
 -              break;
 -
 +      case BCM57811_VF:
 +              return BNX2X_MULTI_TX_COS_E3B0;
        default:
 -              pr_err("Unknown board_type (%ld), aborting\n",
 -                         ent->driver_data);
 +              pr_err("Unknown board_type (%d), aborting\n", chip_id);
                return -ENODEV;
        }
 +}
  
 -      cnic_cnt = 1;
 -      max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
 +static int set_is_vf(int chip_id)
 +{
 +      switch (chip_id) {
 +      case BCM57712_VF:
 +      case BCM57800_VF:
 +      case BCM57810_VF:
 +      case BCM57840_VF:
 +      case BCM57811_VF:
 +              return true;
 +      default:
 +              return false;
 +      }
 +}
  
 -      WARN_ON(!max_non_def_sbs);
 +struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
 +
 +static int bnx2x_init_one(struct pci_dev *pdev,
 +                                  const struct pci_device_id *ent)
 +{
 +      struct net_device *dev = NULL;
 +      struct bnx2x *bp;
 +      int pcie_width, pcie_speed;
 +      int rc, max_non_def_sbs;
 +      int rx_count, tx_count, rss_count, doorbell_size;
 +      int max_cos_est;
 +      bool is_vf;
 +      int cnic_cnt;
 +
 +      /* An estimated maximum supported CoS number according to the chip
 +       * version.
 +       * We will try to roughly estimate the maximum number of CoSes this chip
 +       * may support in order to minimize the memory allocated for Tx
 +       * netdev_queue's. This number will be accurately calculated during the
 +       * initialization of bp->max_cos based on the chip versions AND chip
 +       * revision in the bnx2x_init_bp().
 +       */
 +      max_cos_est = set_max_cos_est(ent->driver_data);
 +      if (max_cos_est < 0)
 +              return max_cos_est;
 +      is_vf = set_is_vf(ent->driver_data);
 +      cnic_cnt = is_vf ? 0 : 1;
 +
 +      max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
  
        /* Maximum number of RSS queues: one IGU SB goes to CNIC */
 -      rss_count = max_non_def_sbs - cnic_cnt;
 +      rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
 +
 +      if (rss_count < 1)
 +              return -EINVAL;
  
        /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
        rx_count = rss_count + cnic_cnt;
  
 -      /*
 -       * Maximum number of netdev Tx queues:
 +      /* Maximum number of netdev Tx queues:
         * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
         */
        tx_count = rss_count * max_cos_est + cnic_cnt;
  
        bp = netdev_priv(dev);
  
 +      bp->flags = 0;
 +      if (is_vf)
 +              bp->flags |= IS_VF_FLAG;
 +
        bp->igu_sb_cnt = max_non_def_sbs;
 +      bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
        bp->msg_enable = debug;
        bp->cnic_support = cnic_cnt;
        bp->cnic_probe = bnx2x_cnic_probe;
  
        pci_set_drvdata(pdev, dev);
  
 -      rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
 +      rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }
  
 +      BNX2X_DEV_INFO("This is a %s function\n",
 +                     IS_PF(bp) ? "physical" : "virtual");
        BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
 -      BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
 -
 +      BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
        BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
 -                        tx_count, rx_count);
 +                     tx_count, rx_count);
  
        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;
  
 -      /*
 -       * Map doorbels here as we need the real value of bp->max_cos which
 -       * is initialized in bnx2x_init_bp().
 +      /* Map doorbells here as we need the real value of bp->max_cos which
 +       * is initialized in bnx2x_init_bp() to determine the number of
 +       * l2 connections.
         */
 -      doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
 -      if (doorbell_size > pci_resource_len(pdev, 2)) {
 -              dev_err(&bp->pdev->dev,
 -                      "Cannot map doorbells, bar size too small, aborting\n");
 -              rc = -ENOMEM;
 -              goto init_one_exit;
 +      if (IS_VF(bp)) {
 +              bnx2x_vf_map_doorbells(bp);
 +              rc = bnx2x_vf_pci_alloc(bp);
 +              if (rc)
 +                      goto init_one_exit;
 +      } else {
 +              doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
 +              if (doorbell_size > pci_resource_len(pdev, 2)) {
 +                      dev_err(&bp->pdev->dev,
 +                              "Cannot map doorbells, bar size too small, aborting\n");
 +                      rc = -ENOMEM;
 +                      goto init_one_exit;
 +              }
 +              bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 +                                              doorbell_size);
        }
 -      bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
 -                                      doorbell_size);
        if (!bp->doorbells) {
                dev_err(&bp->pdev->dev,
                        "Cannot map doorbell space, aborting\n");
                goto init_one_exit;
        }
  
 +      if (IS_VF(bp)) {
 +              rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
 +              if (rc)
 +                      goto init_one_exit;
 +      }
 +
 +      /* Enable SR-IOV if the capability is found in configuration space.
 +       * Once the generic SR-IOV framework makes it in from the
 +       * pci tree this will be revised to allow dynamic control
 +       * over the number of VFs. Right now, change the num of vfs
 +       * param below to enable SR-IOV.
 +       */
 +      rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
 +      if (rc)
 +              goto init_one_exit;
 +
        /* calc qm_cid_count */
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
 +      BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
  
        /* disable FCOE L2 queue for E1x*/
        if (CHIP_IS_E1x(bp))
        /* Configure interrupt mode: try to enable MSI-X/MSI if
         * needed.
         */
 -      bnx2x_set_int_mode(bp);
 +      rc = bnx2x_set_int_mode(bp);
 +      if (rc) {
 +              dev_err(&pdev->dev, "Cannot set interrupts\n");
 +              goto init_one_exit;
 +      }
 +      BNX2X_DEV_INFO("set interrupts successfully\n");
  
 +      /* register the net device */
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }
 +      BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
  
  
        if (!NO_FCOE(bp)) {
        }
  
        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 +      BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
 +                     pcie_width, pcie_speed);
  
        BNX2X_DEV_INFO(
                "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
@@@ -12552,7 -12257,7 +12552,7 @@@ init_one_exit
        if (bp->regview)
                iounmap(bp->regview);
  
 -      if (bp->doorbells)
 +      if (IS_PF(bp) && bp->doorbells)
                iounmap(bp->doorbells);
  
        free_netdev(dev);
@@@ -12592,37 -12297,25 +12592,37 @@@ static void bnx2x_remove_one(struct pci
        unregister_netdev(dev);
  
        /* Power on: we can't let PCI layer write to us while we are in D3 */
 -      bnx2x_set_power_state(bp, PCI_D0);
 +      if (IS_PF(bp))
 +              bnx2x_set_power_state(bp, PCI_D0);
  
        /* Disable MSI/MSI-X */
        bnx2x_disable_msi(bp);
  
        /* Power off */
 -      bnx2x_set_power_state(bp, PCI_D3hot);
 +      if (IS_PF(bp))
 +              bnx2x_set_power_state(bp, PCI_D3hot);
  
        /* Make sure RESET task is not scheduled before continuing */
        cancel_delayed_work_sync(&bp->sp_rtnl_task);
  
 +      bnx2x_iov_remove_one(bp);
 +
 +      /* send message via vfpf channel to release the resources of this vf */
 +      if (IS_VF(bp))
 +              bnx2x_vfpf_release(bp);
 +
        if (bp->regview)
                iounmap(bp->regview);
  
 -      if (bp->doorbells)
 -              iounmap(bp->doorbells);
 -
 -      bnx2x_release_firmware(bp);
 +      /* For a VF, doorbells are part of the regview and were unmapped along
 +       * with it. FW is only loaded by the PF.
 +       */
 +      if (IS_PF(bp)) {
 +              if (bp->doorbells)
 +                      iounmap(bp->doorbells);
  
 +              bnx2x_release_firmware(bp);
 +      }
        bnx2x_free_mem_bp(bp);
  
        free_netdev(dev);
@@@ -13410,36 -13103,4 +13410,36 @@@ struct cnic_eth_dev *bnx2x_cnic_probe(s
        return cp;
  }
  
 +u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u32 offset = BAR_USTRORM_INTMEM;
 +
 +      if (IS_VF(bp))
 +              return bnx2x_vf_ustorm_prods_offset(bp, fp);
 +      else if (!CHIP_IS_E1x(bp))
 +              offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
 +      else
 +              offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
  
 +      return offset;
 +}
 +
 +/* Called only on E1H or E2.
 + * When pretending to be a PF, the pretend value is the function number 0...7.
 + * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
 + * combination.
 + */
 +int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
 +{
 +      u32 pretend_reg;
 +
 +      if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
 +              return -1;
 +
 +      /* get my own pretend register */
 +      pretend_reg = bnx2x_get_pretend_reg(bp);
 +      REG_WR(bp, pretend_reg, pretend_func_val);
 +      REG_RD(bp, pretend_reg);
 +      return 0;
 +}
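The pretend register gives one PCI function a way to temporarily issue GRC
register accesses on behalf of another function, which the SR-IOV paths added
here presumably rely on when the PF configures its VFs. A rough caller sketch,
assuming HW_VF_HANDLE() builds the PF-num:VF-valid:ABS-VFID value described
above and using a purely illustrative register offset:

/* Sketch: touch a register on behalf of abs_vfid, then stop pretending.
 * EXAMPLE_REG is a placeholder offset, not a real bnx2x register.
 */
static int bnx2x_example_pretend_access(struct bnx2x *bp, u8 abs_vfid)
{
        int rc;

        rc = bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
        if (rc)
                return rc;

        REG_WR(bp, EXAMPLE_REG, 0);     /* this access now lands on the VF */

        /* write back our own function number so later accesses are normal */
        return bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}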
@@@ -64,7 -64,7 +64,7 @@@ static const char mlx4_en_version[] 
  
  /* Enable RSS UDP traffic */
  MLX4_EN_PARM_INT(udp_rss, 1,
-                "Enable RSS for incomming UDP traffic or disabled (0)");
+                "Enable RSS for incoming UDP traffic or disabled (0)");
  
  /* Priority pausing */
  MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
@@@ -95,28 -95,6 +95,28 @@@ int en_print(const char *level, const s
        return i;
  }
  
 +void mlx4_en_update_loopback_state(struct net_device *dev,
 +                                 netdev_features_t features)
 +{
 +      struct mlx4_en_priv *priv = netdev_priv(dev);
 +
 +      priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
 +                      MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
 +
 +      /* Drop the packet (RX filtering needed) only when SRIOV is enabled
 +       * and we are neither running the loopback selftest nor have HW
 +       * loopback (flb) enabled.
 +       */
 +      if (mlx4_is_mfunc(priv->mdev->dev) &&
 +          !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
 +              priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
 +
 +      /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
 +       * is requested
 +       */
 +      if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
 +              priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
 +}
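mlx4_en_update_loopback_state() recomputes both loopback-related flags from
the feature set it is handed, so it has to be re-run whenever NETIF_F_LOOPBACK
is toggled or a loopback selftest starts. A hypothetical caller sketch,
assuming the driver wires this into an ndo_set_features-style hook (the real
call sites are not spelled out here):

/* Sketch only: re-evaluate the loopback state when NETIF_F_LOOPBACK flips. */
static int example_set_features(struct net_device *dev,
                                netdev_features_t features)
{
        if ((dev->features ^ features) & NETIF_F_LOOPBACK)
                mlx4_en_update_loopback_state(dev, features);

        return 0;
}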
 +
  static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
  {
        struct mlx4_en_profile *params = &mdev->profile;
@@@ -213,8 -191,10 +213,8 @@@ static void *mlx4_en_add(struct mlx4_de
  
        printk_once(KERN_INFO "%s", mlx4_en_version);
  
 -      mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
 +      mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
        if (!mdev) {
 -              dev_err(&dev->pdev->dev, "Device struct alloc failed, "
 -                      "aborting.\n");
                err = -ENOMEM;
                goto err_free_res;
        }
@@@ -1,25 -1,24 +1,25 @@@
  /*
   * QLogic qlcnic NIC Driver
 - * Copyright (c)  2009-2010 QLogic Corporation
 + * Copyright (c) 2009-2013 QLogic Corporation
   *
   * See LICENSE.qlcnic for copyright and licensing details.
   */
  
 -#include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/interrupt.h>
  
  #include "qlcnic.h"
 +#include "qlcnic_hw.h"
  
  #include <linux/swab.h>
  #include <linux/dma-mapping.h>
 +#include <linux/if_vlan.h>
  #include <net/ip.h>
  #include <linux/ipv6.h>
  #include <linux/inetdevice.h>
 -#include <linux/sysfs.h>
  #include <linux/aer.h>
  #include <linux/log2.h>
 +#include <linux/pci.h>
  
  MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
  MODULE_LICENSE("GPL");
@@@ -30,28 -29,28 +30,28 @@@ char qlcnic_driver_name[] = "qlcnic"
  static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
        "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
  
 -static struct workqueue_struct *qlcnic_wq;
  static int qlcnic_mac_learn;
  module_param(qlcnic_mac_learn, int, 0444);
 -MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
 +MODULE_PARM_DESC(qlcnic_mac_learn,
 +               "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
  
 -static int qlcnic_use_msi = 1;
 +int qlcnic_use_msi = 1;
  MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
  module_param_named(use_msi, qlcnic_use_msi, int, 0444);
  
 -static int qlcnic_use_msi_x = 1;
 +int qlcnic_use_msi_x = 1;
  MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
  module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
  
 -static int qlcnic_auto_fw_reset = 1;
 +int qlcnic_auto_fw_reset = 1;
  MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
  module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
  
 -static int qlcnic_load_fw_file;
 +int qlcnic_load_fw_file;
  MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
  module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
  
 -static int qlcnic_config_npars;
 +int qlcnic_config_npars;
  module_param(qlcnic_config_npars, int, 0444);
  MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
  
@@@ -63,6 -62,9 +63,6 @@@ static void qlcnic_tx_timeout(struct ne
  static void qlcnic_attach_work(struct work_struct *work);
  static void qlcnic_fwinit_work(struct work_struct *work);
  static void qlcnic_fw_poll_work(struct work_struct *work);
 -static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 -              work_func_t func, int delay);
 -static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
  #ifdef CONFIG_NET_POLL_CONTROLLER
  static void qlcnic_poll_controller(struct net_device *netdev);
  #endif
@@@ -75,9 -77,9 +75,9 @@@ static irqreturn_t qlcnic_tmp_intr(int 
  static irqreturn_t qlcnic_intr(int irq, void *data);
  static irqreturn_t qlcnic_msi_intr(int irq, void *data);
  static irqreturn_t qlcnic_msix_intr(int irq, void *data);
 +static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
  
  static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
 -static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
  static int qlcnic_start_firmware(struct qlcnic_adapter *);
  
  static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
@@@ -91,24 -93,15 +91,24 @@@ static int qlcnic_vlan_rx_del(struct ne
  #define QLCNIC_IS_TSO_CAPABLE(adapter)        \
        ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
  
 +static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +
 +      if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
 +              return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
 +      else
 +              return 1;
 +}
 +
  /*  PCI Device ID Table  */
  #define ENTRY(device) \
        {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
        .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
  
 -#define PCI_DEVICE_ID_QLOGIC_QLE824X  0x8020
 -
  static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
        ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
 +      ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
        {0,}
  };
  
@@@ -127,32 -120,6 +127,32 @@@ static const u32 msi_tgt_status[8] = 
        ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
  };
  
 +static const u32 qlcnic_reg_tbl[] = {
 +      0x1B20A8,       /* PEG_HALT_STAT1 */
 +      0x1B20AC,       /* PEG_HALT_STAT2 */
 +      0x1B20B0,       /* FW_HEARTBEAT */
 +      0x1B2100,       /* LOCK ID */
 +      0x1B2128,       /* FW_CAPABILITIES */
 +      0x1B2138,       /* drv active */
 +      0x1B2140,       /* dev state */
 +      0x1B2144,       /* drv state */
 +      0x1B2148,       /* drv scratch */
 +      0x1B214C,       /* dev partition info */
 +      0x1B2174,       /* drv idc ver */
 +      0x1B2150,       /* fw version major */
 +      0x1B2154,       /* fw version minor */
 +      0x1B2158,       /* fw version sub */
 +      0x1B219C,       /* npar state */
 +      0x1B21FC,       /* FW_IMG_VALID */
 +      0x1B2250,       /* CMD_PEG_STATE */
 +      0x1B233C,       /* RCV_PEG_STATE */
 +      0x1B23B4,       /* ASIC TEMP */
 +      0x1B216C,       /* FW api */
 +      0x1B2170,       /* drv op mode */
 +      0x13C010,       /* flash lock */
 +      0x13C014,       /* flash unlock */
 +};
 +
  static const struct qlcnic_board_info qlcnic_boards[] = {
        {0x1077, 0x8020, 0x1077, 0x203,
         "8200 Series Single Port 10GbE Converged Network Adapter"
  };
  
  #define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
 +#define QLC_MAX_SDS_RINGS     8
  
  static const
  struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@@ -198,6 -164,35 +198,6 @@@ void qlcnic_free_sds_rings(struct qlcni
        recv_ctx->sds_rings = NULL;
  }
  
 -static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
 -{
 -      memset(&adapter->stats, 0, sizeof(adapter->stats));
 -}
 -
 -static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
 -{
 -      u32 control;
 -      int pos;
 -
 -      pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
 -      if (pos) {
 -              pci_read_config_dword(pdev, pos, &control);
 -              if (enable)
 -                      control |= PCI_MSIX_FLAGS_ENABLE;
 -              else
 -                      control = 0;
 -              pci_write_config_dword(pdev, pos, control);
 -      }
 -}
 -
 -static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
 -{
 -      int i;
 -
 -      for (i = 0; i < count; i++)
 -              adapter->msix_entries[i].entry = i;
 -}
 -
  static int
  qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
  {
                return -EIO;
  
        memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
 -      memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
        memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
  
        /* set station address */
  
 -      if (!is_valid_ether_addr(netdev->perm_addr))
 +      if (!is_valid_ether_addr(netdev->dev_addr))
                dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
                                        netdev->dev_addr);
  
@@@ -229,7 -225,7 +229,7 @@@ static int qlcnic_set_mac(struct net_de
                return -EOPNOTSUPP;
  
        if (!is_valid_ether_addr(addr->sa_data))
 -              return -EADDRNOTAVAIL;
 +              return -EINVAL;
  
        if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                netif_device_detach(netdev);
        return 0;
  }
  
 +static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 +                      struct net_device *netdev, const unsigned char *addr)
 +{
 +      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +      int err = -EOPNOTSUPP;
 +
 +      if (!adapter->fdb_mac_learn) {
 +              pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
 +                      __func__);
 +              return err;
 +      }
 +
 +      if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
 +              if (is_unicast_ether_addr(addr))
 +                      err = qlcnic_nic_del_mac(adapter, addr);
 +              else if (is_multicast_ether_addr(addr))
 +                      err = dev_mc_del(netdev, addr);
 +              else
 +                      err = -EINVAL;
 +      }
 +      return err;
 +}
 +
 +static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 +                      struct net_device *netdev,
 +                      const unsigned char *addr, u16 flags)
 +{
 +      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +      int err = 0;
 +
 +      if (!adapter->fdb_mac_learn) {
 +              pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
 +                      __func__);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
 +              pr_info("%s: FDB e-switch is not enabled\n", __func__);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      if (ether_addr_equal(addr, adapter->mac_addr))
 +              return err;
 +
 +      if (is_unicast_ether_addr(addr))
 +              err = qlcnic_nic_add_mac(adapter, addr);
 +      else if (is_multicast_ether_addr(addr))
 +              err = dev_mc_add_excl(netdev, addr);
 +      else
 +              err = -EINVAL;
 +
 +      return err;
 +}
 +
 +static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
 +                      struct net_device *netdev, int idx)
 +{
 +      struct qlcnic_adapter *adapter = netdev_priv(netdev);
 +
 +      if (!adapter->fdb_mac_learn) {
 +              pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
 +                      __func__);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
 +              idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
 +
 +      return idx;
 +}
 +
 +static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
 +{
 +      while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 +              usleep_range(10000, 11000);
 +
 +      cancel_delayed_work_sync(&adapter->fw_work);
 +}
 +
  static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_open          = qlcnic_open,
        .ndo_stop          = qlcnic_close,
        .ndo_tx_timeout    = qlcnic_tx_timeout,
        .ndo_vlan_rx_add_vid    = qlcnic_vlan_rx_add,
        .ndo_vlan_rx_kill_vid   = qlcnic_vlan_rx_del,
 +      .ndo_fdb_add            = qlcnic_fdb_add,
 +      .ndo_fdb_del            = qlcnic_fdb_del,
 +      .ndo_fdb_dump           = qlcnic_fdb_dump,
  #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = qlcnic_poll_controller,
  #endif
@@@ -353,125 -267,50 +353,125 @@@ static const struct net_device_ops qlcn
  };
  
  static struct qlcnic_nic_template qlcnic_ops = {
 -      .config_bridged_mode = qlcnic_config_bridged_mode,
 -      .config_led = qlcnic_config_led,
 -      .start_firmware = qlcnic_start_firmware
 +      .config_bridged_mode    = qlcnic_config_bridged_mode,
 +      .config_led             = qlcnic_82xx_config_led,
 +      .start_firmware         = qlcnic_82xx_start_firmware,
 +      .request_reset          = qlcnic_82xx_dev_request_reset,
 +      .cancel_idc_work        = qlcnic_82xx_cancel_idc_work,
 +      .napi_add               = qlcnic_82xx_napi_add,
 +      .napi_del               = qlcnic_82xx_napi_del,
 +      .config_ipaddr          = qlcnic_82xx_config_ipaddr,
 +      .clear_legacy_intr      = qlcnic_82xx_clear_legacy_intr,
  };
  
 -static struct qlcnic_nic_template qlcnic_vf_ops = {
 -      .config_bridged_mode = qlcnicvf_config_bridged_mode,
 -      .config_led = qlcnicvf_config_led,
 -      .start_firmware = qlcnicvf_start_firmware
 +struct qlcnic_nic_template qlcnic_vf_ops = {
 +      .config_bridged_mode    = qlcnicvf_config_bridged_mode,
 +      .config_led             = qlcnicvf_config_led,
 +      .start_firmware         = qlcnicvf_start_firmware
  };
  
 -static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 +static struct qlcnic_hardware_ops qlcnic_hw_ops = {
 +      .read_crb                       = qlcnic_82xx_read_crb,
 +      .write_crb                      = qlcnic_82xx_write_crb,
 +      .read_reg                       = qlcnic_82xx_hw_read_wx_2M,
 +      .write_reg                      = qlcnic_82xx_hw_write_wx_2M,
 +      .get_mac_address                = qlcnic_82xx_get_mac_address,
 +      .setup_intr                     = qlcnic_82xx_setup_intr,
 +      .alloc_mbx_args                 = qlcnic_82xx_alloc_mbx_args,
 +      .mbx_cmd                        = qlcnic_82xx_issue_cmd,
 +      .get_func_no                    = qlcnic_82xx_get_func_no,
 +      .api_lock                       = qlcnic_82xx_api_lock,
 +      .api_unlock                     = qlcnic_82xx_api_unlock,
 +      .add_sysfs                      = qlcnic_82xx_add_sysfs,
 +      .remove_sysfs                   = qlcnic_82xx_remove_sysfs,
 +      .process_lb_rcv_ring_diag       = qlcnic_82xx_process_rcv_ring_diag,
 +      .create_rx_ctx                  = qlcnic_82xx_fw_cmd_create_rx_ctx,
 +      .create_tx_ctx                  = qlcnic_82xx_fw_cmd_create_tx_ctx,
 +      .setup_link_event               = qlcnic_82xx_linkevent_request,
 +      .get_nic_info                   = qlcnic_82xx_get_nic_info,
 +      .get_pci_info                   = qlcnic_82xx_get_pci_info,
 +      .set_nic_info                   = qlcnic_82xx_set_nic_info,
 +      .change_macvlan                 = qlcnic_82xx_sre_macaddr_change,
 +      .napi_enable                    = qlcnic_82xx_napi_enable,
 +      .napi_disable                   = qlcnic_82xx_napi_disable,
 +      .config_intr_coal               = qlcnic_82xx_config_intr_coalesce,
 +      .config_rss                     = qlcnic_82xx_config_rss,
 +      .config_hw_lro                  = qlcnic_82xx_config_hw_lro,
 +      .config_loopback                = qlcnic_82xx_set_lb_mode,
 +      .clear_loopback                 = qlcnic_82xx_clear_lb_mode,
 +      .config_promisc_mode            = qlcnic_82xx_nic_set_promisc,
 +      .change_l2_filter               = qlcnic_82xx_change_filter,
 +      .get_board_info                 = qlcnic_82xx_get_board_info,
 +};
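This ops table is what the probe path below plugs into ahw->hw_ops, so common
code can stay 82xx/83xx-agnostic. A minimal sketch of the kind of inline
wrapper this implies, assuming the driver headers dispatch through
ahw->hw_ops (the wrapper name here is illustrative; qlcnic_get_board_info()
used later in qlcnic_check_options() presumably resolves the same way):

/* Sketch: dispatch through the per-chip ops table rather than calling the
 * 82xx or 83xx routines directly.
 */
static inline int example_get_board_info(struct qlcnic_adapter *adapter)
{
        return adapter->ahw->hw_ops->get_board_info(adapter);
}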
 +
 +int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
  {
        struct pci_dev *pdev = adapter->pdev;
 -      int err = -1;
 +      int err = -1, i;
 +      int max_tx_rings;
 +
 +      if (!adapter->msix_entries) {
 +              adapter->msix_entries = kcalloc(num_msix,
 +                                              sizeof(struct msix_entry),
 +                                              GFP_KERNEL);
 +              if (!adapter->msix_entries)
 +                      return -ENOMEM;
 +      }
  
        adapter->max_sds_rings = 1;
        adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
 -      qlcnic_set_msix_bit(pdev, 0);
  
        if (adapter->ahw->msix_supported) {
   enable_msix:
 -              qlcnic_init_msix_entries(adapter, num_msix);
 +              for (i = 0; i < num_msix; i++)
 +                      adapter->msix_entries[i].entry = i;
                err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
                if (err == 0) {
                        adapter->flags |= QLCNIC_MSIX_ENABLED;
 -                      qlcnic_set_msix_bit(pdev, 1);
 -
 -                      adapter->max_sds_rings = num_msix;
 -
 +                      if (qlcnic_83xx_check(adapter)) {
 +                              adapter->ahw->num_msix = num_msix;
 +                              /* subtract mailbox and tx ring vectors */
 +                              max_tx_rings = adapter->max_drv_tx_rings;
 +                              adapter->max_sds_rings = num_msix -
 +                                                       max_tx_rings - 1;
 +                      } else {
 +                              adapter->max_sds_rings = num_msix;
 +                      }
                        dev_info(&pdev->dev, "using msi-x interrupts\n");
                        return err;
 -              }
 -              if (err > 0) {
 -                      num_msix = rounddown_pow_of_two(err);
 -                      if (num_msix)
 +              } else if (err > 0) {
 +                      dev_info(&pdev->dev,
 +                               "Unable to allocate %d MSI-X interrupt vectors\n",
 +                               num_msix);
 +                      if (qlcnic_83xx_check(adapter)) {
 +                              if (err < QLC_83XX_MINIMUM_VECTOR)
 +                                      return err;
 +                              err -= (adapter->max_drv_tx_rings + 1);
 +                              num_msix = rounddown_pow_of_two(err);
 +                              num_msix += (adapter->max_drv_tx_rings + 1);
 +                      } else {
 +                              num_msix = rounddown_pow_of_two(err);
 +                      }
 +
 +                      if (num_msix) {
 +                              dev_info(&pdev->dev,
 +                                       "Trying to allocate %d MSI-X interrupt vectors\n",
 +                                       num_msix);
                                goto enable_msix;
 +                      }
 +              } else {
 +                      dev_info(&pdev->dev,
 +                               "Unable to allocate %d MSI-X interrupt vectors\n",
 +                               num_msix);
                }
        }
 +
        return err;
  }
  
 -static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
 +static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
  {
 +      int err = 0;
        u32 offset, mask_reg;
        const struct qlcnic_legacy_intr_set *legacy_intrp;
        struct qlcnic_hardware_context *ahw = adapter->ahw;
                                                            offset);
                dev_info(&pdev->dev, "using msi interrupts\n");
                adapter->msix_entries[0].vector = pdev->irq;
 -              return;
 +              return err;
        }
 +      if (qlcnic_use_msi || qlcnic_use_msi_x)
 +              return -EOPNOTSUPP;
  
        legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
        adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
        adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
        dev_info(&pdev->dev, "using legacy interrupts\n");
        adapter->msix_entries[0].vector = pdev->irq;
 +      return err;
  }
  
 -static void
 -qlcnic_setup_intr(struct qlcnic_adapter *adapter)
 +int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
  {
 -      int num_msix;
 +      int num_msix, err = 0;
  
 -      if (adapter->ahw->msix_supported) {
 +      if (!num_intr)
 +              num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
 +
 +      if (adapter->ahw->msix_supported)
                num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
 -                              QLCNIC_DEF_NUM_STS_DESC_RINGS));
 -      else
 +                                              num_intr));
 +      else
                num_msix = 1;
  
 -      if (!qlcnic_enable_msix(adapter, num_msix))
 -              return;
 +      err = qlcnic_enable_msix(adapter, num_msix);
 +      if (err == -ENOMEM || !err)
 +              return err;
  
 -      qlcnic_enable_msi_legacy(adapter);
 +      err = qlcnic_enable_msi_legacy(adapter);
 +      if (!err)
 +              return err;
 +
 +      return -EIO;
  }
  
 -static void
 -qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
 +void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
  {
        if (adapter->flags & QLCNIC_MSIX_ENABLED)
                pci_disable_msix(adapter->pdev);
        if (adapter->flags & QLCNIC_MSI_ENABLED)
                pci_disable_msi(adapter->pdev);
 +
 +      kfree(adapter->msix_entries);
 +      adapter->msix_entries = NULL;
 +
 +      if (adapter->ahw->intr_tbl) {
 +              vfree(adapter->ahw->intr_tbl);
 +              adapter->ahw->intr_tbl = NULL;
 +      }
  }
  
  static void
@@@ -549,36 -371,7 +549,36 @@@ qlcnic_cleanup_pci_map(struct qlcnic_ad
                iounmap(adapter->ahw->pci_base0);
  }
  
 -static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 +static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
 +{
 +      struct qlcnic_pci_info *pci_info;
 +      int ret;
 +
 +      if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
 +              switch (adapter->ahw->port_type) {
 +              case QLCNIC_GBE:
 +                      adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
 +                      break;
 +              case QLCNIC_XGBE:
 +                      adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
 +                      break;
 +              }
 +              return 0;
 +      }
 +
 +      if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
 +              return 0;
 +
 +      pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
 +      if (!pci_info)
 +              return -ENOMEM;
 +
 +      ret = qlcnic_get_pci_info(adapter, pci_info);
 +      kfree(pci_info);
 +      return ret;
 +}
 +
 +int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_pci_info *pci_info;
        int i, ret = 0, j = 0;
                j++;
        }
  
 -      for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
 +      for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
                adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
 +              if (qlcnic_83xx_check(adapter))
 +                      qlcnic_enable_eswitch(adapter, i, 1);
 +      }
  
        kfree(pci_info);
        return 0;
@@@ -672,31 -462,40 +672,31 @@@ qlcnic_set_function_modes(struct qlcnic
                                        QLC_DEV_SET_DRV(0xf, id));
                }
        } else {
 -              data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
 +              data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
                data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
                        (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
                                         ahw->pci_func));
        }
 -      QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
        qlcnic_api_unlock(adapter);
  err_lock:
        return ret;
  }
  
 -static void
 -qlcnic_check_vf(struct qlcnic_adapter *adapter)
 +static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
 +                          const struct pci_device_id *ent)
  {
 -      void __iomem *msix_base_addr;
 -      void __iomem *priv_op;
 -      u32 func;
 -      u32 msix_base;
        u32 op_mode, priv_level;
  
        /* Determine FW API version */
 -      adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
 -                                           QLCNIC_FW_API);
 +      adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
 +                                                         QLCNIC_FW_API);
  
        /* Find PCI function number */
 -      pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
 -      msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
 -      msix_base = readl(msix_base_addr);
 -      func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
 -      adapter->ahw->pci_func = func;
 +      qlcnic_get_func_no(adapter);
  
        /* Determine function privilege level */
 -      priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
 -      op_mode = readl(priv_op);
 +      op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
        if (op_mode == QLC_DEV_DRV_DEFAULT)
                priv_level = QLCNIC_MGMT_FUNC;
        else
  }
  
  #define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
 +#define QLCNIC_83XX_BAR0_LENGTH 0x4000
  static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
  {
        switch (dev_id) {
        case PCI_DEVICE_ID_QLOGIC_QLE824X:
                *bar = QLCNIC_82XX_BAR0_LENGTH;
                break;
 +      case PCI_DEVICE_ID_QLOGIC_QLE834X:
 +              *bar = QLCNIC_83XX_BAR0_LENGTH;
 +              break;
        default:
                *bar = 0;
        }
@@@ -752,7 -547,6 +752,7 @@@ static int qlcnic_setup_pci_map(struct 
        }
  
        dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
 +
        ahw->pci_base0 = mem_ptr0;
        ahw->pci_len0 = pci_len0;
        offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
@@@ -787,26 -581,19 +787,26 @@@ static void qlcnic_get_board_name(struc
  static void
  qlcnic_check_options(struct qlcnic_adapter *adapter)
  {
 +      int err;
        u32 fw_major, fw_minor, fw_build, prev_fw_version;
        struct pci_dev *pdev = adapter->pdev;
 -      struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
 +      struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
  
        prev_fw_version = adapter->fw_version;
  
 -      fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
 -      fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
 -      fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
 +      fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
 +      fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
 +      fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
  
        adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
  
 -      if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
 +      err = qlcnic_get_board_info(adapter);
 +      if (err) {
 +              dev_err(&pdev->dev, "Error getting board config info.\n");
 +              return;
 +      }
 +      if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
                if (fw_dump->tmpl_hdr == NULL ||
                                adapter->fw_version > prev_fw_version) {
                        if (fw_dump->tmpl_hdr)
                }
        }
  
 -      dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
 -                      fw_major, fw_minor, fw_build);
 +      dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
 +               QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
 +
        if (adapter->ahw->port_type == QLCNIC_XGBE) {
                if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
                        adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
@@@ -862,19 -648,9 +862,19 @@@ qlcnic_initialize_nic(struct qlcnic_ada
        adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
        adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
        adapter->ahw->capabilities = nic_info.capabilities;
 +
 +      if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
 +              u32 temp;
 +              temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
 +              adapter->ahw->capabilities2 = temp;
 +      }
        adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
        adapter->ahw->max_mtu = nic_info.max_mtu;
  
 +      /* Disable NPAR for 83XX */
 +      if (qlcnic_83xx_check(adapter))
 +              return err;
 +
        if (adapter->ahw->capabilities & BIT_6)
                adapter->flags |= QLCNIC_ESWITCH_ENABLED;
        else
@@@ -933,7 -709,7 +933,7 @@@ void qlcnic_set_eswitch_port_features(s
        qlcnic_set_netdev_features(adapter, esw_cfg);
  }
  
 -static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
 +int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_esw_func_cfg esw_cfg;
  
@@@ -954,17 -730,14 +954,17 @@@ qlcnic_set_netdev_features(struct qlcni
                struct qlcnic_esw_func_cfg *esw_cfg)
  {
        struct net_device *netdev = adapter->netdev;
 -      netdev_features_t features, vlan_features;
 +      unsigned long features, vlan_features;
 +
 +      if (qlcnic_83xx_check(adapter))
 +              return;
  
        features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 -                      NETIF_F_IPV6_CSUM | NETIF_F_GRO);
 +                  NETIF_F_IPV6_CSUM | NETIF_F_GRO);
        vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
 -                      NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
 +                      NETIF_F_IPV6_CSUM);
  
 -      if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
 +      if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
                features |= (NETIF_F_TSO | NETIF_F_TSO6);
                vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
        }
  
        if (esw_cfg->offload_flags & BIT_0) {
                netdev->features |= features;
 -              if (!(esw_cfg->offload_flags & BIT_1))
 +              adapter->rx_csum = 1;
 +              if (!(esw_cfg->offload_flags & BIT_1)) {
                        netdev->features &= ~NETIF_F_TSO;
 -              if (!(esw_cfg->offload_flags & BIT_2))
 +                      features &= ~NETIF_F_TSO;
 +              }
 +              if (!(esw_cfg->offload_flags & BIT_2)) {
                        netdev->features &= ~NETIF_F_TSO6;
 +                      features &= ~NETIF_F_TSO6;
 +              }
        } else {
                netdev->features &= ~features;
 +              features &= ~features;
 +              adapter->rx_csum = 0;
        }
  
        netdev->vlan_features = (features & vlan_features);
  static int
  qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
  {
 -      void __iomem *priv_op;
        u32 op_mode, priv_level;
        int err = 0;
  
        if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
                return 0;
  
 -      priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
 -      op_mode = readl(priv_op);
 +      op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
        priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
  
        if (op_mode == QLC_DEV_DRV_DEFAULT)
        return err;
  }
  
 -static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
 +int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
  {
        struct qlcnic_esw_func_cfg esw_cfg;
        struct qlcnic_npar_info *npar;
        return 0;
  }
  
 +
  static int
  qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
                        struct qlcnic_npar_info *npar, int pci_func)
        return 0;
  }
  
 -static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
 +int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
  {
        int i, err;
        struct qlcnic_npar_info *npar;
                npar = &adapter->npars[i];
                pci_func = npar->pci_func;
                memset(&nic_info, 0, sizeof(struct qlcnic_info));
 -              err = qlcnic_get_nic_info(adapter,
 -                                        &nic_info, pci_func);
 +              err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
                if (err)
                        return err;
                nic_info.min_tx_bw = npar->min_bw;
@@@ -1141,16 -909,14 +1141,16 @@@ static int qlcnic_check_npar_opertional
        if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
                return 0;
  
 -      npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
 +      npar_state = QLC_SHARED_REG_RD32(adapter,
 +                                       QLCNIC_CRB_DEV_NPAR_STATE);
        while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
                msleep(1000);
 -              npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
 +              npar_state = QLC_SHARED_REG_RD32(adapter,
 +                                               QLCNIC_CRB_DEV_NPAR_STATE);
        }
        if (!npar_opt_timeo) {
                dev_err(&adapter->pdev->dev,
-                       "Waiting for NPAR state to opertional timeout\n");
+                       "Waiting for NPAR state to operational timeout\n");
                return -EIO;
        }
        return 0;
@@@ -1178,7 -944,8 +1178,7 @@@ qlcnic_set_mgmt_operations(struct qlcni
        return err;
  }
  
 -static int
 -qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 +int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
  {
        int err;
  
@@@ -1218,8 -985,9 +1218,8 @@@ check_fw_status
        if (err)
                goto err_out;
  
 -      QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
        qlcnic_idc_debug_info(adapter, 1);
 -
        err = qlcnic_check_eswitch_mode(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
        return 0;
  
  err_out:
 -      QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
        dev_err(&adapter->pdev->dev, "Device state set to failed\n");
  
        qlcnic_release_firmware(adapter);
@@@ -1249,7 -1017,6 +1249,7 @@@ qlcnic_request_irq(struct qlcnic_adapte
  {
        irq_handler_t handler;
        struct qlcnic_host_sds_ring *sds_ring;
 +      struct qlcnic_host_tx_ring *tx_ring;
        int err, ring;
  
        unsigned long flags = 0;
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
        if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
 -              handler = qlcnic_tmp_intr;
 +              if (qlcnic_82xx_check(adapter))
 +                      handler = qlcnic_tmp_intr;
                if (!QLCNIC_IS_MSI_FAMILY(adapter))
                        flags |= IRQF_SHARED;
  
                        handler = qlcnic_msi_intr;
                else {
                        flags |= IRQF_SHARED;
 -                      handler = qlcnic_intr;
 +                      if (qlcnic_82xx_check(adapter))
 +                              handler = qlcnic_intr;
 +                      else
 +                              handler = qlcnic_83xx_intr;
                }
        }
        adapter->irq = netdev->irq;
  
 -      for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 -              sds_ring = &recv_ctx->sds_rings[ring];
 -              sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
 -              err = request_irq(sds_ring->irq, handler,
 -                                flags, sds_ring->name, sds_ring);
 -              if (err)
 -                      return err;
 +      if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
 +              if (qlcnic_82xx_check(adapter) ||
 +                  (qlcnic_83xx_check(adapter) &&
 +                   (adapter->flags & QLCNIC_MSIX_ENABLED))) {
 +                      for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 +                              sds_ring = &recv_ctx->sds_rings[ring];
 +                              snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ,
 +                                       "%s[%d]", netdev->name, ring);
 +                              err = request_irq(sds_ring->irq, handler, flags,
 +                                                sds_ring->name, sds_ring);
 +                              if (err)
 +                                      return err;
 +                      }
 +              }
 +              if (qlcnic_83xx_check(adapter) &&
 +                  (adapter->flags & QLCNIC_MSIX_ENABLED)) {
 +                      handler = qlcnic_msix_tx_intr;
 +                      for (ring = 0; ring < adapter->max_drv_tx_rings;
 +                           ring++) {
 +                              tx_ring = &adapter->tx_ring[ring];
 +                              snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ,
 +                                       "%s[%d]", netdev->name,
 +                                       adapter->max_sds_rings + ring);
 +                              err = request_irq(tx_ring->irq, handler, flags,
 +                                                tx_ring->name, tx_ring);
 +                              if (err)
 +                                      return err;
 +                      }
 +              }
        }
 -
        return 0;
  }
  
@@@ -1315,48 -1057,21 +1315,48 @@@ qlcnic_free_irq(struct qlcnic_adapter *
  {
        int ring;
        struct qlcnic_host_sds_ring *sds_ring;
 +      struct qlcnic_host_tx_ring *tx_ring;
  
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
  
 -      for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 -              sds_ring = &recv_ctx->sds_rings[ring];
 -              free_irq(sds_ring->irq, sds_ring);
 +      if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
 +              if (qlcnic_82xx_check(adapter) ||
 +                  (qlcnic_83xx_check(adapter) &&
 +                   (adapter->flags & QLCNIC_MSIX_ENABLED))) {
 +                      for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 +                              sds_ring = &recv_ctx->sds_rings[ring];
 +                              free_irq(sds_ring->irq, sds_ring);
 +                      }
 +              }
 +              if (qlcnic_83xx_check(adapter)) {
 +                      for (ring = 0; ring < adapter->max_drv_tx_rings;
 +                           ring++) {
 +                              tx_ring = &adapter->tx_ring[ring];
 +                              if (tx_ring->irq)
 +                                      free_irq(tx_ring->irq, tx_ring);
 +                      }
 +              }
        }
  }
  
 -static int
 -__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 +static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
  {
 -      int ring;
 -      u32 capab2;
 +      u32 capab = 0;
 +
 +      if (qlcnic_82xx_check(adapter)) {
 +              if (adapter->ahw->capabilities2 &
 +                  QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
 +                      adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
 +      } else {
 +              capab = adapter->ahw->capabilities;
 +              if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
 +                      adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
 +      }
 +}
  
 +int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 +{
 +      int ring;
        struct qlcnic_host_rds_ring *rds_ring;
  
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return 0;
        if (qlcnic_set_eswitch_port_config(adapter))
                return -EIO;
 -
 -      if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
 -              capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
 -              if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
 -                      adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
 -      }
 +      qlcnic_get_lro_mss_capability(adapter);
  
        if (qlcnic_fw_create_ctx(adapter))
                return -EIO;
  
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &adapter->recv_ctx->rds_rings[ring];
 -              qlcnic_post_rx_buffers(adapter, rds_ring);
 +              qlcnic_post_rx_buffers(adapter, rds_ring, ring);
        }
  
        qlcnic_set_multi(netdev);
        return 0;
  }
  
 -/* Usage: During resume and firmware recovery module.*/
 -
 -static int
 -qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
 +int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
  {
        int err = 0;
  
        return err;
  }
  
 -static void
 -__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 +void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
  {
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;
  
  /* Usage: During suspend and firmware recovery module */
  
 -static void
 -qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 +void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
  {
        rtnl_lock();
        if (netif_running(netdev))
  
  }
  
 -static int
 +int
  qlcnic_attach(struct qlcnic_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
@@@ -1497,7 -1222,8 +1497,7 @@@ err_out_napi_del
        return err;
  }
  
 -static void
 -qlcnic_detach(struct qlcnic_adapter *adapter)
 +void qlcnic_detach(struct qlcnic_adapter *adapter)
  {
        if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
                return;
@@@ -1546,9 -1272,21 +1546,9 @@@ out
  static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
  {
        int err = 0;
 -      adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
 -                              GFP_KERNEL);
 -      if (!adapter->ahw) {
 -              dev_err(&adapter->pdev->dev,
 -                      "Failed to allocate recv ctx resources for adapter\n");
 -              err = -ENOMEM;
 -              goto err_out;
 -      }
        adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
                                GFP_KERNEL);
        if (!adapter->recv_ctx) {
 -              dev_err(&adapter->pdev->dev,
 -                      "Failed to allocate recv ctx resources for adapter\n");
 -              kfree(adapter->ahw);
 -              adapter->ahw = NULL;
                err = -ENOMEM;
                goto err_out;
        }
        adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
        adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
        adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
 +      /* clear stats */
 +      memset(&adapter->stats, 0, sizeof(adapter->stats));
  err_out:
        return err;
  }
@@@ -1571,9 -1307,8 +1571,9 @@@ static void qlcnic_free_adapter_resourc
                vfree(adapter->ahw->fw_dump.tmpl_hdr);
                adapter->ahw->fw_dump.tmpl_hdr = NULL;
        }
 -      kfree(adapter->ahw);
 -      adapter->ahw = NULL;
 +
 +      kfree(adapter->ahw->reset.buff);
 +      adapter->ahw->fw_dump.tmpl_hdr = NULL;
  }
  
  int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
  
        adapter->max_sds_rings = 1;
        adapter->ahw->diag_test = test;
 +      adapter->ahw->linkup = 0;
  
        ret = qlcnic_attach(adapter);
        if (ret) {
  
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &adapter->recv_ctx->rds_rings[ring];
 -              qlcnic_post_rx_buffers(adapter, rds_ring);
 +              qlcnic_post_rx_buffers(adapter, rds_ring, ring);
        }
  
        if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
@@@ -1648,7 -1382,6 +1648,7 @@@ qlcnic_reset_hw_context(struct qlcnic_a
        netif_device_attach(netdev);
  
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
 +      dev_err(&adapter->pdev->dev, "%s:\n", __func__);
        return 0;
  }
  
@@@ -1692,40 -1425,34 +1692,40 @@@ qlcnic_setup_netdev(struct qlcnic_adapt
        int err;
        struct pci_dev *pdev = adapter->pdev;
  
 +      adapter->rx_csum = 1;
        adapter->ahw->mc_enabled = 0;
 -      adapter->ahw->max_mc_count = 38;
 +      adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
  
        netdev->netdev_ops         = &qlcnic_netdev_ops;
 -      netdev->watchdog_timeo     = 5*HZ;
 +      netdev->watchdog_timeo     = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
  
        qlcnic_change_mtu(netdev, netdev->mtu);
  
        SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
  
 -      netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
 -              NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
 +      netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 +                           NETIF_F_IPV6_CSUM | NETIF_F_GRO |
 +                           NETIF_F_HW_VLAN_RX);
 +      netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
 +                                NETIF_F_IPV6_CSUM);
 +
 +      if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
 +              netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
 +              netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
 +      }
  
 -      if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
 -              netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
 -      if (pci_using_dac == 1)
 -              netdev->hw_features |= NETIF_F_HIGHDMA;
 +      if (pci_using_dac) {
 +              netdev->features |= NETIF_F_HIGHDMA;
 +              netdev->vlan_features |= NETIF_F_HIGHDMA;
 +      }
  
 -      netdev->vlan_features = netdev->hw_features;
 +      if (qlcnic_vlan_tx_check(adapter))
 +              netdev->features |= (NETIF_F_HW_VLAN_TX);
  
 -      if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
 -              netdev->hw_features |= NETIF_F_HW_VLAN_TX;
        if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
 -              netdev->hw_features |= NETIF_F_LRO;
 -
 -      netdev->features |= netdev->hw_features |
 -              NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
 +              netdev->features |= NETIF_F_LRO;
  
 +      netdev->hw_features = netdev->features;
        netdev->irq = adapter->msix_entries[0].vector;
  
        err = register_netdev(netdev);
@@@ -1753,61 -1480,17 +1753,61 @@@ static int qlcnic_set_dma_mask(struct p
        return 0;
  }
  
 -static int
 -qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
 +void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
  {
 -      adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
 -                                      GFP_KERNEL);
 +      int ring;
 +      struct qlcnic_host_tx_ring *tx_ring;
  
 -      if (adapter->msix_entries)
 -              return 0;
 +      for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +              tx_ring = &adapter->tx_ring[ring];
 +              if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
 +                      vfree(tx_ring->cmd_buf_arr);
 +                      tx_ring->cmd_buf_arr = NULL;
 +              }
 +      }
 +      if (adapter->tx_ring != NULL)
 +              kfree(adapter->tx_ring);
 +}
 +
 +int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
 +                        struct net_device *netdev)
 +{
 +      int ring, vector, index;
 +      struct qlcnic_host_tx_ring *tx_ring;
 +      struct qlcnic_cmd_buffer *cmd_buf_arr;
 +
 +      tx_ring = kcalloc(adapter->max_drv_tx_rings,
 +                        sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
 +      if (tx_ring == NULL)
 +              return -ENOMEM;
  
 -      dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
 -      return -ENOMEM;
 +      adapter->tx_ring = tx_ring;
 +
 +      for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +              tx_ring = &adapter->tx_ring[ring];
 +              tx_ring->num_desc = adapter->num_txd;
 +              tx_ring->txq = netdev_get_tx_queue(netdev, ring);
 +              cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
 +              if (cmd_buf_arr == NULL) {
 +                      qlcnic_free_tx_rings(adapter);
 +                      return -ENOMEM;
 +              }
 +              memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 +              tx_ring->cmd_buf_arr = cmd_buf_arr;
 +      }
 +
 +      if (qlcnic_83xx_check(adapter)) {
 +              for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 +                      tx_ring = &adapter->tx_ring[ring];
 +                      tx_ring->adapter = adapter;
 +                      if (adapter->flags & QLCNIC_MSIX_ENABLED) {
 +                              index = adapter->max_sds_rings + ring;
 +                              vector = adapter->msix_entries[index].vector;
 +                              tx_ring->irq = vector;
 +                      }
 +              }
 +      }
 +      return 0;
  }
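
The new qlcnic_alloc_tx_rings()/qlcnic_free_tx_rings() pair above follows the usual allocate-or-unwind shape: a kcalloc'd array of ring descriptors, a vzalloc'd command-buffer array per ring, and a full teardown as soon as any allocation fails (vzalloc() already returns zeroed memory, so the extra memset() in the hunk is redundant but harmless). A minimal sketch of that shape, with made-up demo_* types standing in for the qlcnic structures; it compiles only inside a kernel tree:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct demo_ring {
	void *buf;		/* per-ring buffer, vzalloc'd */
	unsigned int nents;
};

static void demo_free_rings(struct demo_ring *rings, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		vfree(rings[i].buf);	/* vfree(NULL) is a no-op */
	kfree(rings);
}

static struct demo_ring *demo_alloc_rings(int nr, unsigned int nents)
{
	struct demo_ring *rings;
	int i;

	rings = kcalloc(nr, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return NULL;

	for (i = 0; i < nr; i++) {
		rings[i].nents = nents;
		rings[i].buf = vzalloc(nents * sizeof(u64));
		if (!rings[i].buf) {
			demo_free_rings(rings, nr);	/* unwind everything allocated so far */
			return NULL;
		}
	}
	return rings;
}

Because kcalloc() zeroes the array, the unwind can safely walk all nr entries even when the failure happened part-way through.
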
  
  static int
@@@ -1815,10 -1498,9 +1815,10 @@@ qlcnic_probe(struct pci_dev *pdev, cons
  {
        struct net_device *netdev = NULL;
        struct qlcnic_adapter *adapter = NULL;
 +      struct qlcnic_hardware_context *ahw;
        int err, pci_using_dac = -1;
 -      uint8_t revision_id;
 -      char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
 +      u32 capab2;
 +      char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
  
        err = pci_enable_device(pdev);
        if (err)
        pci_set_master(pdev);
        pci_enable_pcie_error_reporting(pdev);
  
 +      ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
 +      if (!ahw)
 +              goto err_out_free_res;
 +
 +      if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) {
 +              ahw->hw_ops = &qlcnic_hw_ops;
 +              ahw->reg_tbl = (u32 *)qlcnic_reg_tbl;
 +      } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
 +              qlcnic_83xx_register_map(ahw);
 +      } else {
 +              goto err_out_free_hw_res;
 +      }
 +
 +      err = qlcnic_setup_pci_map(pdev, ahw);
 +      if (err)
 +              goto err_out_free_hw_res;
 +
        netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
        if (!netdev) {
                err = -ENOMEM;
 -              goto err_out_free_res;
 +              goto err_out_iounmap;
        }
  
        SET_NETDEV_DEV(netdev, &pdev->dev);
        adapter = netdev_priv(netdev);
        adapter->netdev  = netdev;
        adapter->pdev    = pdev;
 +      adapter->ahw = ahw;
 +
 +      adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
 +      if (adapter->qlcnic_wq == NULL) {
 +              dev_err(&pdev->dev, "Failed to create workqueue\n");
 +              goto err_out_free_netdev;
 +      }
  
        err = qlcnic_alloc_adapter_resources(adapter);
        if (err)
                goto err_out_free_netdev;
  
        adapter->dev_rst_time = jiffies;
 -      revision_id = pdev->revision;
 -      adapter->ahw->revision_id = revision_id;
 -      adapter->mac_learn = qlcnic_mac_learn;
 +      adapter->ahw->revision_id = pdev->revision;
 +      if (qlcnic_mac_learn == FDB_MAC_LEARN)
 +              adapter->fdb_mac_learn = true;
 +      else if (qlcnic_mac_learn == DRV_MAC_LEARN)
 +              adapter->drv_mac_learn = true;
 +      adapter->max_drv_tx_rings = 1;
  
        rwlock_init(&adapter->ahw->crb_lock);
        mutex_init(&adapter->ahw->mem_lock);
        spin_lock_init(&adapter->tx_clean_lock);
        INIT_LIST_HEAD(&adapter->mac_list);
  
 -      err = qlcnic_setup_pci_map(pdev, adapter->ahw);
 -      if (err)
 -              goto err_out_free_hw;
 -      qlcnic_check_vf(adapter);
 -
 -      /* This will be reset for mezz cards  */
 -      adapter->portnum = adapter->ahw->pci_func;
 -
 -      err = qlcnic_get_board_info(adapter);
 -      if (err) {
 -              dev_err(&pdev->dev, "Error getting board config info.\n");
 -              goto err_out_iounmap;
 -      }
 -
 -      err = qlcnic_setup_idc_param(adapter);
 -      if (err)
 -              goto err_out_iounmap;
 +      if (qlcnic_82xx_check(adapter)) {
 +              qlcnic_check_vf(adapter, ent);
 +              adapter->portnum = adapter->ahw->pci_func;
 +              err = qlcnic_start_firmware(adapter);
 +              if (err) {
 +                      dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
 +                      goto err_out_free_hw;
 +              }
  
 -      adapter->flags |= QLCNIC_NEED_FLR;
 +              err = qlcnic_setup_idc_param(adapter);
 +              if (err)
 +                      goto err_out_free_hw;
  
 -      err = adapter->nic_ops->start_firmware(adapter);
 -      if (err) {
 -              dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
 -                      "\t\tIf reboot doesn't help, try flashing the card\n");
 -              goto err_out_maintenance_mode;
 +              adapter->flags |= QLCNIC_NEED_FLR;
 +      } else if (qlcnic_83xx_check(adapter)) {
 +              qlcnic_83xx_check_vf(adapter, ent);
 +              adapter->portnum = adapter->ahw->pci_func;
 +              err = qlcnic_83xx_init(adapter);
 +              if (err) {
 +                      dev_err(&pdev->dev, "%s: failed\n", __func__);
 +                      goto err_out_free_hw;
 +              }
 +      } else {
 +              dev_err(&pdev->dev,
 +                      "%s: failed. Please Reboot\n", __func__);
 +              goto err_out_free_hw;
        }
  
        if (qlcnic_read_mac_addr(adapter))
  
        if (adapter->portnum == 0) {
                qlcnic_get_board_name(adapter, board_name);
 +
                pr_info("%s: %s Board Chip rev 0x%x\n",
                        module_name(THIS_MODULE),
                        board_name, adapter->ahw->revision_id);
        }
 +      err = qlcnic_setup_intr(adapter, 0);
 +      if (err) {
 +              dev_err(&pdev->dev, "Failed to setup interrupt\n");
 +              goto err_out_disable_msi;
 +      }
  
 -      qlcnic_clear_stats(adapter);
 -
 -      err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
 -      if (err)
 -              goto err_out_decr_ref;
 -
 -      qlcnic_setup_intr(adapter);
 +      if (qlcnic_83xx_check(adapter)) {
 +              err = qlcnic_83xx_setup_mbx_intr(adapter);
 +              if (err)
 +                      goto err_out_disable_msi;
 +      }
  
        err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
        if (err)
 -              goto err_out_disable_msi;
 +              goto err_out_disable_mbx_intr;
 +
 +      if (qlcnic_82xx_check(adapter)) {
 +              if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
 +                      capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
 +                      if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB)
 +                              qlcnic_fw_cmd_set_drv_version(adapter);
 +              }
 +      }
  
        pci_set_drvdata(pdev, adapter);
  
                break;
        }
  
 -      if (adapter->mac_learn)
 +      if (qlcnic_get_act_pci_func(adapter))
 +              goto err_out_disable_mbx_intr;
 +
 +      if (adapter->drv_mac_learn)
                qlcnic_alloc_lb_filters_mem(adapter);
  
 -      qlcnic_create_diag_entries(adapter);
 +      qlcnic_add_sysfs(adapter);
  
        return 0;
  
 +err_out_disable_mbx_intr:
 +      if (qlcnic_83xx_check(adapter))
 +              qlcnic_83xx_free_mbx_intr(adapter);
 +
  err_out_disable_msi:
        qlcnic_teardown_intr(adapter);
 -      kfree(adapter->msix_entries);
 -
 -err_out_decr_ref:
 +      qlcnic_cancel_idc_work(adapter);
        qlcnic_clr_all_drv_state(adapter, 0);
  
 -err_out_iounmap:
 -      qlcnic_cleanup_pci_map(adapter);
 -
  err_out_free_hw:
        qlcnic_free_adapter_resources(adapter);
  
  err_out_free_netdev:
        free_netdev(netdev);
  
 +err_out_iounmap:
 +      qlcnic_cleanup_pci_map(adapter);
 +
 +err_out_free_hw_res:
 +      kfree(ahw);
 +
  err_out_free_res:
        pci_release_regions(pdev);
  
@@@ -2011,13 -1645,24 +2011,13 @@@ err_out_disable_pdev
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        return err;
 -
 -err_out_maintenance_mode:
 -      netdev->netdev_ops = &qlcnic_netdev_failed_ops;
 -      SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
 -      err = register_netdev(netdev);
 -      if (err) {
 -              dev_err(&pdev->dev, "failed to register net device\n");
 -              goto err_out_decr_ref;
 -      }
 -      pci_set_drvdata(pdev, adapter);
 -      qlcnic_create_diag_entries(adapter);
 -      return 0;
  }
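
The err_out_* labels above (and the reordered ones earlier in qlcnic_probe()) are the standard goto-based unwind: each failure jumps to the label that releases exactly what has already been acquired, and the labels run in reverse order of acquisition. A compact sketch of the idiom with illustrative resources; demo_probe() is not the driver's real code:

#include <linux/pci.h>
#include <linux/slab.h>

struct demo_priv {
	void __iomem *hw_regs;
};

static int demo_probe(struct pci_dev *pdev)
{
	struct demo_priv *priv;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "demo");
	if (err)
		goto err_disable;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto err_release;
	}

	priv->hw_regs = pci_ioremap_bar(pdev, 0);
	if (!priv->hw_regs) {
		err = -EIO;
		goto err_free_priv;
	}

	pci_set_drvdata(pdev, priv);
	return 0;

err_free_priv:			/* labels in reverse order of acquisition */
	kfree(priv);
err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}
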
  
  static void qlcnic_remove(struct pci_dev *pdev)
  {
        struct qlcnic_adapter *adapter;
        struct net_device *netdev;
 +      struct qlcnic_hardware_context *ahw;
  
        adapter = pci_get_drvdata(pdev);
        if (adapter == NULL)
  
        netdev = adapter->netdev;
  
 -      qlcnic_cancel_fw_work(adapter);
 +      qlcnic_cancel_idc_work(adapter);
 +      ahw = adapter->ahw;
  
        unregister_netdev(netdev);
  
 +      if (qlcnic_83xx_check(adapter)) {
 +              qlcnic_83xx_free_mbx_intr(adapter);
 +              qlcnic_83xx_register_nic_idc_func(adapter, 0);
 +              cancel_delayed_work_sync(&adapter->idc_aen_work);
 +      }
 +
        qlcnic_detach(adapter);
  
        if (adapter->npars != NULL)
        qlcnic_free_lb_filters_mem(adapter);
  
        qlcnic_teardown_intr(adapter);
 -      kfree(adapter->msix_entries);
  
 -      qlcnic_remove_diag_entries(adapter);
 +      qlcnic_remove_sysfs(adapter);
  
        qlcnic_cleanup_pci_map(adapter);
  
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
  
 +      if (adapter->qlcnic_wq) {
 +              destroy_workqueue(adapter->qlcnic_wq);
 +              adapter->qlcnic_wq = NULL;
 +      }
        qlcnic_free_adapter_resources(adapter);
 +      kfree(ahw);
        free_netdev(netdev);
  }
  static int __qlcnic_shutdown(struct pci_dev *pdev)
  
        netif_device_detach(netdev);
  
 -      qlcnic_cancel_fw_work(adapter);
 +      qlcnic_cancel_idc_work(adapter);
  
        if (netif_running(netdev))
                qlcnic_down(adapter, netdev);
        retval = pci_save_state(pdev);
        if (retval)
                return retval;
 -
        if (qlcnic_82xx_check(adapter)) {
                if (qlcnic_wol_supported(adapter)) {
                        pci_enable_wake(pdev, PCI_D3cold, 1);
@@@ -2139,7 -1774,7 +2139,7 @@@ qlcnic_resume(struct pci_dev *pdev
        pci_set_master(pdev);
        pci_restore_state(pdev);
  
 -      err = adapter->nic_ops->start_firmware(adapter);
 +      err = qlcnic_start_firmware(adapter);
        if (err) {
                dev_err(&pdev->dev, "failed to start firmware\n");
                return err;
@@@ -2162,8 -1797,14 +2162,8 @@@ done
  static int qlcnic_open(struct net_device *netdev)
  {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 -      u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
        int err;
  
 -      if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
 -              netdev_err(netdev, "Device in FAILED state\n");
 -              return -EIO;
 -      }
 -
        netif_carrier_off(netdev);
  
        err = qlcnic_attach(adapter);
@@@ -2191,7 -1832,6 +2191,7 @@@ static int qlcnic_close(struct net_devi
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
  
        __qlcnic_down(adapter, netdev);
 +
        return 0;
  }
  
@@@ -2199,53 -1839,22 +2199,53 @@@ void qlcnic_alloc_lb_filters_mem(struc
  {
        void *head;
        int i;
 +      struct net_device *netdev = adapter->netdev;
 +      u32 filter_size = 0;
 +      u16 act_pci_func = 0;
  
        if (adapter->fhash.fmax && adapter->fhash.fhead)
                return;
  
 +      act_pci_func = adapter->ahw->act_pci_func;
        spin_lock_init(&adapter->mac_learn_lock);
 +      spin_lock_init(&adapter->rx_mac_learn_lock);
 +
 +      if (qlcnic_82xx_check(adapter)) {
 +              filter_size = QLCNIC_LB_MAX_FILTERS;
 +              adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
 +      } else {
 +              filter_size = QLC_83XX_LB_MAX_FILTERS;
 +              adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
 +      }
 +
 +      head = kcalloc(adapter->fhash.fbucket_size,
 +                     sizeof(struct hlist_head), GFP_ATOMIC);
  
 -      head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
 -                                                              GFP_KERNEL);
        if (!head)
                return;
  
 -      adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
 +      adapter->fhash.fmax = (filter_size / act_pci_func);
        adapter->fhash.fhead = head;
  
 -      for (i = 0; i < adapter->fhash.fmax; i++)
 +      netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
 +                  act_pci_func, adapter->fhash.fmax);
 +
 +      for (i = 0; i < adapter->fhash.fbucket_size; i++)
                INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
 +
 +      adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
 +
 +      head = kcalloc(adapter->rx_fhash.fbucket_size,
 +                     sizeof(struct hlist_head), GFP_ATOMIC);
 +
 +      if (!head)
 +              return;
 +
 +      adapter->rx_fhash.fmax = (filter_size / act_pci_func);
 +      adapter->rx_fhash.fhead = head;
 +
 +      for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
 +              INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
  }
  
  static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
  
        adapter->fhash.fhead = NULL;
        adapter->fhash.fmax = 0;
 +
 +      if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
 +              kfree(adapter->rx_fhash.fhead);
 +
 +      adapter->rx_fhash.fmax = 0;
 +      adapter->rx_fhash.fhead = NULL;
  }
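
qlcnic_alloc_lb_filters_mem() above sizes its MAC-learning tables per adapter: a power-of-two array of hlist_head buckets plus a per-function quota (the filter count divided by the number of active PCI functions). The underlying structure is just a bucketed hlist; a stripped-down sketch with hypothetical demo_* names:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_filter {
	struct hlist_node node;
	u8 mac[6];
};

struct demo_fhash {
	struct hlist_head *head;
	u16 bucket_size;	/* power of two, so the mask below works */
	u16 fmax;		/* per-function quota */
};

static int demo_fhash_init(struct demo_fhash *h, u16 buckets, u16 fmax)
{
	int i;

	/* GFP_ATOMIC mirrors the hunk; a sleepable caller could use GFP_KERNEL */
	h->head = kcalloc(buckets, sizeof(struct hlist_head), GFP_ATOMIC);
	if (!h->head)
		return -ENOMEM;

	h->bucket_size = buckets;
	h->fmax = fmax;
	for (i = 0; i < buckets; i++)
		INIT_HLIST_HEAD(&h->head[i]);
	return 0;
}

static void demo_fhash_add(struct demo_fhash *h, struct demo_filter *f, u32 hash)
{
	hlist_add_head(&f->node, &h->head[hash & (h->bucket_size - 1)]);
}
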
  
 -static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
 +int qlcnic_check_temp(struct qlcnic_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
        u32 temp_state, temp_val, temp = 0;
        int rv = 0;
  
 +      if (qlcnic_83xx_check(adapter))
 +              temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
 +
        if (qlcnic_82xx_check(adapter))
 -              temp = QLCRD32(adapter, CRB_TEMP_STATE);
 +              temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
  
        temp_state = qlcnic_get_temp_state(temp);
        temp_val = qlcnic_get_temp_val(temp);
@@@ -2333,7 -1933,7 +2333,7 @@@ static struct net_device_stats *qlcnic_
        return stats;
  }
  
 -static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
 +irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
  {
        u32 status;
  
@@@ -2409,14 -2009,6 +2409,14 @@@ static irqreturn_t qlcnic_msix_intr(in
        return IRQ_HANDLED;
  }
  
 +static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
 +{
 +      struct qlcnic_host_tx_ring *tx_ring = data;
 +
 +      napi_schedule(&tx_ring->napi);
 +      return IRQ_HANDLED;
 +}
 +
  #ifdef CONFIG_NET_POLL_CONTROLLER
  static void qlcnic_poll_controller(struct net_device *netdev)
  {
@@@ -2443,7 -2035,7 +2443,7 @@@ qlcnic_idc_debug_info(struct qlcnic_ada
        val |= encoding << 7;
        val |= (jiffies - adapter->dev_rst_time) << 8;
  
 -      QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
        adapter->dev_rst_time = jiffies;
  }
  
@@@ -2458,14 -2050,14 +2458,14 @@@ qlcnic_set_drv_state(struct qlcnic_adap
        if (qlcnic_api_lock(adapter))
                return -EIO;
  
 -      val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +      val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
  
        if (state == QLCNIC_DEV_NEED_RESET)
                QLC_DEV_SET_RST_RDY(val, adapter->portnum);
        else if (state == QLCNIC_DEV_NEED_QUISCENT)
                QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
  
 -      QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
  
        qlcnic_api_unlock(adapter);
  
@@@ -2480,9 -2072,9 +2480,9 @@@ qlcnic_clr_drv_state(struct qlcnic_adap
        if (qlcnic_api_lock(adapter))
                return -EBUSY;
  
 -      val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +      val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
        QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
 -      QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
  
        qlcnic_api_unlock(adapter);
  
@@@ -2497,22 -2089,20 +2497,22 @@@ qlcnic_clr_all_drv_state(struct qlcnic_
        if (qlcnic_api_lock(adapter))
                goto err;
  
 -      val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
 +      val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
        QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
 -      QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
  
        if (failed) {
 -              QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
 +                                  QLCNIC_DEV_FAILED);
                dev_info(&adapter->pdev->dev,
                                "Device state set to Failed. Please Reboot\n");
        } else if (!(val & 0x11111111))
 -              QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
 +                                  QLCNIC_DEV_COLD);
  
 -      val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +      val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
        QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
 -      QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
  
        qlcnic_api_unlock(adapter);
  err:
@@@ -2527,13 -2117,12 +2527,13 @@@ static in
  qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
  {
        int act, state, active_mask;
 +      struct qlcnic_hardware_context *ahw = adapter->ahw;
  
 -      state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 -      act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
 +      state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
 +      act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
  
        if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
 -              active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
 +              active_mask = (~(1 << (ahw->pci_func * 4)));
                act = act & active_mask;
        }
  
  
  static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
  {
 -      u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
 +      u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
  
        if (val != QLCNIC_DRV_IDC_VER) {
                dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
@@@ -2570,21 -2159,19 +2570,21 @@@ qlcnic_can_start_firmware(struct qlcnic
        if (qlcnic_api_lock(adapter))
                return -1;
  
 -      val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
 +      val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
        if (!(val & (1 << (portnum * 4)))) {
                QLC_DEV_SET_REF_CNT(val, portnum);
 -              QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
        }
  
 -      prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 +      prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
        QLCDB(adapter, HW, "Device state = %u\n", prev_state);
  
        switch (prev_state) {
        case QLCNIC_DEV_COLD:
 -              QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
 -              QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
 +                                  QLCNIC_DEV_INITIALIZING);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
 +                                  QLCNIC_DRV_IDC_VER);
                qlcnic_idc_debug_info(adapter, 0);
                qlcnic_api_unlock(adapter);
                return 1;
                return ret;
  
        case QLCNIC_DEV_NEED_RESET:
 -              val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +              val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
                QLC_DEV_SET_RST_RDY(val, portnum);
 -              QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
                break;
  
        case QLCNIC_DEV_NEED_QUISCENT:
 -              val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +              val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
                QLC_DEV_SET_QSCNT_RDY(val, portnum);
 -              QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
                break;
  
        case QLCNIC_DEV_FAILED:
  
        do {
                msleep(1000);
 -              prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 +              prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
  
                if (prev_state == QLCNIC_DEV_QUISCENT)
                        continue;
        if (qlcnic_api_lock(adapter))
                return -1;
  
 -      val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +      val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
        QLC_DEV_CLR_RST_QSCNT(val, portnum);
 -      QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
  
        ret = qlcnic_check_idc_ver(adapter);
        qlcnic_api_unlock(adapter);
@@@ -2656,7 -2243,7 +2656,7 @@@ qlcnic_fwinit_work(struct work_struct *
        if (qlcnic_api_lock(adapter))
                goto err_ret;
  
 -      dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 +      dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
        if (dev_state == QLCNIC_DEV_QUISCENT ||
            dev_state == QLCNIC_DEV_NEED_QUISCENT) {
                qlcnic_api_unlock(adapter);
  
        if (!qlcnic_check_drv_state(adapter)) {
  skip_ack_check:
 -              dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 +              dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
  
                if (dev_state == QLCNIC_DEV_NEED_RESET) {
 -                      QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
 -                                              QLCNIC_DEV_INITIALIZING);
 +                      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
 +                                          QLCNIC_DEV_INITIALIZING);
                        set_bit(__QLCNIC_START_FW, &adapter->state);
                        QLCDB(adapter, DRV, "Restarting fw\n");
                        qlcnic_idc_debug_info(adapter, 0);
 -                      val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 +                      val = QLC_SHARED_REG_RD32(adapter,
 +                                                QLCNIC_CRB_DRV_STATE);
                        QLC_DEV_SET_RST_RDY(val, adapter->portnum);
 -                      QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
 +                      QLC_SHARED_REG_WR32(adapter,
 +                                          QLCNIC_CRB_DRV_STATE, val);
                }
  
                qlcnic_api_unlock(adapter);
        qlcnic_api_unlock(adapter);
  
  wait_npar:
 -      dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 +      dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
        QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
  
        switch (dev_state) {
        case QLCNIC_DEV_READY:
 -              if (!adapter->nic_ops->start_firmware(adapter)) {
 +              if (!qlcnic_start_firmware(adapter)) {
                        qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
                        adapter->fw_wait_cnt = 0;
                        return;
@@@ -2765,7 -2350,7 +2765,7 @@@ qlcnic_detach_work(struct work_struct *
        } else
                qlcnic_down(adapter, netdev);
  
 -      status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
 +      status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
  
        if (status & QLCNIC_RCODE_FATAL_ERROR) {
                dev_err(&adapter->pdev->dev,
@@@ -2816,18 -2401,19 +2816,18 @@@ qlcnic_set_npar_non_operational(struct 
  {
        u32 state;
  
 -      state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
 +      state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
        if (state == QLCNIC_DEV_NPAR_NON_OPER)
                return;
  
        if (qlcnic_api_lock(adapter))
                return;
 -      QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
 +                          QLCNIC_DEV_NPAR_NON_OPER);
        qlcnic_api_unlock(adapter);
  }
  
 -/*Transit to RESET state from READY state only */
 -void
 -qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
 +void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
  {
        u32 state, xg_val = 0, gb_val = 0;
  
        dev_info(&adapter->pdev->dev, "Pause control frames disabled"
                                " on all ports\n");
        adapter->need_fw_reset = 1;
 +
        if (qlcnic_api_lock(adapter))
                return;
  
 -      state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 -      if (state  == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
 -              netdev_err(adapter->netdev,
 -                              "Device is in FAILED state, Please Reboot\n");
 -              qlcnic_api_unlock(adapter);
 -              return;
 -      }
 +      state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
  
        if (state == QLCNIC_DEV_READY) {
 -              QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
 +                                  QLCNIC_DEV_NEED_RESET);
                adapter->flags |= QLCNIC_FW_RESET_OWNER;
                QLCDB(adapter, DRV, "NEED_RESET state set\n");
                qlcnic_idc_debug_info(adapter, 0);
        }
  
 -      QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
 +                          QLCNIC_DEV_NPAR_NON_OPER);
        qlcnic_api_unlock(adapter);
  }
  
@@@ -2868,22 -2457,34 +2868,22 @@@ qlcnic_dev_set_npar_ready(struct qlcnic
        if (qlcnic_api_lock(adapter))
                return;
  
 -      QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
 +      QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
 +                          QLCNIC_DEV_NPAR_OPER);
        QLCDB(adapter, DRV, "NPAR operational state set\n");
  
        qlcnic_api_unlock(adapter);
  }
  
 -static void
 -qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 -              work_func_t func, int delay)
 +void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
 +                        work_func_t func, int delay)
  {
        if (test_bit(__QLCNIC_AER, &adapter->state))
                return;
  
        INIT_DELAYED_WORK(&adapter->fw_work, func);
 -      queue_delayed_work(qlcnic_wq, &adapter->fw_work,
 -                                      round_jiffies_relative(delay));
 -}
 -
 -static void
 -qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
 -{
 -      while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 -              msleep(10);
 -
 -      if (!adapter->fw_work.work.func)
 -              return;
 -
 -      cancel_delayed_work_sync(&adapter->fw_work);
 +      queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
 +                         round_jiffies_relative(delay));
  }
  
  static void
@@@ -2895,8 -2496,7 +2895,8 @@@ qlcnic_attach_work(struct work_struct *
        u32 npar_state;
  
        if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
 -              npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
 +              npar_state = QLC_SHARED_REG_RD32(adapter,
 +                                               QLCNIC_CRB_DEV_NPAR_STATE);
                if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
                        qlcnic_clr_all_drv_state(adapter, 0);
                else if (npar_state != QLCNIC_DEV_NPAR_OPER)
@@@ -2936,16 -2536,16 +2936,16 @@@ qlcnic_check_health(struct qlcnic_adapt
                goto detach;
  
        if (adapter->need_fw_reset)
 -              qlcnic_dev_request_reset(adapter);
 +              qlcnic_dev_request_reset(adapter, 0);
  
 -      state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
 +      state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
        if (state == QLCNIC_DEV_NEED_RESET) {
                qlcnic_set_npar_non_operational(adapter);
                adapter->need_fw_reset = 1;
        } else if (state == QLCNIC_DEV_NEED_QUISCENT)
                goto detach;
  
 -      heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
 +      heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
        if (heartbeat != adapter->heartbeat) {
                adapter->heartbeat = heartbeat;
                adapter->fw_fail_cnt = 0;
  
        adapter->flags |= QLCNIC_FW_HANG;
  
 -      qlcnic_dev_request_reset(adapter);
 +      qlcnic_dev_request_reset(adapter, 0);
  
        if (qlcnic_auto_fw_reset)
                clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
  
        dev_err(&adapter->pdev->dev, "firmware hang detected\n");
 +      peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
        dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
                        "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
                        "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
                        "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
                        "PEG_NET_4_PC: 0x%x\n",
 -                      QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
 -                      QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
 +                      peg_status,
 +                      QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
                        QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
                        QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
                        QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
                        QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
                        QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
 -      peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
        if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
                dev_err(&adapter->pdev->dev,
                        "Firmware aborted with error code 0x00006700. "
@@@ -3067,39 -2667,17 +3067,39 @@@ static int qlcnic_attach_func(struct pc
        if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
                adapter->need_fw_reset = 1;
                set_bit(__QLCNIC_START_FW, &adapter->state);
 -              QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
 +              QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
 +                                  QLCNIC_DEV_INITIALIZING);
                QLCDB(adapter, DRV, "Restarting fw\n");
        }
        qlcnic_api_unlock(adapter);
  
 -      err = adapter->nic_ops->start_firmware(adapter);
 +      err = qlcnic_start_firmware(adapter);
        if (err)
                return err;
  
        qlcnic_clr_drv_state(adapter);
 -      qlcnic_setup_intr(adapter);
 +      kfree(adapter->msix_entries);
 +      adapter->msix_entries = NULL;
 +      err = qlcnic_setup_intr(adapter, 0);
 +
 +      if (err) {
 +              kfree(adapter->msix_entries);
 +              netdev_err(netdev, "failed to setup interrupt\n");
 +              return err;
 +      }
 +
 +      if (qlcnic_83xx_check(adapter)) {
 +              /* register for NIC IDC AEN Events */
 +              qlcnic_83xx_register_nic_idc_func(adapter, 1);
 +              err = qlcnic_83xx_setup_mbx_intr(adapter);
 +              if (err) {
 +                      dev_err(&adapter->pdev->dev,
 +                              "failed to setup mbx interrupt\n");
 +                      qlcnic_clr_all_drv_state(adapter, 1);
 +                      clear_bit(__QLCNIC_AER, &adapter->state);
 +                      goto done;
 +              }
 +      }
  
        if (netif_running(netdev)) {
                err = qlcnic_attach(adapter);
@@@ -3141,12 -2719,6 +3141,12 @@@ static pci_ers_result_t qlcnic_io_error
        if (netif_running(netdev))
                qlcnic_down(adapter, netdev);
  
 +      if (qlcnic_83xx_check(adapter)) {
 +              qlcnic_83xx_free_mbx_intr(adapter);
 +              qlcnic_83xx_register_nic_idc_func(adapter, 0);
 +              cancel_delayed_work_sync(&adapter->idc_aen_work);
 +      }
 +
        qlcnic_detach(adapter);
        qlcnic_teardown_intr(adapter);
  
@@@ -3166,13 -2738,12 +3166,13 @@@ static pci_ers_result_t qlcnic_io_slot_
  
  static void qlcnic_io_resume(struct pci_dev *pdev)
  {
 +      u32 state;
        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
  
        pci_cleanup_aer_uncorrect_error_status(pdev);
 -
 -      if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
 -          test_and_clear_bit(__QLCNIC_AER, &adapter->state))
 +      state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
 +      if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
 +                                                          &adapter->state))
                qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
                                                FW_POLL_DELAY);
  }
@@@ -3205,59 -2776,39 +3205,59 @@@ qlcnicvf_start_firmware(struct qlcnic_a
        return err;
  }
  
 -int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
 +int qlcnic_validate_max_rss(u8 max_hw, u8 val)
  {
 -      if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
 -              netdev_info(netdev, "no msix or msi support, hence no rss\n");
 -              return -EINVAL;
 +      u32 max_allowed;
 +
 +      if (max_hw > QLC_MAX_SDS_RINGS) {
 +              max_hw = QLC_MAX_SDS_RINGS;
 +              pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS);
        }
  
 -      if ((val > max_hw) || (val <  2) || !is_power_of_2(val)) {
 -              netdev_info(netdev, "rss_ring valid range [2 - %x] in "
 -                      " powers of 2\n", max_hw);
 +      max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
 +                                               num_online_cpus()));
 +      if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
 +              pr_info("rss_ring valid range [2 - %x] in powers of 2\n",
 +                      max_allowed);
                return -EINVAL;
        }
        return 0;
 -
  }
  
 -int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
 +int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
  {
 +      int err;
        struct net_device *netdev = adapter->netdev;
 -      int err = 0;
  
 -      if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 +      if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return -EBUSY;
  
        netif_device_detach(netdev);
        if (netif_running(netdev))
                __qlcnic_down(adapter, netdev);
 +
        qlcnic_detach(adapter);
 +
 +      if (qlcnic_83xx_check(adapter))
 +              qlcnic_83xx_free_mbx_intr(adapter);
 +
        qlcnic_teardown_intr(adapter);
 +      err = qlcnic_setup_intr(adapter, data);
 +      if (err) {
 +              kfree(adapter->msix_entries);
 +              netdev_err(netdev, "failed to setup interrupt\n");
 +              return err;
 +      }
  
 -      if (qlcnic_enable_msix(adapter, data)) {
 -              netdev_info(netdev, "failed setting max_rss; rss disabled\n");
 -              qlcnic_enable_msi_legacy(adapter);
 +      if (qlcnic_83xx_check(adapter)) {
 +              /* register for NIC IDC AEN Events */
 +              qlcnic_83xx_register_nic_idc_func(adapter, 1);
 +              err = qlcnic_83xx_setup_mbx_intr(adapter);
 +              if (err) {
 +                      dev_err(&adapter->pdev->dev,
 +                              "failed to setup mbx interrupt\n");
 +                      goto done;
 +              }
        }
  
        if (netif_running(netdev)) {
                        goto done;
                qlcnic_restore_indev_addr(netdev, NETDEV_UP);
        }
 +      err = len;
   done:
        netif_device_attach(netdev);
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@@ -3308,7 -2858,8 +3308,7 @@@ qlcnic_config_indev_addr(struct qlcnic_
        in_dev_put(indev);
  }
  
 -static void
 -qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
 +void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
  {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct net_device *dev;
  
        qlcnic_config_indev_addr(adapter, netdev, event);
  
 +      rcu_read_lock();
        for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
                dev = __vlan_find_dev_deep(netdev, vid);
                if (!dev)
                        continue;
                qlcnic_config_indev_addr(adapter, dev, event);
        }
 +      rcu_read_unlock();
  }
  
  static int qlcnic_netdev_event(struct notifier_block *this,
@@@ -3391,11 -2940,9 +3391,11 @@@ recheck
        switch (event) {
        case NETDEV_UP:
                qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
 +
                break;
        case NETDEV_DOWN:
                qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
 +
                break;
        default:
                break;
@@@ -3413,10 -2960,11 +3413,10 @@@ static struct notifier_block qlcnic_ine
        .notifier_call = qlcnic_inetaddr_event,
  };
  #else
 -static void
 -qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
 +void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
  { }
  #endif
 -static struct pci_error_handlers qlcnic_err_handler = {
 +static const struct pci_error_handlers qlcnic_err_handler = {
        .error_detected = qlcnic_io_error_detected,
        .slot_reset = qlcnic_io_slot_reset,
        .resume = qlcnic_io_resume,
@@@ -3442,6 -2990,12 +3442,6 @@@ static int __init qlcnic_init_module(vo
  
        printk(KERN_INFO "%s\n", qlcnic_driver_string);
  
 -      qlcnic_wq = create_singlethread_workqueue("qlcnic");
 -      if (qlcnic_wq == NULL) {
 -              printk(KERN_ERR "qlcnic: cannot create workqueue\n");
 -              return -ENOMEM;
 -      }
 -
  #ifdef CONFIG_INET
        register_netdevice_notifier(&qlcnic_netdev_cb);
        register_inetaddr_notifier(&qlcnic_inetaddr_cb);
                unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
                unregister_netdevice_notifier(&qlcnic_netdev_cb);
  #endif
 -              destroy_workqueue(qlcnic_wq);
        }
  
        return ret;
@@@ -3462,12 -3017,14 +3462,12 @@@ module_init(qlcnic_init_module)
  
  static void __exit qlcnic_exit_module(void)
  {
 -
        pci_unregister_driver(&qlcnic_driver);
  
  #ifdef CONFIG_INET
        unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
        unregister_netdevice_notifier(&qlcnic_netdev_cb);
  #endif
 -      destroy_workqueue(qlcnic_wq);
  }
  
  module_exit(qlcnic_exit_module);
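
Taken together, the qlcnic hunks retire the module-global qlcnic_wq (previously created in qlcnic_init_module()) in favour of a per-adapter adapter->qlcnic_wq created in probe and destroyed in remove, tying the queue's lifetime to the device rather than to the module. A sketch of that per-device lifecycle; demo_dev and the helper names are illustrative only:

#include <linux/workqueue.h>

struct demo_dev {
	struct workqueue_struct *wq;
	struct delayed_work fw_work;
};

static int demo_probe_wq(struct demo_dev *dev)
{
	dev->wq = create_singlethread_workqueue("demo");
	if (!dev->wq)
		return -ENOMEM;
	return 0;
}

static void demo_schedule(struct demo_dev *dev, work_func_t func,
			  unsigned long delay)
{
	INIT_DELAYED_WORK(&dev->fw_work, func);
	queue_delayed_work(dev->wq, &dev->fw_work,
			   round_jiffies_relative(delay));
}

static void demo_remove_wq(struct demo_dev *dev)
{
	if (dev->wq) {
		destroy_workqueue(dev->wq);	/* drains pending work first */
		dev->wq = NULL;
	}
}

With a per-device queue, remove() can drain and destroy the queue before the rest of the teardown, instead of relying on module-exit ordering.
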
diff --combined drivers/scsi/Kconfig
@@@ -42,7 -42,7 +42,7 @@@ config SCSI_DM
  
  config SCSI_TGT
        tristate "SCSI target support"
 -      depends on SCSI && EXPERIMENTAL
 +      depends on SCSI
        ---help---
          If you want to use SCSI target mode drivers enable this option.
          If you choose M, the module will be called scsi_tgt.
@@@ -883,7 -883,7 +883,7 @@@ config SCSI_IBMVSCS
          This is the IBM POWER Virtual SCSI Client
  
          To compile this driver as a module, choose M here: the
-         module will be called ibmvscsic.
+         module will be called ibmvscsi.
  
  config SCSI_IBMVSCSIS
        tristate "IBM Virtual SCSI Server support"
@@@ -1392,8 -1392,8 +1392,8 @@@ config SCSI_SYM53C41
          module will be called sym53c416.
  
  config SCSI_DC395x
 -      tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support (EXPERIMENTAL)"
 -      depends on PCI && SCSI && EXPERIMENTAL
 +      tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
 +      depends on PCI && SCSI
        ---help---
          This driver supports PCI SCSI host adapters based on the ASIC
          TRM-S1040 chip, e.g Tekram DC395(U/UW/F) and DC315(U) variants.
@@@ -1618,8 -1618,8 +1618,8 @@@ config GVP11_SCS
          module will be called gvp11.
  
  config SCSI_A4000T
 -      tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
 -      depends on AMIGA && SCSI && EXPERIMENTAL
 +      tristate "A4000T NCR53c710 SCSI support"
 +      depends on AMIGA && SCSI
        select SCSI_SPI_ATTRS
        help
          If you have an Amiga 4000T and have SCSI devices connected to the
          module will be called a4000t.
  
  config SCSI_ZORRO7XX
 -      tristate "Zorro NCR53c710 SCSI support (EXPERIMENTAL)"
 -      depends on ZORRO && SCSI && EXPERIMENTAL
 +      tristate "Zorro NCR53c710 SCSI support"
 +      depends on ZORRO && SCSI
        select SCSI_SPI_ATTRS
        help
          Support for various NCR53c710-based SCSI controllers on Zorro
@@@ -1807,8 -1807,8 +1807,8 @@@ config SCSI_BFA_F
          be called bfa.
  
  config SCSI_VIRTIO
 -      tristate "virtio-scsi support (EXPERIMENTAL)"
 -      depends on EXPERIMENTAL && VIRTIO
 +      tristate "virtio-scsi support"
 +      depends on VIRTIO
        help
            This is the virtual HBA driver for virtio.  If the kernel will
            be used in a virtual machine, say Y or M.
diff --combined drivers/staging/android/binder.h
@@@ -163,7 -163,7 +163,7 @@@ struct binder_pri_ptr_cookie 
        void *cookie;
  };
  
 -enum BinderDriverReturnProtocol {
 +enum binder_driver_return_protocol {
        BR_ERROR = _IOR('r', 0, int),
        /*
         * int: error code
        BR_SPAWN_LOOPER = _IO('r', 13),
        /*
         * No parameters.  The driver has determined that a process has no
-        * threads waiting to service incomming transactions.  When a process
+        * threads waiting to service incoming transactions.  When a process
         * receives this command, it must spawn a new service thread and
         * register it via bcENTER_LOOPER.
         */
         */
  };
  
 -enum BinderDriverCommandProtocol {
 +enum binder_driver_command_protocol {
        BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
        BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
        /*
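
The BR_SPAWN_LOOPER text above describes the userspace half of the contract: on receiving the command, the process spawns a thread that announces itself with BC_ENTER_LOOPER. A minimal, illustrative registration call, assuming an already-open /dev/binder descriptor and the binder_write_read layout declared in this header; this is a sketch, not Android's real libbinder code:

#include <stdint.h>
#include <sys/ioctl.h>
#include "binder.h"	/* BC_ENTER_LOOPER, BINDER_WRITE_READ, struct binder_write_read */

static int demo_enter_looper(int binder_fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;
	struct binder_write_read bwr = {
		.write_size   = sizeof(cmd),
		.write_buffer = (unsigned long)&cmd,
	};

	/* The driver consumes the command word(s) placed in write_buffer. */
	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
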
diff --combined drivers/staging/octeon/ethernet.c
@@@ -72,7 -72,7 +72,7 @@@ int pow_receive_group = 15 
  module_param(pow_receive_group, int, 0444);
  MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
-       "\twill be configured to send incomming packets to this POW\n"
+       "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");
  
@@@ -453,10 -453,12 +453,10 @@@ int cvm_oct_common_init(struct net_devi
        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);
  
 -      if (mac && is_valid_ether_addr(mac)) {
 +      if (mac && is_valid_ether_addr(mac))
                memcpy(dev->dev_addr, mac, ETH_ALEN);
 -              dev->addr_assign_type &= ~NET_ADDR_RANDOM;
 -      } else {
 +      else
                eth_hw_addr_random(dev);
 -      }
  
        /*
         * Force the interface to use the POW send if always_use_pow
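
The cvm_oct_common_init() change above keeps a device-tree MAC when one is present and valid, and otherwise lets eth_hw_addr_random() pick a random locally administered address (which records the random assignment type itself, so no manual addr_assign_type bookkeeping is needed). A hedged sketch of that fallback; demo_set_mac() is not the driver's function:

#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void demo_set_mac(struct net_device *dev, struct device_node *np)
{
	const void *mac = np ? of_get_mac_address(np) : NULL;

	if (mac && is_valid_ether_addr(mac))
		memcpy(dev->dev_addr, mac, ETH_ALEN);	/* DT-provided address */
	else
		eth_hw_addr_random(dev);	/* random address + assign-type bookkeeping */
}
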
diff --combined fs/btrfs/extent-tree.c
@@@ -4534,7 -4534,7 +4534,7 @@@ int btrfs_delalloc_reserve_metadata(str
        unsigned nr_extents = 0;
        int extra_reserve = 0;
        enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
 -      int ret;
 +      int ret = 0;
        bool delalloc_lock = true;
  
        /* If we are a free space inode we need to not flush since we will be in
        csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
  
 -      if (root->fs_info->quota_enabled) {
 +      if (root->fs_info->quota_enabled)
                ret = btrfs_qgroup_reserve(root, num_bytes +
                                           nr_extents * root->leafsize);
 -              if (ret) {
 -                      spin_lock(&BTRFS_I(inode)->lock);
 -                      calc_csum_metadata_size(inode, num_bytes, 0);
 -                      spin_unlock(&BTRFS_I(inode)->lock);
 -                      if (delalloc_lock)
 -                              mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 -                      return ret;
 -              }
 -      }
  
 -      ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
 +      /*
 +       * ret != 0 here means the qgroup reservation failed, we go straight to
 +       * the shared error handling then.
 +       */
 +      if (ret == 0)
 +              ret = reserve_metadata_bytes(root, block_rsv,
 +                                           to_reserve, flush);
 +
        if (ret) {
                u64 to_free = 0;
                unsigned dropped;
@@@ -6522,7 -6524,7 +6522,7 @@@ reada
  }
  
  /*
-  * hepler to process tree block while walking down the tree.
+  * helper to process tree block while walking down the tree.
   *
   * when wc->stage == UPDATE_BACKREF, this function updates
   * back refs for pointers in the block.
@@@ -6597,7 -6599,7 +6597,7 @@@ static noinline int walk_down_proc(stru
  }
  
  /*
-  * hepler to process tree block pointer.
+  * helper to process tree block pointer.
   *
   * when wc->stage == DROP_REFERENCE, this function checks
   * reference count of the block pointed to. if the block
@@@ -6735,7 -6737,7 +6735,7 @@@ skip
  }
  
  /*
-  * hepler to process tree block while walking up the tree.
+  * helper to process tree block while walking up the tree.
   *
   * when wc->stage == DROP_REFERENCE, this function drops
   * reference count on the block.
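
Earlier in this file's diff, btrfs_delalloc_reserve_metadata() folds the qgroup-reservation failure into the pre-existing error path: ret starts at 0, the second reservation step only runs when the first succeeded, and a single if (ret) block cleans up after either failure. The shape of that control flow, reduced to a self-contained toy with made-up demo_* steps:

static int demo_quota_reserve(void)  { return 0; }	/* step 1, may fail */
static int demo_bytes_reserve(void)  { return -12; }	/* step 2; -ENOMEM here just for demonstration */
static void demo_undo_bookkeeping(void) { }

static int demo_reserve(int want_quota)
{
	int ret = 0;

	if (want_quota)
		ret = demo_quota_reserve();

	/* ret != 0 means step 1 failed; skip step 2 and reuse its cleanup */
	if (ret == 0)
		ret = demo_bytes_reserve();

	if (ret) {
		demo_undo_bookkeeping();	/* one shared error path */
		return ret;
	}
	return 0;
}
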
diff --combined fs/btrfs/transaction.c
@@@ -112,7 -112,6 +112,6 @@@ loop
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
-               cur_trans = fs_info->running_transaction;
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
@@@ -333,14 -332,12 +332,14 @@@ start_transaction(struct btrfs_root *ro
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
 -                      return ERR_PTR(ret);
 +                      goto reserve_fail;
        }
  again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
 -      if (!h)
 -              return ERR_PTR(-ENOMEM);
 +      if (!h) {
 +              ret = -ENOMEM;
 +              goto alloc_fail;
 +      }
  
        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);
 -
 -              if (type < TRANS_JOIN_NOLOCK)
 -                      sb_end_intwrite(root->fs_info->sb);
 -              kmem_cache_free(btrfs_trans_handle_cachep, h);
 -              return ERR_PTR(ret);
 +              goto join_fail;
        }
  
        cur_trans = root->fs_info->running_transaction;
@@@ -408,19 -409,6 +407,19 @@@ got_it
        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
 +
 +join_fail:
 +      if (type < TRANS_JOIN_NOLOCK)
 +              sb_end_intwrite(root->fs_info->sb);
 +      kmem_cache_free(btrfs_trans_handle_cachep, h);
 +alloc_fail:
 +      if (num_bytes)
 +              btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
 +                                      num_bytes);
 +reserve_fail:
 +      if (qgroup_reserved)
 +              btrfs_qgroup_free(root, qgroup_reserved);
 +      return ERR_PTR(ret);
  }
  
  struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
diff --combined fs/ocfs2/cluster/tcp.c
@@@ -870,7 -870,7 +870,7 @@@ int o2net_register_handler(u32 msg_type
                /* we've had some trouble with handlers seemingly vanishing. */
                mlog_bug_on_msg(o2net_handler_tree_lookup(msg_type, key, &p,
                                                          &parent) == NULL,
-                               "couldn't find handler we *just* registerd "
+                               "couldn't find handler we *just* registered "
                                "for type %u key %08x\n", msg_type, key);
        }
        write_unlock(&o2net_handler_lock);
@@@ -1165,8 -1165,10 +1165,8 @@@ out
        o2net_debug_del_nst(&nst); /* must be before dropping sc and node */
        if (sc)
                sc_put(sc);
 -      if (vec)
 -              kfree(vec);
 -      if (msg)
 -              kfree(msg);
 +      kfree(vec);
 +      kfree(msg);
        o2net_complete_nsw(nn, &nsw, 0, 0, 0);
        return ret;
  }
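
The o2net change above drops the if (vec)/if (msg) guards because kfree(NULL) is defined to do nothing, which lets an exit path free unconditionally. A toy version of that single exit path (plain kernel C, illustrative names):

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_send(size_t a_len, size_t b_len)
{
	void *a = NULL, *b = NULL;
	int ret = -ENOMEM;

	a = kmalloc(a_len, GFP_KERNEL);
	if (!a)
		goto out;
	b = kmalloc(b_len, GFP_KERNEL);
	if (!b)
		goto out;

	ret = 0;	/* ... real work would go here ... */
out:
	kfree(a);	/* no NULL checks needed */
	kfree(b);
	return ret;
}
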
diff --combined net/bluetooth/hci_core.c
@@@ -1146,8 -1146,7 +1146,8 @@@ static void hci_power_on(struct work_st
                return;
  
        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
 -              schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
 +              queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
 +                                 HCI_AUTO_OFF_TIMEOUT);
  
        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
@@@ -1183,10 -1182,14 +1183,10 @@@ static void hci_discov_off(struct work_
  
  int hci_uuids_clear(struct hci_dev *hdev)
  {
 -      struct list_head *p, *n;
 -
 -      list_for_each_safe(p, n, &hdev->uuids) {
 -              struct bt_uuid *uuid;
 +      struct bt_uuid *uuid, *tmp;
  
 -              uuid = list_entry(p, struct bt_uuid, list);
 -
 -              list_del(p);
 +      list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
 +              list_del(&uuid->list);
                kfree(uuid);
        }
  
@@@ -1618,8 -1621,8 +1618,8 @@@ static int hci_do_le_scan(struct hci_de
        if (err < 0)
                return err;
  
 -      schedule_delayed_work(&hdev->le_scan_disable,
 -                            msecs_to_jiffies(timeout));
 +      queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 +                         msecs_to_jiffies(timeout));
  
        return 0;
  }
@@@ -1796,15 -1799,6 +1796,15 @@@ int hci_register_dev(struct hci_dev *hd
                goto err;
        }
  
 +      hdev->req_workqueue = alloc_workqueue(hdev->name,
 +                                            WQ_HIGHPRI | WQ_UNBOUND |
 +                                            WQ_MEM_RECLAIM, 1);
 +      if (!hdev->req_workqueue) {
 +              destroy_workqueue(hdev->workqueue);
 +              error = -ENOMEM;
 +              goto err;
 +      }
 +
        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;
        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);
  
 -      schedule_work(&hdev->power_on);
 +      queue_work(hdev->req_workqueue, &hdev->power_on);
  
        return id;
  
  err_wqueue:
        destroy_workqueue(hdev->workqueue);
 +      destroy_workqueue(hdev->req_workqueue);
  err:
        ida_simple_remove(&hci_index_ida, hdev->id);
        write_lock(&hci_dev_list_lock);
@@@ -1887,7 -1880,6 +1887,7 @@@ void hci_unregister_dev(struct hci_dev 
        hci_del_sysfs(hdev);
  
        destroy_workqueue(hdev->workqueue);
 +      destroy_workqueue(hdev->req_workqueue);
  
        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
@@@ -1929,7 -1921,7 +1929,7 @@@ int hci_recv_frame(struct sk_buff *skb
                return -ENXIO;
        }
  
-       /* Incomming skb */
+       /* Incoming skb */
        bt_cb(skb)->incoming = 1;
  
        /* Time stamp */
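
The hci_uuids_clear() hunk earlier in this file swaps the open-coded list_for_each_safe() + list_entry() pair for list_for_each_entry_safe(), which walks the containing structures directly and stays safe when the current node is deleted mid-iteration. A minimal sketch with an illustrative demo_uuid type:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_uuid {
	struct list_head list;
	u8 uuid[16];
};

static void demo_clear(struct list_head *uuids)
{
	struct demo_uuid *u, *tmp;

	list_for_each_entry_safe(u, tmp, uuids, list) {
		list_del(&u->list);	/* safe: tmp already points at the next node */
		kfree(u);
	}
}
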
diff --combined net/sctp/sm_make_chunk.c
@@@ -1201,7 -1201,7 +1201,7 @@@ nodata
   * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT)
   * This is a helper function to allocate an error chunk for
   * for those invalid parameter codes in which we may not want
-  * to report all the errors, if the incomming chunk is large
+  * to report all the errors, if the incoming chunk is large
   */
  static inline struct sctp_chunk *sctp_make_op_error_fixed(
        const struct sctp_association *asoc,
@@@ -1589,6 -1589,8 +1589,6 @@@ static sctp_cookie_param_t *sctp_pack_c
        struct sctp_signed_cookie *cookie;
        struct scatterlist sg;
        int headersize, bodysize;
 -      unsigned int keylen;
 -      char *key;
  
        /* Header size is static data prior to the actual cookie, including
         * any padding.
  
                /* Sign the message.  */
                sg_init_one(&sg, &cookie->c, bodysize);
 -              keylen = SCTP_SECRET_SIZE;
 -              key = (char *)ep->secret_key[ep->current_key];
                desc.tfm = sctp_sk(ep->base.sk)->hmac;
                desc.flags = 0;
  
 -              if (crypto_hash_setkey(desc.tfm, key, keylen) ||
 +              if (crypto_hash_setkey(desc.tfm, ep->secret_key,
 +                                     sizeof(ep->secret_key)) ||
                    crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
                        goto free_cookie;
        }
@@@ -1679,7 -1682,8 +1679,7 @@@ struct sctp_association *sctp_unpack_co
        int headersize, bodysize, fixed_size;
        __u8 *digest = ep->digest;
        struct scatterlist sg;
 -      unsigned int keylen, len;
 -      char *key;
 +      unsigned int len;
        sctp_scope_t scope;
        struct sk_buff *skb = chunk->skb;
        struct timeval tv;
                goto no_hmac;
  
        /* Check the signature.  */
 -      keylen = SCTP_SECRET_SIZE;
        sg_init_one(&sg, bear_cookie, bodysize);
 -      key = (char *)ep->secret_key[ep->current_key];
        desc.tfm = sctp_sk(ep->base.sk)->hmac;
        desc.flags = 0;
  
        memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
 -      if (crypto_hash_setkey(desc.tfm, key, keylen) ||
 +      if (crypto_hash_setkey(desc.tfm, ep->secret_key,
 +                             sizeof(ep->secret_key)) ||
            crypto_hash_digest(&desc, &sg, bodysize, digest)) {
                *error = -SCTP_IERROR_NOMEM;
                goto fail;
        }
  
        if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
 -              /* Try the previous key. */
 -              key = (char *)ep->secret_key[ep->last_key];
 -              memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
 -              if (crypto_hash_setkey(desc.tfm, key, keylen) ||
 -                  crypto_hash_digest(&desc, &sg, bodysize, digest)) {
 -                      *error = -SCTP_IERROR_NOMEM;
 -                      goto fail;
 -              }
 -
 -              if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
 -                      /* Yikes!  Still bad signature! */
 -                      *error = -SCTP_IERROR_BAD_SIG;
 -                      goto fail;
 -              }
 +              *error = -SCTP_IERROR_BAD_SIG;
 +              goto fail;
        }
  
  no_hmac:
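
The cookie changes above drop the two-key rotation and sign/verify against the single endpoint secret_key; the mechanics stay the same: key the transform, hash the cookie body through a one-entry scatterlist, then memcmp() the digest against the stored signature. A skeleton of that sequence using the same legacy crypto_hash API visible in the hunk (later kernels replaced it with crypto_shash); demo_check_sig() and its parameters are illustrative:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int demo_check_sig(struct crypto_hash *tfm,
			  const u8 *key, unsigned int keylen,
			  const void *body, unsigned int bodylen,
			  const u8 *sig, u8 *digest, unsigned int diglen)
{
	struct hash_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg;

	sg_init_one(&sg, body, bodylen);
	memset(digest, 0, diglen);

	if (crypto_hash_setkey(tfm, key, keylen) ||
	    crypto_hash_digest(&desc, &sg, bodylen, digest))
		return -ENOMEM;

	return memcmp(digest, sig, diglen) ? -EINVAL : 0;
}
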
diff --combined sound/soc/codecs/wm8983.c
@@@ -353,13 -353,13 +353,13 @@@ static const struct snd_kcontrol_new wm
        SOC_ENUM_EXT("Equalizer Function", eqmode, eqmode_get, eqmode_put),
        SOC_ENUM("EQ1 Cutoff", eq1_cutoff),
        SOC_SINGLE_TLV("EQ1 Volume", WM8983_EQ1_LOW_SHELF,  0, 24, 1, eq_tlv),
-       SOC_ENUM("EQ2 Bandwith", eq2_bw),
+       SOC_ENUM("EQ2 Bandwidth", eq2_bw),
        SOC_ENUM("EQ2 Cutoff", eq2_cutoff),
        SOC_SINGLE_TLV("EQ2 Volume", WM8983_EQ2_PEAK_1, 0, 24, 1, eq_tlv),
-       SOC_ENUM("EQ3 Bandwith", eq3_bw),
+       SOC_ENUM("EQ3 Bandwidth", eq3_bw),
        SOC_ENUM("EQ3 Cutoff", eq3_cutoff),
        SOC_SINGLE_TLV("EQ3 Volume", WM8983_EQ3_PEAK_2, 0, 24, 1, eq_tlv),
-       SOC_ENUM("EQ4 Bandwith", eq4_bw),
+       SOC_ENUM("EQ4 Bandwidth", eq4_bw),
        SOC_ENUM("EQ4 Cutoff", eq4_cutoff),
        SOC_SINGLE_TLV("EQ4 Volume", WM8983_EQ4_PEAK_3, 0, 24, 1, eq_tlv),
        SOC_ENUM("EQ5 Cutoff", eq5_cutoff),
@@@ -851,33 -851,30 +851,33 @@@ static int wm8983_set_pll(struct snd_so
        struct pll_div pll_div;
  
        codec = dai->codec;
 -      if (freq_in && freq_out) {
 +      if (!freq_in || !freq_out) {
 +              /* disable the PLL */
 +              snd_soc_update_bits(codec, WM8983_POWER_MANAGEMENT_1,
 +                                  WM8983_PLLEN_MASK, 0);
 +              return 0;
 +      } else {
                ret = pll_factors(&pll_div, freq_out * 4 * 2, freq_in);
                if (ret)
                        return ret;
 -      }
 -
 -      /* disable the PLL before re-programming it */
 -      snd_soc_update_bits(codec, WM8983_POWER_MANAGEMENT_1,
 -                          WM8983_PLLEN_MASK, 0);
  
 -      if (!freq_in || !freq_out)
 -              return 0;
 +              /* disable the PLL before re-programming it */
 +              snd_soc_update_bits(codec, WM8983_POWER_MANAGEMENT_1,
 +                                  WM8983_PLLEN_MASK, 0);
 +
 +              /* set PLLN and PRESCALE */
 +              snd_soc_write(codec, WM8983_PLL_N,
 +                      (pll_div.div2 << WM8983_PLL_PRESCALE_SHIFT)
 +                      | pll_div.n);
 +              /* set PLLK */
 +              snd_soc_write(codec, WM8983_PLL_K_3, pll_div.k & 0x1ff);
 +              snd_soc_write(codec, WM8983_PLL_K_2, (pll_div.k >> 9) & 0x1ff);
 +              snd_soc_write(codec, WM8983_PLL_K_1, (pll_div.k >> 18));
 +              /* enable the PLL */
 +              snd_soc_update_bits(codec, WM8983_POWER_MANAGEMENT_1,
 +                                      WM8983_PLLEN_MASK, WM8983_PLLEN);
 +      }
  
 -      /* set PLLN and PRESCALE */
 -      snd_soc_write(codec, WM8983_PLL_N,
 -                    (pll_div.div2 << WM8983_PLL_PRESCALE_SHIFT)
 -                    | pll_div.n);
 -      /* set PLLK */
 -      snd_soc_write(codec, WM8983_PLL_K_3, pll_div.k & 0x1ff);
 -      snd_soc_write(codec, WM8983_PLL_K_2, (pll_div.k >> 9) & 0x1ff);
 -      snd_soc_write(codec, WM8983_PLL_K_1, (pll_div.k >> 18));
 -      /* enable the PLL */
 -      snd_soc_update_bits(codec, WM8983_POWER_MANAGEMENT_1,
 -                          WM8983_PLLEN_MASK, WM8983_PLLEN);
        return 0;
  }
  
@@@ -371,13 -371,13 +371,13 @@@ static const struct snd_kcontrol_new wm
        SOC_ENUM_EXT("Equalizer Function", eqmode, eqmode_get, eqmode_put),
        SOC_ENUM("EQ1 Cutoff", eq1_cutoff),
        SOC_SINGLE_TLV("EQ1 Volume", WM8985_EQ1_LOW_SHELF,  0, 24, 1, eq_tlv),
-       SOC_ENUM("EQ2 Bandwith", eq2_bw),
+       SOC_ENUM("EQ2 Bandwidth", eq2_bw),
        SOC_ENUM("EQ2 Cutoff", eq2_cutoff),
        SOC_SINGLE_TLV("EQ2 Volume", WM8985_EQ2_PEAK_1, 0, 24, 1, eq_tlv),
-       SOC_ENUM("EQ3 Bandwith", eq3_bw),
+       SOC_ENUM("EQ3 Bandwidth", eq3_bw),
        SOC_ENUM("EQ3 Cutoff", eq3_cutoff),
        SOC_SINGLE_TLV("EQ3 Volume", WM8985_EQ3_PEAK_2, 0, 24, 1, eq_tlv),
-       SOC_ENUM("EQ4 Bandwith", eq4_bw),
+       SOC_ENUM("EQ4 Bandwidth", eq4_bw),
        SOC_ENUM("EQ4 Cutoff", eq4_cutoff),
        SOC_SINGLE_TLV("EQ4 Volume", WM8985_EQ4_PEAK_3, 0, 24, 1, eq_tlv),
        SOC_ENUM("EQ5 Cutoff", eq5_cutoff),
@@@ -830,30 -830,33 +830,30 @@@ static int wm8985_set_pll(struct snd_so
        struct pll_div pll_div;
  
        codec = dai->codec;
 -      if (freq_in && freq_out) {
 +      if (!freq_in || !freq_out) {
 +              /* disable the PLL */
 +              snd_soc_update_bits(codec, WM8985_POWER_MANAGEMENT_1,
 +                                  WM8985_PLLEN_MASK, 0);
 +      } else {
                ret = pll_factors(&pll_div, freq_out * 4 * 2, freq_in);
                if (ret)
                        return ret;
 -      }
  
 -      /* disable the PLL before reprogramming it */
 -      snd_soc_update_bits(codec, WM8985_POWER_MANAGEMENT_1,
 -                          WM8985_PLLEN_MASK, 0);
 -      
 -      if (!freq_in || !freq_out)
 -              return 0;
 -
 -      /* set PLLN and PRESCALE */
 -      snd_soc_write(codec, WM8985_PLL_N,
 -                    (pll_div.div2 << WM8985_PLL_PRESCALE_SHIFT)
 -                    | pll_div.n);
 -      /* set PLLK */
 -      snd_soc_write(codec, WM8985_PLL_K_3, pll_div.k & 0x1ff);
 -      snd_soc_write(codec, WM8985_PLL_K_2, (pll_div.k >> 9) & 0x1ff);
 -      snd_soc_write(codec, WM8985_PLL_K_1, (pll_div.k >> 18));
 -      /* set the source of the clock to be the PLL */
 -      snd_soc_update_bits(codec, WM8985_CLOCK_GEN_CONTROL,
 -                          WM8985_CLKSEL_MASK, WM8985_CLKSEL);
 -      /* enable the PLL */
 -      snd_soc_update_bits(codec, WM8985_POWER_MANAGEMENT_1,
 -                          WM8985_PLLEN_MASK, WM8985_PLLEN);
 +              /* set PLLN and PRESCALE */
 +              snd_soc_write(codec, WM8985_PLL_N,
 +                            (pll_div.div2 << WM8985_PLL_PRESCALE_SHIFT)
 +                            | pll_div.n);
 +              /* set PLLK */
 +              snd_soc_write(codec, WM8985_PLL_K_3, pll_div.k & 0x1ff);
 +              snd_soc_write(codec, WM8985_PLL_K_2, (pll_div.k >> 9) & 0x1ff);
 +              snd_soc_write(codec, WM8985_PLL_K_1, (pll_div.k >> 18));
 +              /* set the source of the clock to be the PLL */
 +              snd_soc_update_bits(codec, WM8985_CLOCK_GEN_CONTROL,
 +                                  WM8985_CLKSEL_MASK, WM8985_CLKSEL);
 +              /* enable the PLL */
 +              snd_soc_update_bits(codec, WM8985_POWER_MANAGEMENT_1,
 +                                  WM8985_PLLEN_MASK, WM8985_PLLEN);
 +      }
        return 0;
  }
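
[Editor's note] Both codec hunks program the fractional PLL coefficient K by splitting a 24-bit value across the three PLL_K registers (low nine bits, middle nine bits, top six bits). The short standalone sketch below, using a made-up K value, just checks that the shift/mask arithmetic in the snd_soc_write() calls above round-trips losslessly; it is not driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t k = 0x3b6c4d;		/* hypothetical 24-bit fractional K */

	/* Split exactly as the writes to PLL_K_1..3 above do. */
	uint16_t k3 = k & 0x1ff;	/* bits  8:0  -> PLL_K_3 */
	uint16_t k2 = (k >> 9) & 0x1ff;	/* bits 17:9  -> PLL_K_2 */
	uint16_t k1 = k >> 18;		/* bits 23:18 -> PLL_K_1 */

	/* Reassemble and confirm the round trip. */
	uint32_t back = ((uint32_t)k1 << 18) | ((uint32_t)k2 << 9) | k3;
	assert(back == k);
	printf("K=0x%06x -> K1=0x%02x K2=0x%03x K3=0x%03x\n", k, k1, k2, k3);
	return 0;
}
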