Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 25 Oct 2018 14:40:30 +0000 (07:40 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 25 Oct 2018 14:40:30 +0000 (07:40 -0700)
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual drivers: UFS, esp_scsi, NCR5380,
  qla2xxx, lpfc, libsas, hisi_sas.

  In addition there's a set of mostly small updates to the target
  subsystem a set of conversions to the generic DMA API, which do have
  some potential for issues in the older drivers but we'll handle those
  as case by case fixes.

  A new myrs driver for the DAC960/mylex raid controllers to replace the
  block based DAC960 which is also being removed by Jens in this merge
  window.

  Plus the usual slew of trivial changes"

[ "myrs" stands for "MYlex Raid Scsi". Obviously. Silly of me to even
  wonder. There's also a "myrb" driver, where the 'b' stands for
  'block'. Truly, somebody has got mad naming skillz. - Linus ]

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (237 commits)
  scsi: myrs: Fix the processor absent message in processor_show()
  scsi: myrs: Fix a logical vs bitwise bug
  scsi: hisi_sas: Fix NULL pointer dereference
  scsi: myrs: fix build failure on 32 bit
  scsi: fnic: replace gross legacy tag hack with blk-mq hack
  scsi: mesh: switch to generic DMA API
  scsi: ips: switch to generic DMA API
  scsi: smartpqi: fully convert to the generic DMA API
  scsi: vmw_pscsi: switch to generic DMA API
  scsi: snic: switch to generic DMA API
  scsi: qla4xxx: fully convert to the generic DMA API
  scsi: qla2xxx: fully convert to the generic DMA API
  scsi: qla1280: switch to generic DMA API
  scsi: qedi: fully convert to the generic DMA API
  scsi: qedf: fully convert to the generic DMA API
  scsi: pm8001: switch to generic DMA API
  scsi: nsp32: switch to generic DMA API
  scsi: mvsas: fully convert to the generic DMA API
  scsi: mvumi: switch to generic DMA API
  scsi: mpt3sas: switch to generic DMA API
  ...

200 files changed:
Documentation/scsi/ufs.txt
MAINTAINERS
drivers/message/fusion/lsi/mpi_cnfg.h
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptsas.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-sas.c
drivers/scsi/3w-xxxx.c
drivers/scsi/3w-xxxx.h
drivers/scsi/53c700.h
drivers/scsi/BusLogic.c
drivers/scsi/FlashPoint.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/a100u2w.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/aacraid/commsup.c
drivers/scsi/advansys.c
drivers/scsi/aic7xxx/aic7770.c
drivers/scsi/aic7xxx/aic79xx.h
drivers/scsi/aic7xxx/aic79xx_core.c
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/aic7xxx/aic7xxx.h
drivers/scsi/aic7xxx/aic7xxx_93cx6.c
drivers/scsi/aic7xxx/aic7xxx_core.c
drivers/scsi/aic7xxx/aic7xxx_pci.c
drivers/scsi/aic7xxx/aicasm/aicasm.h
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/aic94xx/aic94xx_scb.c
drivers/scsi/aic94xx/aic94xx_task.c
drivers/scsi/am53c974.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/atp870u.c
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_fcbuild.c
drivers/scsi/bfa/bfa_fcbuild.h
drivers/scsi/bfa/bfad_im.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/csiostor/csio_init.c
drivers/scsi/csiostor/csio_lnode.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/csiostor/csio_wr.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/dc395x.c
drivers/scsi/esp_scsi.c
drivers/scsi/esp_scsi.h
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/ips.c
drivers/scsi/isci/host.c
drivers/scsi/isci/host.h
drivers/scsi/isci/request.c
drivers/scsi/isci/task.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/jazz_esp.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/libsas/sas_discover.c
drivers/scsi/libsas/sas_expander.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_bsg.c
drivers/scsi/lpfc/lpfc_bsg.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw4.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli4.h
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mac_esp.c
drivers/scsi/megaraid/megaraid_mbox.c
drivers/scsi/megaraid/megaraid_mbox.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mesh.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/mvumi.c
drivers/scsi/myrb.c [new file with mode: 0644]
drivers/scsi/myrb.h [new file with mode: 0644]
drivers/scsi/myrs.c [new file with mode: 0644]
drivers/scsi/myrs.h [new file with mode: 0644]
drivers/scsi/nsp32.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/pm8001/pm8001_defs.h
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_hwi.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/pm8001/pm80xx_hwi.h
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla1280.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/raid_class.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/sg.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/smartpqi/smartpqi_sis.c
drivers/scsi/snic/snic_disc.c
drivers/scsi/snic/snic_io.c
drivers/scsi/snic/snic_main.c
drivers/scsi/snic/snic_scsi.c
drivers/scsi/snic/vnic_dev.c
drivers/scsi/sun3x_esp.c
drivers/scsi/sun_esp.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/Makefile
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufs_bsg.c [new file with mode: 0644]
drivers/scsi/ufs/ufs_bsg.h [new file with mode: 0644]
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/scsi/vmw_pvscsi.c
drivers/scsi/zorro_esp.c
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_erl2.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_iblock.c
drivers/target/target_core_iblock.h
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
include/linux/wait.h
include/target/iscsi/iscsi_target_core.h
include/target/iscsi/iscsi_target_stat.h
include/target/target_core_base.h
include/uapi/scsi/scsi_bsg_ufs.h [new file with mode: 0644]

index 41a6164..520b5b0 100644 (file)
@@ -128,6 +128,26 @@ The current UFSHCD implementation supports following functionality,
 In this version of UFSHCD Query requests and power management
 functionality are not implemented.
 
+4. BSG Support
+------------------
+
+This transport driver supports exchanging UFS protocol information units
+(UPIUs) with a UFS device. Typically, user space will allocate
+struct ufs_bsg_request and struct ufs_bsg_reply (see ufs_bsg.h) as
+request_upiu and reply_upiu respectively.  Filling those UPIUs should
+be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
+*Caveat emptor*: The driver makes no further input validations and sends the
+UPIU to the device as it is.  Open the bsg device in /dev/ufs-bsg and
+send SG_IO with the applicable sg_io_v4:
+
+       io_hdr_v4.guard = 'Q';
+       io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
+       io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+       io_hdr_v4.response = (__u64)reply_upiu;
+       io_hdr_v4.max_response_len = reply_len;
+       io_hdr_v4.request_len = request_len;
+       io_hdr_v4.request = (__u64)request_upiu;
+
 UFS Specifications can be found at,
 UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
 UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
index 16fb17c..46a7656 100644 (file)
@@ -4055,7 +4055,7 @@ M:        Uma Krishnan <ukrishn@linux.vnet.ibm.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/cxlflash/
-F:     include/uapi/scsi/cxlflash_ioctls.h
+F:     include/uapi/scsi/cxlflash_ioctl.h
 F:     Documentation/powerpc/cxlflash.txt
 
 CYBERPRO FB DRIVER
@@ -10011,6 +10011,13 @@ S:     Supported
 F:     drivers/gpu/drm/mxsfb/
 F:     Documentation/devicetree/bindings/display/mxsfb.txt
 
+MYLEX DAC960 PCI RAID Controller
+M:     Hannes Reinecke <hare@kernel.org>
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/myrb.*
+F:     drivers/scsi/myrs.*
+
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 M:     Chris Lee <christopher.lee@cspi.com>
 L:     netdev@vger.kernel.org
index 059997f..178f414 100644 (file)
@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
     U64                     LinkFailureCount;           /* 50h */
     U64                     LossOfSyncCount;            /* 58h */
     U64                     LossOfSignalCount;          /* 60h */
-    U64                     PrimativeSeqErrCount;       /* 68h */
+    U64                     PrimitiveSeqErrCount;       /* 68h */
     U64                     InvalidTxWordCount;         /* 70h */
     U64                     InvalidCrcCount;            /* 78h */
     U64                     FcpInitiatorIoCount;        /* 80h */
index e6b4ae5..ba551d8 100644 (file)
@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
        MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
        struct pci_dev *pdev;
 
-       if ((ioc == NULL))
+       if (!ioc)
                return -1;
 
        pdev = ioc->pcidev;
-       if ((pdev == NULL))
+       if (!pdev)
                return -1;
 
        pci_stop_and_remove_bus_device_locked(pdev);
@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
                u8 phy_num = (u8)(evData0);
                u8 port_num = (u8)(evData0 >> 8);
                u8 port_width = (u8)(evData0 >> 16);
-               u8 primative = (u8)(evData0 >> 24);
+               u8 primitive = (u8)(evData0 >> 24);
                snprintf(evStr, EVENT_DESCR_STR_SZ,
-                   "SAS Broadcase Primative: phy=%d port=%d "
-                   "width=%d primative=0x%02x",
-                   phy_num, port_num, port_width, primative);
+                   "SAS Broadcast Primitive: phy=%d port=%d "
+                   "width=%d primitive=0x%02x",
+                   phy_num, port_num, port_width, primitive);
                break;
        }
 
index b8cf265..9b404fc 100644 (file)
@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
 static void mptsas_send_expander_event(struct fw_event_work *fw_event);
 static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
 static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
-static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
 static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
 static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
 void   mptsas_schedule_target_reset(void *ioc);
@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
                mptsas_free_fw_event(ioc, fw_event);
                break;
        case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
-               mptsas_broadcast_primative_work(fw_event);
+               mptsas_broadcast_primitive_work(fw_event);
                break;
        case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
                mptsas_send_expander_event(fw_event);
@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
 }
 
 /**
- *     mptsas_broadcast_primative_work - Handle broadcast primitives
+ *     mptsas_broadcast_primitive_work - Handle broadcast primitives
  *     @work: work queue payload containing info describing the event
  *
  *     this will be handled in workqueue context.
  */
 static void
-mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
 {
        MPT_ADAPTER *ioc = fw_event->ioc;
        MPT_FRAME_HDR   *mf;
index 27521fc..05293ba 100644 (file)
@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
        unsigned long *cpu_addr;
        int retval = 1;
 
-       cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+       cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+                       size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
        if (!cpu_addr) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
                goto out;
@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 
        if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
-               pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+                               cpu_addr, dma_handle);
                goto out;
        }
 
@@ -1027,16 +1029,16 @@ out:
 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
 {
        if (tw_dev->command_packet_virt[0])
-               pci_free_consistent(tw_dev->tw_pci_dev,
-                                   sizeof(TW_Command_Full)*TW_Q_LENGTH,
-                                   tw_dev->command_packet_virt[0],
-                                   tw_dev->command_packet_phys[0]);
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+                               sizeof(TW_Command_Full) * TW_Q_LENGTH,
+                               tw_dev->command_packet_virt[0],
+                               tw_dev->command_packet_phys[0]);
 
        if (tw_dev->generic_buffer_virt[0])
-               pci_free_consistent(tw_dev->tw_pci_dev,
-                                   TW_SECTOR_SIZE*TW_Q_LENGTH,
-                                   tw_dev->generic_buffer_virt[0],
-                                   tw_dev->generic_buffer_phys[0]);
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+                               TW_SECTOR_SIZE * TW_Q_LENGTH,
+                               tw_dev->generic_buffer_virt[0],
+                               tw_dev->generic_buffer_phys[0]);
 
        kfree(tw_dev->event_queue[0]);
 } /* End twa_free_device_extension() */
@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-           || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-                   || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-                       TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
-                       retval = -ENODEV;
-                       goto out_disable_device;
-               }
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+               TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+               retval = -ENODEV;
+               goto out_disable_device;
+       }
 
        host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
        if (!host) {
@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-           || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-                   || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-                       TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
-                       retval = -ENODEV;
-                       goto out_disable_device;
-               }
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+               TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+               retval = -ENODEV;
+               goto out_disable_device;
+       }
 
        /* Initialize the card */
        if (twa_reset_sequence(tw_dev, 0)) {
index 40c1e6e..266bdac 100644 (file)
@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
        unsigned long *cpu_addr;
        int retval = 1;
 
-       cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
-                                        &dma_handle);
+       cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
+                       size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
        if (!cpu_addr) {
                TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
                goto out;
@@ -899,19 +899,19 @@ out:
 static void twl_free_device_extension(TW_Device_Extension *tw_dev)
 {
        if (tw_dev->command_packet_virt[0])
-               pci_free_consistent(tw_dev->tw_pci_dev,
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
                                    sizeof(TW_Command_Full)*TW_Q_LENGTH,
                                    tw_dev->command_packet_virt[0],
                                    tw_dev->command_packet_phys[0]);
 
        if (tw_dev->generic_buffer_virt[0])
-               pci_free_consistent(tw_dev->tw_pci_dev,
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
                                    TW_SECTOR_SIZE*TW_Q_LENGTH,
                                    tw_dev->generic_buffer_virt[0],
                                    tw_dev->generic_buffer_phys[0]);
 
        if (tw_dev->sense_buffer_virt[0])
-               pci_free_consistent(tw_dev->tw_pci_dev,
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
                                    sizeof(TW_Command_Apache_Header)*
                                    TW_Q_LENGTH,
                                    tw_dev->sense_buffer_virt[0],
@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-           || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-                   || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-                       TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
-                       retval = -ENODEV;
-                       goto out_disable_device;
-               }
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+               TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+               retval = -ENODEV;
+               goto out_disable_device;
+       }
 
        host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
        if (!host) {
@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-           || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-                   || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-                       TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
-                       retval = -ENODEV;
-                       goto out_disable_device;
-               }
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+               TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+               retval = -ENODEV;
+               goto out_disable_device;
+       }
 
        /* Initialize the card */
        if (twl_reset_sequence(tw_dev, 0)) {
index 4713669..a582576 100644 (file)
@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 
        dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
 
-       cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+       cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+                       size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
        if (cpu_addr == NULL) {
-               printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+               printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
                return 1;
        }
 
        if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
                printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
-               pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+                               cpu_addr, dma_handle);
                return 1;
        }
 
@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
 
        /* Free command packet and generic buffer memory */
        if (tw_dev->command_packet_virtual_address[0])
-               pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+                               sizeof(TW_Command) * TW_Q_LENGTH,
+                               tw_dev->command_packet_virtual_address[0],
+                               tw_dev->command_packet_physical_address[0]);
 
        if (tw_dev->alignment_virtual_address[0])
-               pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+               dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+                               sizeof(TW_Sector) * TW_Q_LENGTH,
+                               tw_dev->alignment_virtual_address[0],
+                               tw_dev->alignment_physical_address[0]);
 } /* End tw_free_device_extension() */
 
 /* This function will send an initconnection command to controller */
@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 
        pci_set_master(pdev);
 
-       retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+       retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (retval) {
                printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
                goto out_disable_device;
index 69e80c1..bd87fba 100644 (file)
@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
 #define TW_IOCTL_TIMEOUT                      25 /* 25 seconds */
 #define TW_IOCTL_CHRDEV_TIMEOUT               60 /* 60 seconds */
 #define TW_IOCTL_CHRDEV_FREE                  -1
-#define TW_DMA_MASK                          DMA_BIT_MASK(32)
 #define TW_MAX_CDB_LEN                       16
 
 /* Bitmask macros to eliminate bitfields */
index 0c9a100..05fe439 100644 (file)
@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
 /* The SYNC negotiation sequence looks like:
  * 
  * If DEV_NEGOTIATED_SYNC not set, tack and SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
  * If we get an SDTR reply, work out the SXFER parameters, squirrel
  * them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
  * DEV_NEGOTIATED_SYNC.  If we get a REJECT msg, squirrel
index 0d4ffe0..9cee941 100644 (file)
@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
        dma_addr_t blkp;
 
        while (adapter->alloc_ccbs < adapter->initccbs) {
-               blk_pointer = pci_alloc_consistent(adapter->pci_device,
-                                                       blk_size, &blkp);
+               blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+                               blk_size, &blkp, GFP_KERNEL);
                if (blk_pointer == NULL) {
                        blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
                                        adapter);
@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
                next_ccb = ccb->next_all;
                if (ccb->allocgrp_head) {
                        if (lastccb)
-                               pci_free_consistent(adapter->pci_device,
+                               dma_free_coherent(&adapter->pci_device->dev,
                                                lastccb->allocgrp_size, lastccb,
                                                lastccb->allocgrp_head);
                        lastccb = ccb;
                }
        }
        if (lastccb)
-               pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
-                                       lastccb, lastccb->allocgrp_head);
+               dma_free_coherent(&adapter->pci_device->dev,
+                               lastccb->allocgrp_size, lastccb,
+                               lastccb->allocgrp_head);
 }
 
 
@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
        if (addl_ccbs <= 0)
                return;
        while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
-               blk_pointer = pci_alloc_consistent(adapter->pci_device,
-                                                       blk_size, &blkp);
+               blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+                               blk_size, &blkp, GFP_KERNEL);
                if (blk_pointer == NULL)
                        break;
                blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
        if (ccb->command != NULL)
                scsi_dma_unmap(ccb->command);
        if (dma_unmap)
-               pci_unmap_single(adapter->pci_device, ccb->sensedata,
-                        ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+                        ccb->sense_datalen, DMA_FROM_DEVICE);
 
        ccb->command = NULL;
        ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                if (pci_enable_device(pci_device))
                        continue;
 
-               if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+               if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
                        continue;
 
                bus = pci_device->bus->number;
@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                if (pci_enable_device(pci_device))
                        continue;
 
-               if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+               if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
                        continue;
 
                bus = pci_device->bus->number;
@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
                if (pci_enable_device(pci_device))
                        continue;
 
-               if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+               if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
                        continue;
 
                bus = pci_device->bus->number;
@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
           Release any allocated memory structs not released elsewhere
         */
        if (adapter->mbox_space)
-               pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+               dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
                        adapter->mbox_space, adapter->mbox_space_handle);
        pci_dev_put(adapter->pci_device);
        adapter->mbox_space = NULL;
@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
           Initialize the Outgoing and Incoming Mailbox pointers.
         */
        adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
-       adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
-                               adapter->mbox_sz, &adapter->mbox_space_handle);
+       adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+                               adapter->mbox_sz, &adapter->mbox_space_handle,
+                               GFP_KERNEL);
        if (adapter->mbox_space == NULL)
                return blogic_failure(adapter, "MAILBOX ALLOCATION");
        adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
        memcpy(ccb->cdb, cdb, cdblen);
        ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
        ccb->command = command;
-       sense_buf = pci_map_single(adapter->pci_device,
+       sense_buf = dma_map_single(&adapter->pci_device->dev,
                                command->sense_buffer, ccb->sense_datalen,
-                               PCI_DMA_FROMDEVICE);
+                               DMA_FROM_DEVICE);
        if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
                blogic_err("DMA mapping for sense data buffer failed\n",
                                adapter);
index 867b864..0f17bd5 100644 (file)
@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
                        }
 
                        if (currSCCB->Lun == 0x00) {
-                               if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+                               if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
 
                                        currTar_Info->TarStatus |=
                                            (unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
                                            ~EE_SYNC_MASK;
                                }
 
-                               else if ((currSCCB->Sccb_scsistat ==
-                                         SELECT_WN_ST)) {
+                               else if (currSCCB->Sccb_scsistat ==
+                                         SELECT_WN_ST) {
 
                                        currTar_Info->TarStatus =
                                            (currTar_Info->
index 7c09700..70988c3 100644 (file)
@@ -42,6 +42,9 @@ config SCSI_DMA
        bool
        default n
 
+config SCSI_ESP_PIO
+       bool
+
 config SCSI_NETLINK
        bool
        default n
@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
          substantial, so users of MultiMaster Host Adapters may not
          wish to include it.
 
+config SCSI_MYRB
+       tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+       depends on PCI
+       select RAID_ATTRS
+       help
+         This driver adds support for the Mylex DAC960, AcceleRAID, and
+         eXtremeRAID PCI RAID controllers. This driver supports the
+         older, block based interface.
+         This driver is a reimplementation of the original DAC960
+         driver. If you have used the DAC960 driver you should enable
+         this module.
+
+         To compile this driver as a module, choose M here: the
+         module will be called myrb.
+
+config SCSI_MYRS
+       tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+       depends on PCI
+       select RAID_ATTRS
+       help
+         This driver adds support for the Mylex DAC960, AcceleRAID, and
+         eXtremeRAID PCI RAID controllers.  This driver supports the
+         newer, SCSI-based interface only.
+         This driver is a reimplementation of the original DAC960
+         driver. If you have used the DAC960 driver you should enable
+         this module.
+
+         To compile this driver as a module, choose M here: the
+         module will be called myrs.
+
 config VMWARE_PVSCSI
        tristate "VMware PVSCSI driver support"
        depends on PCI && SCSI && X86
@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
        tristate "Zorro ESP SCSI support"
        depends on ZORRO && SCSI
        select SCSI_SPI_ATTRS
+       select SCSI_ESP_PIO
        help
          Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
          expansion boards for the Amiga.
@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
        tristate "Macintosh NCR53c9[46] SCSI"
        depends on MAC && SCSI
        select SCSI_SPI_ATTRS
+       select SCSI_ESP_PIO
        help
          This is the NCR 53c9x SCSI controller found on most of the 68040
          based Macintoshes.
index 6d71b2a..fcb41ae 100644 (file)
@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100)  += a100u2w.o
 obj-$(CONFIG_SCSI_QLOGICPTI)   += qlogicpti.o
 obj-$(CONFIG_SCSI_MESH)                += mesh.o
 obj-$(CONFIG_SCSI_MAC53C94)    += mac53c94.o
+obj-$(CONFIG_SCSI_MYRB)                += myrb.o
+obj-$(CONFIG_SCSI_MYRS)                += myrs.o
 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
 obj-$(CONFIG_SCSI_3W_9XXX)     += 3w-9xxx.o
 obj-$(CONFIG_SCSI_3W_SAS)      += 3w-sas.o
index 90ea0f5..8429c85 100644 (file)
 
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
 
 /**
  * initialize_SCp - init the scsi pointer field
@@ -513,16 +514,15 @@ static void complete_cmd(struct Scsi_Host *instance,
 
        if (hostdata->sensing == cmd) {
                /* Autosense processing ends here */
-               if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+               if (status_byte(cmd->result) != GOOD) {
                        scsi_eh_restore_cmnd(cmd, &hostdata->ses);
-                       set_host_byte(cmd, DID_ERROR);
-               } else
+               } else {
                        scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+                       set_driver_byte(cmd, DRIVER_SENSE);
+               }
                hostdata->sensing = NULL;
        }
 
-       hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
        cmd->scsi_done(cmd);
 }
 
@@ -884,7 +884,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
                        /* Probably Bus Reset */
                        NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-                       dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+                       if (sr & SR_RST) {
+                               /* Certainly Bus Reset */
+                               shost_printk(KERN_WARNING, instance,
+                                            "bus reset interrupt\n");
+                               bus_reset_cleanup(instance);
+                       } else {
+                               dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+                       }
 #ifdef SUN3_SCSI_VME
                        dregs->csr |= CSR_DMA_ENABLE;
 #endif
@@ -902,20 +909,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
        return IRQ_RETVAL(handled);
 }
 
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
  *
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
  *
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried.
+ * Returns false if it should not be retried.
  *
  * Side effects :
  * If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +926,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
  * SELECT_ENABLE will be set appropriately, the NCR5380
  * will cease to drive any SCSI bus signals.
  *
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
  * SELECT interrupt will be disabled.
  *
  * If failed (no target) : cmd->scsi_done() will be called, and the
  * cmd->result host byte set to DID_BAD_TARGET.
  */
 
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
-                                        struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
        __releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
        struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +942,9 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        unsigned char *data;
        int len;
        int err;
+       bool ret = true;
+       bool can_disconnect = instance->irq != NO_IRQ &&
+                             cmd->cmnd[0] != REQUEST_SENSE;
 
        NCR5380_dprint(NDEBUG_ARBITRATION, instance);
        dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +953,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        /*
         * Arbitration and selection phases are slow and involve dropping the
         * lock, so we have to watch out for EH. An exception handler may
-        * change 'selecting' to NULL. This function will then return NULL
+        * change 'selecting' to NULL. This function will then return false
         * so that the caller will forget about 'cmd'. (During information
         * transfer phases, EH may change 'connected' to NULL.)
         */
@@ -984,7 +989,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        if (!hostdata->selecting) {
                /* Command was aborted */
                NCR5380_write(MODE_REG, MR_BASE);
-               goto out;
+               return false;
        }
        if (err < 0) {
                NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1038,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        if (!hostdata->selecting) {
                NCR5380_write(MODE_REG, MR_BASE);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-               goto out;
+               return false;
        }
 
        dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1116,13 +1121,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
                spin_lock_irq(&hostdata->lock);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
                /* Can't touch cmd if it has been reclaimed by the scsi ML */
-               if (hostdata->selecting) {
-                       cmd->result = DID_BAD_TARGET << 16;
-                       complete_cmd(instance, cmd);
-                       dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
-                       cmd = NULL;
-               }
+               if (!hostdata->selecting)
+                       return false;
+
+               cmd->result = DID_BAD_TARGET << 16;
+               complete_cmd(instance, cmd);
+               dsprintk(NDEBUG_SELECTION, instance,
+                       "target did not respond within 250ms\n");
+               ret = false;
                goto out;
        }
 
@@ -1155,12 +1163,12 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        }
        if (!hostdata->selecting) {
                do_abort(instance);
-               goto out;
+               return false;
        }
 
        dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
                 scmd_id(cmd));
-       tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+       tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
 
        len = 1;
        data = tmp;
@@ -1171,7 +1179,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
                cmd->result = DID_ERROR << 16;
                complete_cmd(instance, cmd);
                dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
-               cmd = NULL;
+               ret = false;
                goto out;
        }
 
@@ -1186,13 +1194,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
 
        initialize_SCp(cmd);
 
-       cmd = NULL;
+       ret = false;
 
 out:
        if (!hostdata->selecting)
                return NULL;
        hostdata->selecting = NULL;
-       return cmd;
+       return ret;
 }
 
 /*
@@ -1711,6 +1719,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                cmd->result = DID_ERROR << 16;
                                complete_cmd(instance, cmd);
                                hostdata->connected = NULL;
+                               hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
                                return;
 #endif
                        case PHASE_DATAIN:
@@ -1793,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                                 cmd, scmd_id(cmd), cmd->device->lun);
 
                                        hostdata->connected = NULL;
+                                       hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
 
                                        cmd->result &= ~0xffff;
                                        cmd->result |= cmd->SCp.Status;
@@ -1951,6 +1961,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                NCR5380_transfer_pio(instance, &phase, &len, &data);
                                if (msgout == ABORT) {
                                        hostdata->connected = NULL;
+                                       hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
                                        cmd->result = DID_ERROR << 16;
                                        complete_cmd(instance, cmd);
                                        maybe_release_dma_irq(instance);
@@ -2014,8 +2025,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
        NCR5380_write(MODE_REG, MR_BASE);
 
        target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
-       dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+       if (!target_mask || target_mask & (target_mask - 1)) {
+               shost_printk(KERN_WARNING, instance,
+                            "reselect: bad target_mask 0x%02x\n", target_mask);
+               return;
+       }
 
        /*
         * At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2043,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
        if (NCR5380_poll_politely(hostdata,
                                  STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+               shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                return;
        }
@@ -2040,6 +2055,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 
        if (NCR5380_poll_politely(hostdata,
                                  STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+               if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+                       /* BUS FREE phase */
+                       return;
+               shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
                do_abort(instance);
                return;
        }
@@ -2101,13 +2120,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
                dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
                         "reselect: removed %p from disconnected queue\n", tmp);
        } else {
+               int target = ffs(target_mask) - 1;
+
                shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
                             target_mask, lun);
                /*
                 * Since we have an established nexus that we can't do anything
                 * with, we must abort it.
                 */
-               do_abort(instance);
+               if (do_abort(instance) == 0)
+                       hostdata->busy[target] &= ~(1 << lun);
                return;
        }
 
@@ -2272,15 +2294,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
        if (list_del_cmd(&hostdata->autosense, cmd)) {
                dsprintk(NDEBUG_ABORT, instance,
                         "abort: removed %p from sense queue\n", cmd);
-               set_host_byte(cmd, DID_ERROR);
                complete_cmd(instance, cmd);
        }
 
 out:
        if (result == FAILED)
                dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
-       else
+       else {
+               hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
                dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+       }
 
        queue_work(hostdata->work_q, &hostdata->main_task);
        maybe_release_dma_irq(instance);
@@ -2290,31 +2313,12 @@ out:
 }
 
 
-/**
- * NCR5380_host_reset - reset the SCSI host
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+static void bus_reset_cleanup(struct Scsi_Host *instance)
 {
-       struct Scsi_Host *instance = cmd->device->host;
        struct NCR5380_hostdata *hostdata = shost_priv(instance);
        int i;
-       unsigned long flags;
        struct NCR5380_cmd *ncmd;
 
-       spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
-       scmd_printk(KERN_INFO, cmd, __func__);
-#endif
-       NCR5380_dprint(NDEBUG_ANY, instance);
-       NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
-       do_reset(instance);
-
        /* reset NCR registers */
        NCR5380_write(MODE_REG, MR_BASE);
        NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2330,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
         * commands!
         */
 
-       if (list_del_cmd(&hostdata->unissued, cmd)) {
-               cmd->result = DID_RESET << 16;
-               cmd->scsi_done(cmd);
-       }
-
        if (hostdata->selecting) {
                hostdata->selecting->result = DID_RESET << 16;
                complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2347,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
        list_for_each_entry(ncmd, &hostdata->autosense, list) {
                struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
 
-               set_host_byte(cmd, DID_RESET);
                cmd->scsi_done(cmd);
        }
        INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2363,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
 
        queue_work(hostdata->work_q, &hostdata->main_task);
        maybe_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_host_reset - reset the SCSI host
+ * @cmd: SCSI command undergoing EH
+ *
+ * Returns SUCCESS
+ */
+
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+{
+       struct Scsi_Host *instance = cmd->device->host;
+       struct NCR5380_hostdata *hostdata = shost_priv(instance);
+       unsigned long flags;
+       struct NCR5380_cmd *ncmd;
+
+       spin_lock_irqsave(&hostdata->lock, flags);
+
+#if (NDEBUG & NDEBUG_ANY)
+       shost_printk(KERN_INFO, instance, __func__);
+#endif
+       NCR5380_dprint(NDEBUG_ANY, instance);
+       NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+       list_for_each_entry(ncmd, &hostdata->unissued, list) {
+               struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
+
+               scmd->result = DID_RESET << 16;
+               scmd->scsi_done(scmd);
+       }
+       INIT_LIST_HEAD(&hostdata->unissued);
+
+       do_reset(instance);
+       bus_reset_cleanup(instance);
+
        spin_unlock_irqrestore(&hostdata->lock, flags);
 
        return SUCCESS;
index 31096a0..efca509 100644 (file)
@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 static void NCR5380_main(struct work_struct *work);
 static const char *NCR5380_info(struct Scsi_Host *instance);
 static void NCR5380_reselect(struct Scsi_Host *instance);
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
+static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
index 23b1762..00072ed 100644 (file)
@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
 
        if (pci_enable_device(pdev))
                goto out;
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                printk(KERN_WARNING "Unable to set 32bit DMA "
                                    "on inia100 adapter, ignoring.\n");
                goto out_disable_device;
@@ -1124,7 +1124,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
 
        /* Get total memory needed for SCB */
        sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
-       host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+       host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
+                                            GFP_KERNEL);
        if (!host->scb_virt) {
                printk("inia100: SCB memory allocation error\n");
                goto out_host_put;
@@ -1132,7 +1133,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
 
        /* Get total memory needed for ESCB */
        sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
-       host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+       host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
+                                             GFP_KERNEL);
        if (!host->escb_virt) {
                printk("inia100: ESCB memory allocation error\n");
                goto out_free_scb_array;
@@ -1177,10 +1179,12 @@ static int inia100_probe_one(struct pci_dev *pdev,
 out_free_irq:
         free_irq(shost->irq, shost);
 out_free_escb_array:
-       pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+       dma_free_coherent(&pdev->dev,
+                       ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
                        host->escb_virt, host->escb_phys);
 out_free_scb_array:
-       pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+       dma_free_coherent(&pdev->dev,
+                       ORC_MAXQUEUE * sizeof(struct orc_scb),
                        host->scb_virt, host->scb_phys);
 out_host_put:
        scsi_host_put(shost);
@@ -1200,9 +1204,11 @@ static void inia100_remove_one(struct pci_dev *pdev)
        scsi_remove_host(shost);
 
         free_irq(shost->irq, shost);
-       pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+       dma_free_coherent(&pdev->dev,
+                       ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
                        host->escb_virt, host->escb_phys);
-       pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+       dma_free_coherent(&pdev->dev,
+                       ORC_MAXQUEUE * sizeof(struct orc_scb),
                        host->scb_virt, host->scb_phys);
         release_region(shost->io_port, 256);
 
index 6e35632..bd7f352 100644 (file)
@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
 
 static void aac_srb_callback(void *context, struct fib * fibptr)
 {
-       struct aac_dev *dev;
        struct aac_srb_reply *srbreply;
        struct scsi_cmnd *scsicmd;
 
@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 
        BUG_ON(fibptr == NULL);
 
-       dev = fibptr->dev;
-
        srbreply = (struct aac_srb_reply *) fib_data(fibptr);
 
        scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
@@ -3921,13 +3918,11 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
 
 static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 {
-       struct aac_dev *dev;
        unsigned long byte_count = 0;
        int nseg;
        struct scatterlist *sg;
        int i;
 
-       dev = (struct aac_dev *)scsicmd->device->host->hostdata;
        // Get rid of old data
        psg->count = 0;
        psg->sg[0].addr = 0;
@@ -3963,14 +3958,12 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 
 static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
 {
-       struct aac_dev *dev;
        unsigned long byte_count = 0;
        u64 addr;
        int nseg;
        struct scatterlist *sg;
        int i;
 
-       dev = (struct aac_dev *)scsicmd->device->host->hostdata;
        // Get rid of old data
        psg->count = 0;
        psg->sg[0].addr[0] = 0;
index 6e1b022..1e77d96 100644 (file)
@@ -2586,9 +2586,7 @@ int aac_acquire_irq(struct aac_dev *dev)
 void aac_free_irq(struct aac_dev *dev)
 {
        int i;
-       int cpu;
 
-       cpu = cpumask_first(cpu_online_mask);
        if (aac_is_src(dev)) {
                if (dev->max_msix > 1) {
                        for (i = 0; i < dev->max_msix; i++)
index 713f690..223ef6f 100644 (file)
@@ -5949,7 +5949,6 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
 static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
 {
        struct asc_board *boardp = adv_dvc_varp->drv_ptr;
-       u32 srb_tag;
        adv_req_t *reqp;
        adv_sgblk_t *sgblkp;
        struct scsi_cmnd *scp;
@@ -5965,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
         * completed. The adv_req_t structure actually contains the
         * completed ADV_SCSI_REQ_Q structure.
         */
-       srb_tag = le32_to_cpu(scsiqp->srb_tag);
        scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
 
        ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6446,7 @@ static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
                                sdtr_data =
                                    AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
                                                   ext_msg.req_ack_offset);
-                               if ((sdtr_data == 0xFF)) {
+                               if (sdtr_data == 0xFF) {
 
                                        q_cntl |= QC_MSG_OUT;
                                        asc_dvc->init_sdtr &= ~target_id;
index 5000bd6..176704b 100644 (file)
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
 
 #define ID_AIC7770     0x04907770
 #define ID_AHA_274x    0x04907771
index 31f2bb9..9a51555 100644 (file)
@@ -607,9 +607,6 @@ struct scb {
        ahd_io_ctx_t              io_ctx;
        struct ahd_softc         *ahd_softc;
        scb_flag                  flags;
-#ifndef __linux__
-       bus_dmamap_t              dmamap;
-#endif
        struct scb_platform_data *platform_data;
        struct map_node          *hscb_map;
        struct map_node          *sg_map;
@@ -1056,9 +1053,6 @@ struct ahd_completion
 struct ahd_softc {
        bus_space_tag_t           tags[2];
        bus_space_handle_t        bshs[2];
-#ifndef __linux__
-       bus_dma_tag_t             buffer_dmat;   /* dmat for buffer I/O */
-#endif
        struct scb_data           scb_data;
 
        struct hardware_scb      *next_queued_hscb;
index 2d82ec8..9ee75c9 100644 (file)
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
  */
 
-#ifdef __linux__
 #include "aic79xx_osm.h"
 #include "aic79xx_inline.h"
 #include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
-
 
 /***************************** Lookup Tables **********************************/
 static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@ static const char *const ahd_chip_names[] =
        "aic7902",
        "aic7901A"
 };
-static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
 
 /*
  * Hardware error codes.
@@ -6172,17 +6164,11 @@ ahd_free(struct ahd_softc *ahd)
        case 2:
                ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
        case 1:
-#ifndef __linux__
-               ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
-#endif
                break;
        case 0:
                break;
        }
 
-#ifndef __linux__
-       ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
-#endif
        ahd_platform_free(ahd);
        ahd_fini_scbdata(ahd);
        for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6934,9 +6920,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
        for (i = 0; i < newcount; i++) {
                struct scb_platform_data *pdata;
                u_int col_tag;
-#ifndef __linux__
-               int error;
-#endif
 
                next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
                if (next_scb == NULL)
@@ -6970,15 +6953,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
                        next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
                next_scb->ahd_softc = ahd;
                next_scb->flags = SCB_FLAG_NONE;
-#ifndef __linux__
-               error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
-                                         &next_scb->dmamap);
-               if (error != 0) {
-                       kfree(next_scb);
-                       kfree(pdata);
-                       break;
-               }
-#endif
                next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
                col_tag = scb_data->numscbs ^ 0x100;
                next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7065,6 @@ ahd_init(struct ahd_softc *ahd)
        if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
                ahd->features &= ~AHD_TARGETMODE;
 
-#ifndef __linux__
-       /* DMA tag for mapping buffers into device visible space. */
-       if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
-                              /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
-                              /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
-                                       ? (dma_addr_t)0x7FFFFFFFFFULL
-                                       : BUS_SPACE_MAXADDR_32BIT,
-                              /*highaddr*/BUS_SPACE_MAXADDR,
-                              /*filter*/NULL, /*filterarg*/NULL,
-                              /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
-                              /*nsegments*/AHD_NSEG,
-                              /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
-                              /*flags*/BUS_DMA_ALLOCNOW,
-                              &ahd->buffer_dmat) != 0) {
-               return (ENOMEM);
-       }
-#endif
-
        ahd->init_level++;
 
        /*
index cc9bd26..8397ae9 100644 (file)
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
  */
 
-#ifdef __linux__
 #include "aic79xx_osm.h"
 #include "aic79xx_inline.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#endif
-
 #include "aic79xx_pci.h"
 
 static inline uint64_t
@@ -294,13 +288,11 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
 int
 ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
 {
-       struct scb_data *shared_scb_data;
        u_int            command;
        uint32_t         devconfig;
        uint16_t         subvendor; 
        int              error;
 
-       shared_scb_data = NULL;
        ahd->description = entry->name;
        /*
         * Record if this is an HP board.
index 4ce4e90..5614921 100644 (file)
@@ -568,9 +568,6 @@ struct scb {
        ahc_io_ctx_t              io_ctx;
        struct ahc_softc         *ahc_softc;
        scb_flag                  flags;
-#ifndef __linux__
-       bus_dmamap_t              dmamap;
-#endif
        struct scb_platform_data *platform_data;
        struct sg_map_node       *sg_map;
        struct ahc_dma_seg       *sg_list;
@@ -906,9 +903,6 @@ typedef void ahc_callback_t (void *);
 struct ahc_softc {
        bus_space_tag_t           tag;
        bus_space_handle_t        bsh;
-#ifndef __linux__
-       bus_dma_tag_t             buffer_dmat;   /* dmat for buffer I/O */
-#endif
        struct scb_data          *scb_data;
 
        struct scb               *next_queued_scb;
index 9e85a7e..cc9e419 100644 (file)
  *   bit to be sent from the chip.
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
 
 /*
  * Right now, we only have to read the SEEPROM.  But we make it easier to
index 915a34f..f3362f4 100644 (file)
  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
 
 /***************************** Lookup Tables **********************************/
 static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@ static const char *const ahc_chip_names[] = {
        "aic7892",
        "aic7899"
 };
-static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
 
 /*
  * Hardware error codes.
@@ -4509,17 +4502,11 @@ ahc_free(struct ahc_softc *ahc)
        case 2:
                ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
        case 1:
-#ifndef __linux__
-               ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
-#endif
                break;
        case 0:
                break;
        }
 
-#ifndef __linux__
-       ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
-#endif
        ahc_platform_free(ahc);
        ahc_fini_scbdata(ahc);
        for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -5005,9 +4992,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
        newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
        for (i = 0; i < newcount; i++) {
                struct scb_platform_data *pdata;
-#ifndef __linux__
-               int error;
-#endif
+
                pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
                if (pdata == NULL)
                        break;
@@ -5021,12 +5006,6 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
                next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
                next_scb->ahc_softc = ahc;
                next_scb->flags = SCB_FREE;
-#ifndef __linux__
-               error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
-                                         &next_scb->dmamap);
-               if (error != 0)
-                       break;
-#endif
                next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
                next_scb->hscb->tag = ahc->scb_data->numscbs;
                SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5304,6 @@ ahc_init(struct ahc_softc *ahc)
        if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
                ahc->features &= ~AHC_TARGETMODE;
 
-#ifndef __linux__
-       /* DMA tag for mapping buffers into device visible space. */
-       if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
-                              /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
-                              /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
-                                       ? (dma_addr_t)0x7FFFFFFFFFULL
-                                       : BUS_SPACE_MAXADDR_32BIT,
-                              /*highaddr*/BUS_SPACE_MAXADDR,
-                              /*filter*/NULL, /*filterarg*/NULL,
-                              /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
-                              /*nsegments*/AHC_NSEG,
-                              /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
-                              /*flags*/BUS_DMA_ALLOCNOW,
-                              &ahc->buffer_dmat) != 0) {
-               return (ENOMEM);
-       }
-#endif
-
        ahc->init_level++;
 
        /*
index 673e826..656f680 100644 (file)
  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
  */
 
-#ifdef __linux__
 #include "aic7xxx_osm.h"
 #include "aic7xxx_inline.h"
 #include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
-
 #include "aic7xxx_pci.h"
 
 static inline uint64_t
index 51678dd..716a2ae 100644 (file)
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #ifndef TRUE
 #define TRUE 1
index f1586a4..924d55a 100644 (file)
 #include <string.h>
 #include <sysexits.h>
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
index 708326d..8c04798 100644 (file)
 #include <string.h>
 #include <sysexits.h>
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
index c0457b8..98e9959 100644 (file)
 #include <stdio.h>
 #include <string.h>
 #include <sysexits.h>
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
index 93c8667..c78d4f6 100644 (file)
 #include <stdio.h>
 #include <string.h>
 #include <sysexits.h>
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 #include "aicasm.h"
 #include "aicasm_symbol.h"
index 232aff1..975fcfc 100644 (file)
 
 #include <sys/types.h>
 
-#ifdef __linux__
 #include "aicdb.h"
-#else
-#include <db.h>
-#endif
 #include <fcntl.h>
 #include <inttypes.h>
 #include <regex.h>
index 34bbcad..7bf7fd5 100644 (file)
  * $FreeBSD$
  */
 
-#ifdef __linux__
 #include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
 
 typedef enum {
        UNINITIALIZED,
index 1391e5f..41c4d8a 100644 (file)
@@ -771,13 +771,8 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                goto Err_remove;
 
        err = -ENODEV;
-       if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
-           && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
-               ;
-       else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
-                && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
-               ;
-       else {
+       if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
                asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
                goto Err_remove;
        }
index 22873ce..91ea87d 100644 (file)
@@ -724,9 +724,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
        switch (pd->max_sas_lrate) {
        case SAS_LINK_RATE_6_0_GBPS:
                *speed_mask &= ~SAS_SPEED_60_DIS;
+               /* fall through */
        default:
        case SAS_LINK_RATE_3_0_GBPS:
                *speed_mask &= ~SAS_SPEED_30_DIS;
+               /* fall through */
        case SAS_LINK_RATE_1_5_GBPS:
                *speed_mask &= ~SAS_SPEED_15_DIS;
        }
@@ -734,6 +736,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
        switch (pd->min_sas_lrate) {
        case SAS_LINK_RATE_6_0_GBPS:
                *speed_mask |= SAS_SPEED_30_DIS;
+               /* fall through */
        case SAS_LINK_RATE_3_0_GBPS:
                *speed_mask |= SAS_SPEED_15_DIS;
        default:
@@ -745,6 +748,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
        switch (pd->max_sata_lrate) {
        case SAS_LINK_RATE_3_0_GBPS:
                *speed_mask &= ~SATA_SPEED_30_DIS;
+               /* fall through */
        default:
        case SAS_LINK_RATE_1_5_GBPS:
                *speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +807,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
 
                /* link reset retries, this should be nominal */
                control_phy->link_reset_retries = 10;
+               /* fall through */
 
        case RELEASE_SPINUP_HOLD: /* 0x02 */
                /* decide the func_mask */
index cdd4ab6..7fea344 100644 (file)
@@ -42,13 +42,13 @@ static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
        spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
 }
 
-/* PCI_DMA_... to our direction translation.
+/* DMA_... to our direction translation.
  */
 static const u8 data_dir_flags[] = {
-       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
-       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT, /* OUTBOUND */
-       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN, /* INBOUND */
-       [PCI_DMA_NONE]          = DATA_DIR_NONE, /* NO TRANSFER */
+       [DMA_BIDIRECTIONAL]     = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+       [DMA_TO_DEVICE]         = DATA_DIR_OUT,         /* OUTBOUND */
+       [DMA_FROM_DEVICE]       = DATA_DIR_IN,          /* INBOUND */
+       [DMA_NONE]              = DATA_DIR_NONE,        /* NO TRANSFER */
 };
 
 static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +60,12 @@ static int asd_map_scatterlist(struct sas_task *task,
        struct scatterlist *sc;
        int num_sg, res;
 
-       if (task->data_dir == PCI_DMA_NONE)
+       if (task->data_dir == DMA_NONE)
                return 0;
 
        if (task->num_scatter == 0) {
                void *p = task->scatter;
-               dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
+               dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
                                                task->total_xfer_len,
                                                task->data_dir);
                sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +79,7 @@ static int asd_map_scatterlist(struct sas_task *task,
        if (sas_protocol_ata(task->task_proto))
                num_sg = task->num_scatter;
        else
-               num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+               num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
                                    task->num_scatter, task->data_dir);
        if (num_sg == 0)
                return -ENOMEM;
@@ -126,8 +126,8 @@ static int asd_map_scatterlist(struct sas_task *task,
        return 0;
 err_unmap:
        if (sas_protocol_ata(task->task_proto))
-               pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
-                            task->data_dir);
+               dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+                            task->num_scatter, task->data_dir);
        return res;
 }
 
@@ -136,21 +136,21 @@ static void asd_unmap_scatterlist(struct asd_ascb *ascb)
        struct asd_ha_struct *asd_ha = ascb->ha;
        struct sas_task *task = ascb->uldd_task;
 
-       if (task->data_dir == PCI_DMA_NONE)
+       if (task->data_dir == DMA_NONE)
                return;
 
        if (task->num_scatter == 0) {
                dma_addr_t dma = (dma_addr_t)
                       le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
-               pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
-                                task->data_dir);
+               dma_unmap_single(&ascb->ha->pcidev->dev, dma,
+                                task->total_xfer_len, task->data_dir);
                return;
        }
 
        asd_free_coherent(asd_ha, ascb->sg_arr);
        if (task->task_proto != SAS_PROTOCOL_STP)
-               pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
-                            task->data_dir);
+               dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+                            task->num_scatter, task->data_dir);
 }
 
 /* ---------- Task complete tasklet ---------- */
@@ -436,10 +436,10 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
        struct domain_device *dev = task->dev;
        struct scb *scb;
 
-       pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
-                  PCI_DMA_TODEVICE);
-       pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
-                  PCI_DMA_FROMDEVICE);
+       dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
+                  DMA_TO_DEVICE);
+       dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+                  DMA_FROM_DEVICE);
 
        scb = ascb->scb;
 
@@ -471,10 +471,10 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
        struct sas_task *task = a->uldd_task;
 
        BUG_ON(!task);
-       pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
-                    PCI_DMA_TODEVICE);
-       pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
-                    PCI_DMA_FROMDEVICE);
+       dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
+                    DMA_TO_DEVICE);
+       dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+                    DMA_FROM_DEVICE);
 }
 
 /* ---------- SSP ---------- */
index d81ca66..27c0a4a 100644 (file)
@@ -96,9 +96,7 @@ static void pci_esp_dma_drain(struct esp *esp);
 
 static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
 {
-       struct pci_dev *pdev = esp->dev;
-
-       return pci_get_drvdata(pdev);
+       return dev_get_drvdata(esp->dev);
 }
 
 static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +114,6 @@ static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
        return iowrite32(val, esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
-                                    size_t sz, int dir)
-{
-       return pci_map_single(esp->dev, buf, sz, dir);
-}
-
-static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-                         int num_sg, int dir)
-{
-       return pci_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                                size_t sz, int dir)
-{
-       pci_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                            int num_sg, int dir)
-{
-       pci_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int pci_esp_irq_pending(struct esp *esp)
 {
        struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +269,6 @@ static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 static const struct esp_driver_ops pci_esp_ops = {
        .esp_write8     =       pci_esp_write8,
        .esp_read8      =       pci_esp_read8,
-       .map_single     =       pci_esp_map_single,
-       .map_sg         =       pci_esp_map_sg,
-       .unmap_single   =       pci_esp_unmap_single,
-       .unmap_sg       =       pci_esp_unmap_sg,
        .irq_pending    =       pci_esp_irq_pending,
        .reset_dma      =       pci_esp_reset_dma,
        .dma_drain      =       pci_esp_dma_drain,
@@ -375,18 +345,18 @@ static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
 
 static void dc390_check_eeprom(struct esp *esp)
 {
+       struct pci_dev *pdev = to_pci_dev(esp->dev);
        u8 EEbuf[128];
        u16 *ptr = (u16 *)EEbuf, wval = 0;
        int i;
 
-       dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
+       dc390_read_eeprom(pdev, ptr);
 
        for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
                wval += *ptr;
 
        /* no Tekram EEprom found */
        if (wval != 0x1234) {
-               struct pci_dev *pdev = esp->dev;
                dev_printk(KERN_INFO, &pdev->dev,
                           "No valid Tekram EEprom found\n");
                return;
@@ -411,7 +381,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
                return -ENODEV;
        }
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "failed to set 32bit DMA mask\n");
                goto fail_disable_device;
@@ -435,7 +405,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
 
        esp = shost_priv(shost);
        esp->host = shost;
-       esp->dev = pdev;
+       esp->dev = &pdev->dev;
        esp->ops = &pci_esp_ops;
        /*
         * The am53c974 HBA has a design flaw of generating
@@ -467,8 +437,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       esp->command_block = pci_alloc_consistent(pdev, 16,
-                                                 &esp->command_block_dma);
+       esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
+                       &esp->command_block_dma, GFP_KERNEL);
        if (!esp->command_block) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "failed to allocate command block\n");
@@ -498,7 +468,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
        /* Assume 40MHz clock */
        esp->cfreq = 40000000;
 
-       err = scsi_esp_register(esp, &pdev->dev);
+       err = scsi_esp_register(esp);
        if (err)
                goto fail_free_irq;
 
@@ -508,8 +478,8 @@ fail_free_irq:
        free_irq(pdev->irq, esp);
 fail_unmap_command_block:
        pci_set_drvdata(pdev, NULL);
-       pci_free_consistent(pdev, 16, esp->command_block,
-                           esp->command_block_dma);
+       dma_free_coherent(&pdev->dev, 16, esp->command_block,
+                         esp->command_block_dma);
 fail_unmap_regs:
        pci_iounmap(pdev, esp->regs);
 fail_release_regions:
@@ -532,8 +502,8 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
        scsi_esp_unregister(esp);
        free_irq(pdev->irq, esp);
        pci_set_drvdata(pdev, NULL);
-       pci_free_consistent(pdev, 16, esp->command_block,
-                           esp->command_block_dma);
+       dma_free_coherent(&pdev->dev, 16, esp->command_block,
+                         esp->command_block_dma);
        pci_iounmap(pdev, esp->regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index 12316ef..d4404ee 100644 (file)
@@ -1317,13 +1317,10 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
 {
-       int id, lun;
        if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
                if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
                        struct scsi_cmnd *abortcmd = pCCB->pcmd;
                        if (abortcmd) {
-                               id = abortcmd->device->id;
-                               lun = abortcmd->device->lun;                            
                                abortcmd->result |= DID_ABORT << 16;
                                arcmsr_ccb_complete(pCCB);
                                printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1798,7 +1795,7 @@ static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
        if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
                printk(KERN_NOTICE
-                       "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+                       "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
                        , acb->host->host_no);
        }
 }
@@ -1811,7 +1808,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
 
        if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
                printk(KERN_NOTICE
-                       "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+                       "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
                        , acb->host->host_no);
        }
 }
@@ -1824,7 +1821,7 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
        writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
        if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
                printk(KERN_NOTICE
-                       "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+                       "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
                        , pACB->host->host_no);
        }
        return;
@@ -1837,7 +1834,7 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
        pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
        writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
        if (!arcmsr_hbaD_wait_msgint_ready(pACB))
-               pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+               pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
                        "timeout\n", pACB->host->host_no);
 }
 
@@ -1850,7 +1847,7 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
        pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
        writel(pACB->out_doorbell, &reg->iobound_doorbell);
        if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
-               pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+               pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
                        "timeout\n", pACB->host->host_no);
        }
 }
@@ -3927,7 +3924,7 @@ static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
        writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
        if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
                printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-                               rebulid' timeout \n", acb->host->host_no);
+                               rebuild' timeout \n", acb->host->host_no);
        }
 }
 
@@ -3938,7 +3935,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
        writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
        if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
                printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-                               rebulid' timeout \n",acb->host->host_no);
+                               rebuild' timeout \n",acb->host->host_no);
        }
 }
 
@@ -3950,7 +3947,7 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
        writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
        if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
                printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
-                               rebulid' timeout \n", pACB->host->host_no);
+                               rebuild' timeout \n", pACB->host->host_no);
        }
        return;
 }
@@ -3963,7 +3960,7 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
        writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
        if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
                pr_notice("arcmsr%d: wait 'start adapter "
-                       "background rebulid' timeout\n", pACB->host->host_no);
+                       "background rebuild' timeout\n", pACB->host->host_no);
        }
 }
 
@@ -3977,7 +3974,7 @@ static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
        writel(pACB->out_doorbell, &pmu->iobound_doorbell);
        if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
                pr_notice("arcmsr%d: wait 'start adapter "
-                       "background rebulid' timeout \n", pACB->host->host_no);
+                       "background rebuild' timeout \n", pACB->host->host_no);
        }
 }
 
@@ -4135,9 +4132,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
                pci_read_config_byte(acb->pdev, i, &value[i]);
        }
        /* hardware reset signal */
-       if ((acb->dev_id == 0x1680)) {
+       if (acb->dev_id == 0x1680) {
                writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
-       } else if ((acb->dev_id == 0x1880)) {
+       } else if (acb->dev_id == 0x1880) {
                do {
                        count++;
                        writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4158,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
                } while (((readl(&pmuE->host_diagnostic_3xxx) &
                        ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
                writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
-       } else if ((acb->dev_id == 0x1214)) {
+       } else if (acb->dev_id == 0x1214) {
                writel(0x20, pmuD->reset_request);
        } else {
                pci_write_config_byte(acb->pdev, 0x84, 0x20);
index 8996d23..802d150 100644 (file)
@@ -1193,7 +1193,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
                for (k = 0; k < 16; k++) {
                        if (!atp_dev->id[j][k].prd_table)
                                continue;
-                       pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+                       dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
                        atp_dev->id[j][k].prd_table = NULL;
                }
        }
@@ -1205,7 +1205,7 @@ static int atp870u_init_tables(struct Scsi_Host *host)
        int c,k;
        for(c=0;c < 2;c++) {
                for(k=0;k<16;k++) {
-                               atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
+                               atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
                                if (!atp_dev->id[c][k].prd_table) {
                                        printk("atp870u_init_tables fail\n");
                                atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto fail;
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                 printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
                 err = -EIO;
                 goto disable_device;
index c10aac4..0a6972e 100644 (file)
@@ -520,7 +520,7 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
                 **/
                tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
                if (tag_mem->size) {
-                       pci_free_consistent(ctrl->pdev, tag_mem->size,
+                       dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
                                        tag_mem->va, tag_mem->dma);
                        tag_mem->size = 0;
                }
@@ -1269,12 +1269,12 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
        struct be_sge *sge = nonembedded_sgl(wrb);
        int status = 0;
 
-       nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+       nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
                                sizeof(struct be_mgmt_controller_attributes),
-                               &nonemb_cmd.dma);
+                               &nonemb_cmd.dma, GFP_KERNEL);
        if (nonemb_cmd.va == NULL) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BG_%d : pci_alloc_consistent failed in %s\n",
+                           "BG_%d : dma_alloc_coherent failed in %s\n",
                            __func__);
                return -ENOMEM;
        }
@@ -1314,7 +1314,7 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
                            "BG_%d :  Failed in beiscsi_check_supported_fw\n");
        mutex_unlock(&ctrl->mbox_lock);
        if (nonemb_cmd.va)
-               pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+               dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
 
        return status;
index c8f0a21..96b96e2 100644 (file)
@@ -771,7 +771,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
                        status = beiscsi_get_initiator_name(phba, buf, false);
                        if (status < 0) {
                                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                                           "BS_%d : Retreiving Initiator Name Failed\n");
+                                           "BS_%d : Retrieving Initiator Name Failed\n");
                                status = 0;
                        }
                }
@@ -1071,9 +1071,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        else
                req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
 
-       nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+       nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
                                req_memsize,
-                               &nonemb_cmd.dma);
+                               &nonemb_cmd.dma, GFP_KERNEL);
        if (nonemb_cmd.va == NULL) {
 
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1091,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                            "BS_%d : mgmt_open_connection Failed for cid=%d\n",
                            beiscsi_ep->ep_cid);
 
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+               dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                beiscsi_free_ep(beiscsi_ep);
                return -EAGAIN;
@@ -1104,8 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                            "BS_%d : mgmt_open_connection Failed");
 
                if (ret != -EBUSY)
-                       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                                           nonemb_cmd.va, nonemb_cmd.dma);
+                       dma_free_coherent(&phba->ctrl.pdev->dev,
+                                       nonemb_cmd.size, nonemb_cmd.va,
+                                       nonemb_cmd.dma);
 
                beiscsi_free_ep(beiscsi_ep);
                return ret;
@@ -1118,7 +1119,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
                    "BS_%d : mgmt_open_connection Success\n");
 
-       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+       dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                            nonemb_cmd.va, nonemb_cmd.dma);
        return 0;
 }
index a3019d8..effb6fc 100644 (file)
@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
        }
 
        pci_set_master(pcidev);
-       ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+       ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
        if (ret) {
-               ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-               if (ret) {
-                       dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
-                       goto pci_region_release;
-               } else {
-                       ret = pci_set_consistent_dma_mask(pcidev,
-                                                         DMA_BIT_MASK(32));
-               }
-       } else {
-               ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+               ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
                        goto pci_region_release;
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
        if (status)
                return status;
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = pci_alloc_consistent(pdev,
-                                                 mbox_mem_alloc->size,
-                                                 &mbox_mem_alloc->dma);
+       mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
+                       mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                beiscsi_unmap_pci_function(phba);
                return -ENOMEM;
@@ -1866,7 +1856,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 {
        struct be_queue_info *cq;
        struct sol_cqe *sol;
-       struct dmsg_cqe *dmsg;
        unsigned int total = 0;
        unsigned int num_processed = 0;
        unsigned short code = 0, cid = 0;
@@ -1939,7 +1928,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
                                    "BM_%d : Received %s[%d] on CID : %d\n",
                                    cqe_desc[code], code, cid);
 
-                       dmsg = (struct dmsg_cqe *)sol;
                        hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
                        break;
                case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
 
                /* Map addr only if there is data_count */
                if (dsp_value) {
-                       io_task->mtask_addr = pci_map_single(phba->pcidev,
+                       io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
                                                             task->data,
                                                             task->data_count,
-                                                            PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(phba->pcidev,
+                                                            DMA_TO_DEVICE);
+                       if (dma_mapping_error(&phba->pcidev->dev,
                                                  io_task->mtask_addr))
                                return -ENOMEM;
                        io_task->mtask_data_count = task->data_count;
@@ -2519,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
                       BEISCSI_MAX_FRAGS_INIT);
                curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
                do {
-                       mem_arr->virtual_address = pci_alloc_consistent(
-                                                       phba->pcidev,
-                                                       curr_alloc_size,
-                                                       &bus_add);
+                       mem_arr->virtual_address =
+                               dma_alloc_coherent(&phba->pcidev->dev,
+                                       curr_alloc_size, &bus_add, GFP_KERNEL);
                        if (!mem_arr->virtual_address) {
                                if (curr_alloc_size <= BE_MIN_MEM_SIZE)
                                        goto free_mem;
@@ -2560,7 +2547,7 @@ free_mem:
        mem_descr->num_elements = j;
        while ((i) || (j)) {
                for (j = mem_descr->num_elements; j > 0; j--) {
-                       pci_free_consistent(phba->pcidev,
+                       dma_free_coherent(&phba->pcidev->dev,
                                            mem_descr->mem_array[j - 1].size,
                                            mem_descr->mem_array[j - 1].
                                            virtual_address,
@@ -3031,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
                eq = &phwi_context->be_eq[i].q;
                mem = &eq->dma_mem;
                phwi_context->be_eq[i].phba = phba;
-               eq_vaddress = pci_alloc_consistent(phba->pcidev,
+               eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
                                                   num_eq_pages * PAGE_SIZE,
-                                                  &paddr);
+                                                  &paddr, GFP_KERNEL);
                if (!eq_vaddress) {
                        ret = -ENOMEM;
                        goto create_eq_error;
@@ -3069,7 +3056,7 @@ create_eq_error:
                eq = &phwi_context->be_eq[i].q;
                mem = &eq->dma_mem;
                if (mem->va)
-                       pci_free_consistent(phba->pcidev, num_eq_pages
+                       dma_free_coherent(&phba->pcidev->dev, num_eq_pages
                                            * PAGE_SIZE,
                                            mem->va, mem->dma);
        }
@@ -3097,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
                pbe_eq->cq = cq;
                pbe_eq->phba = phba;
                mem = &cq->dma_mem;
-               cq_vaddress = pci_alloc_consistent(phba->pcidev,
+               cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
                                                   num_cq_pages * PAGE_SIZE,
-                                                  &paddr);
+                                                  &paddr, GFP_KERNEL);
                if (!cq_vaddress) {
                        ret = -ENOMEM;
                        goto create_cq_error;
@@ -3134,7 +3121,7 @@ create_cq_error:
                cq = &phwi_context->be_cq[i];
                mem = &cq->dma_mem;
                if (mem->va)
-                       pci_free_consistent(phba->pcidev, num_cq_pages
+                       dma_free_coherent(&phba->pcidev->dev, num_cq_pages
                                            * PAGE_SIZE,
                                            mem->va, mem->dma);
        }
@@ -3326,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
 {
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
-               pci_free_consistent(phba->pcidev, mem->size,
+               dma_free_coherent(&phba->pcidev->dev, mem->size,
                        mem->va, mem->dma);
                mem->va = NULL;
        }
@@ -3341,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
-       mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+       mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+                       GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
@@ -3479,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
                             &ctrl->ptag_state[tag].tag_state)) {
                        ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
                        if (ptag_mem->size) {
-                               pci_free_consistent(ctrl->pdev,
+                               dma_free_coherent(&ctrl->pdev->dev,
                                                    ptag_mem->size,
                                                    ptag_mem->va,
                                                    ptag_mem->dma);
@@ -3880,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
        j = 0;
        for (i = 0; i < SE_MEM_MAX; i++) {
                for (j = mem_descr->num_elements; j > 0; j--) {
-                       pci_free_consistent(phba->pcidev,
+                       dma_free_coherent(&phba->pcidev->dev,
                          mem_descr->mem_array[j - 1].size,
                          mem_descr->mem_array[j - 1].virtual_address,
                          (unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
        }
 
        if (io_task->mtask_addr) {
-               pci_unmap_single(phba->pcidev,
+               dma_unmap_single(&phba->pcidev->dev,
                                 io_task->mtask_addr,
                                 io_task->mtask_data_count,
-                                PCI_DMA_TODEVICE);
+                                DMA_TO_DEVICE);
                io_task->mtask_addr = 0;
        }
 }
@@ -4852,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 
        switch (bsg_req->msgcode) {
        case ISCSI_BSG_HST_VENDOR:
-               nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+               nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
                                        job->request_payload.payload_len,
-                                       &nonemb_cmd.dma);
+                                       &nonemb_cmd.dma, GFP_KERNEL);
                if (nonemb_cmd.va == NULL) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                                    "BM_%d : Failed to allocate memory for "
@@ -4867,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                                    "BM_%d : MBX Tag Allocation Failed\n");
 
-                       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                       dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                            nonemb_cmd.va, nonemb_cmd.dma);
                        return -EAGAIN;
                }
@@ -4881,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
                if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
                        clear_bit(MCC_TAG_STATE_RUNNING,
                                  &phba->ctrl.ptag_state[tag].tag_state);
-                       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                       dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                            nonemb_cmd.va, nonemb_cmd.dma);
                        return -EIO;
                }
@@ -4898,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
                bsg_reply->result = status;
                bsg_job_done(job, bsg_reply->result,
                             bsg_reply->reply_payload_rcv_len);
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+               dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                if (status || extd_status) {
                        beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5754,7 +5742,7 @@ free_twq:
        beiscsi_cleanup_port(phba);
        beiscsi_free_mem(phba);
 free_port:
-       pci_free_consistent(phba->pcidev,
+       dma_free_coherent(&phba->pcidev->dev,
                            phba->ctrl.mbox_mem_alloced.size,
                            phba->ctrl.mbox_mem_alloced.va,
                            phba->ctrl.mbox_mem_alloced.dma);
@@ -5798,7 +5786,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 
        /* ctrl uninit */
        beiscsi_unmap_pci_function(phba);
-       pci_free_consistent(phba->pcidev,
+       dma_free_coherent(&phba->pcidev->dev,
                            phba->ctrl.mbox_mem_alloced.size,
                            phba->ctrl.mbox_mem_alloced.va,
                            phba->ctrl.mbox_mem_alloced.dma);
index 8fdc07b..ca7b7bb 100644 (file)
@@ -284,7 +284,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
                return rc;
 
 free_cmd:
-       pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+       dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
                            nonemb_cmd->va, nonemb_cmd->dma);
        return rc;
 }
@@ -293,7 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
                                 struct be_dma_mem *cmd,
                                 u8 subsystem, u8 opcode, u32 size)
 {
-       cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+       cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+                       GFP_KERNEL);
        if (!cmd->va) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@ static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
        __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
        tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
        if (tag_mem->size) {
-               pci_free_consistent(phba->pcidev, tag_mem->size,
+               dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
                                    tag_mem->va, tag_mem->dma);
                tag_mem->size = 0;
        }
@@ -761,7 +762,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
                                    "BG_%d : Memory Allocation Failure\n");
 
                                /* Free the DMA memory for the IOCTL issuing */
-                               pci_free_consistent(phba->ctrl.pdev,
+                               dma_free_coherent(&phba->ctrl.pdev->dev,
                                                    nonemb_cmd.size,
                                                    nonemb_cmd.va,
                                                    nonemb_cmd.dma);
@@ -780,7 +781,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
                        ioctl_size += sizeof(struct be_cmd_req_hdr);
 
                        /* Free the previous allocated DMA memory */
-                       pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                       dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                            nonemb_cmd.va,
                                            nonemb_cmd.dma);
 
@@ -869,7 +870,7 @@ static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
                                      status);
                        boot_work = 0;
                }
-               pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+               dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
                                    bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
                bs->nonemb_cmd.va = NULL;
                break;
@@ -1012,9 +1013,10 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
 
        nonemb_cmd = &phba->boot_struct.nonemb_cmd;
        nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
-       nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+       nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
                                              nonemb_cmd->size,
-                                             &nonemb_cmd->dma);
+                                             &nonemb_cmd->dma,
+                                             GFP_KERNEL);
        if (!nonemb_cmd->va) {
                mutex_unlock(&ctrl->mbox_lock);
                return 0;
@@ -1508,9 +1510,10 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
                return -EINVAL;
 
        nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
-       nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+       nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
                                              nonemb_cmd.size,
-                                             &nonemb_cmd.dma);
+                                             &nonemb_cmd.dma,
+                                             GFP_KERNEL);
        if (!nonemb_cmd.va) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
                            "BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1524,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
        wrb = alloc_mcc_wrb(phba, &tag);
        if (!wrb) {
                mutex_unlock(&ctrl->mbox_lock);
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+               dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
                return -ENOMEM;
        }
@@ -1548,7 +1551,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
 
        rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
        if (rc != -EBUSY)
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+               dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
                                    nonemb_cmd.va, nonemb_cmd.dma);
        return rc;
 }
index 3d0c96a..c19c26e 100644 (file)
@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
 struct bfa_aen_entry_s {
        struct list_head        qe;
        enum bfa_aen_category   aen_category;
-       u32                     aen_type;
+       int                     aen_type;
        union bfa_aen_data_u    aen_data;
        u64                     aen_tv_sec;
        u64                     aen_tv_usec;
index d3b00a4..2de5d51 100644 (file)
@@ -190,27 +190,6 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
        fchs->ox_id = ox_id;
 }
 
-enum fc_parse_status
-fc_els_rsp_parse(struct fchs_s *fchs, int len)
-{
-       struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-       struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
-
-       len = len;
-
-       switch (els_cmd->els_code) {
-       case FC_ELS_LS_RJT:
-               if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
-                       return FC_PARSE_BUSY;
-               else
-                       return FC_PARSE_FAILURE;
-
-       case FC_ELS_ACC:
-               return FC_PARSE_OK;
-       }
-       return FC_PARSE_OK;
-}
-
 static void
 fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
 {
@@ -830,18 +809,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
        return sizeof(struct fc_rpsc_acc_s);
 }
 
-u16
-fc_logo_rsp_parse(struct fchs_s *fchs, int len)
-{
-       struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
-       len = len;
-       if (els_cmd->els_code != FC_ELS_ACC)
-               return FC_PARSE_FAILURE;
-
-       return FC_PARSE_OK;
-}
-
 u16
 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
               wwn_t port_name, wwn_t node_name, u16 pdu_size)
@@ -907,40 +874,6 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
        return be16_to_cpu(prlo->payload_len);
 }
 
-u16
-fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
-{
-       struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
-       int             num_pages = 0;
-       int             page = 0;
-
-       len = len;
-
-       if (prlo->command != FC_ELS_ACC)
-               return FC_PARSE_FAILURE;
-
-       num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
-
-       for (page = 0; page < num_pages; page++) {
-               if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
-                       return FC_PARSE_FAILURE;
-
-               if (prlo->prlo_acc_params[page].opa_valid != 0)
-                       return FC_PARSE_FAILURE;
-
-               if (prlo->prlo_acc_params[page].rpa_valid != 0)
-                       return FC_PARSE_FAILURE;
-
-               if (prlo->prlo_acc_params[page].orig_process_assc != 0)
-                       return FC_PARSE_FAILURE;
-
-               if (prlo->prlo_acc_params[page].resp_process_assc != 0)
-                       return FC_PARSE_FAILURE;
-       }
-       return FC_PARSE_OK;
-
-}
-
 u16
 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
               int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
@@ -971,47 +904,6 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
        return be16_to_cpu(tprlo->payload_len);
 }
 
-u16
-fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
-{
-       struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
-       int             num_pages = 0;
-       int             page = 0;
-
-       len = len;
-
-       if (tprlo->command != FC_ELS_ACC)
-               return FC_PARSE_ACC_INVAL;
-
-       num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
-
-       for (page = 0; page < num_pages; page++) {
-               if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
-                       return FC_PARSE_NOT_FCP;
-               if (tprlo->tprlo_acc_params[page].opa_valid != 0)
-                       return FC_PARSE_OPAFLAG_INVAL;
-               if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
-                       return FC_PARSE_RPAFLAG_INVAL;
-               if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
-                       return FC_PARSE_OPA_INVAL;
-               if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
-                       return FC_PARSE_RPA_INVAL;
-       }
-       return FC_PARSE_OK;
-}
-
-enum fc_parse_status
-fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
-{
-       struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
-       len = len;
-       if (els_cmd->els_code != FC_ELS_ACC)
-               return FC_PARSE_FAILURE;
-
-       return FC_PARSE_OK;
-}
-
 u16
 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
                u32 reason_code, u32 reason_expl)
index b109a88..ac08d0b 100644 (file)
@@ -163,7 +163,6 @@ enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
 
 u16        fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
                             u32 s_id, u16 ox_id, u16 rrq_oxid);
-enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
 
 u16        fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
                                u16 ox_id, u8 *name);
@@ -276,8 +275,6 @@ void                fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
 void           fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
                                         __be16 ox_id);
 
-enum fc_parse_status   fc_els_rsp_parse(struct fchs_s *fchs, int len);
-
 enum fc_parse_status   fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
                                        wwn_t port_name);
 
@@ -297,8 +294,6 @@ u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
                u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
 
-u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
-
 u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
                u16 ox_id, wwn_t port_name, wwn_t node_name,
                u16 pdu_size);
@@ -308,14 +303,10 @@ u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
 u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
                u16 ox_id, int num_pages);
 
-u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
-
 u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
                u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
                u32 tpr_id);
 
-u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
-
 u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
                __be16 ox_id, u32 reason_code, u32 reason_expl);
 
index e61ed8d..bd4ac18 100644 (file)
@@ -143,7 +143,7 @@ struct bfad_im_s {
 static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
                                             struct bfad_s *drv, int cnt,
                                             enum bfa_aen_category cat,
-                                            enum bfa_ioc_aen_event evt)
+                                            int evt)
 {
        struct timespec64 ts;
 
index 27c8d6b..cd160f2 100644 (file)
@@ -432,7 +432,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
        struct fcoe_rcv_info *fr;
        struct fcoe_percpu_s *bg;
        struct sk_buff *tmp_skb;
-       unsigned short oxid;
 
        interface = container_of(ptype, struct bnx2fc_interface,
                                 fcoe_packet_type);
@@ -466,8 +465,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
        skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
        fh = (struct fc_frame_header *) skb_transport_header(skb);
 
-       oxid = ntohs(fh->fh_ox_id);
-
        fr = fcoe_dev_from_skb(skb);
        fr->fr_dev = lport;
 
index 66b230b..1a458ce 100644 (file)
@@ -210,11 +210,8 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-       } else {
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "No suitable DMA available.\n");
                goto err_release_regions;
        }
index cc5611e..66e58f0 100644 (file)
@@ -1845,8 +1845,8 @@ csio_ln_fdmi_init(struct csio_lnode *ln)
        /* Allocate Dma buffers for FDMI response Payload */
        dma_buf = &ln->mgmt_req->dma_buf;
        dma_buf->len = 2048;
-       dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
-                                               &dma_buf->paddr);
+       dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
+                                               &dma_buf->paddr, GFP_KERNEL);
        if (!dma_buf->vaddr) {
                csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
                kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@ csio_ln_fdmi_exit(struct csio_lnode *ln)
 
        dma_buf = &ln->mgmt_req->dma_buf;
        if (dma_buf->vaddr)
-               pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+               dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
                                    dma_buf->paddr);
 
        kfree(ln->mgmt_req);
index dab0d3f..8c15b7a 100644 (file)
@@ -2349,8 +2349,8 @@ csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
                }
 
                /* Allocate Dma buffers for DDP */
-               ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
-                                                       &ddp_desc->paddr);
+               ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
+                               &ddp_desc->paddr, GFP_KERNEL);
                if (!ddp_desc->vaddr) {
                        csio_err(hw,
                                 "SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2372,8 @@ no_mem:
        list_for_each(tmp, &scm->ddp_freelist) {
                ddp_desc = (struct csio_dma_buf *) tmp;
                tmp = csio_list_prev(tmp);
-               pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
-                                   ddp_desc->paddr);
+               dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+                                 ddp_desc->vaddr, ddp_desc->paddr);
                list_del_init(&ddp_desc->list);
                kfree(ddp_desc);
        }
@@ -2399,8 +2399,8 @@ csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
        list_for_each(tmp, &scm->ddp_freelist) {
                ddp_desc = (struct csio_dma_buf *) tmp;
                tmp = csio_list_prev(tmp);
-               pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
-                                   ddp_desc->paddr);
+               dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+                                 ddp_desc->vaddr, ddp_desc->paddr);
                list_del_init(&ddp_desc->list);
                kfree(ddp_desc);
        }
index 5022e82..dc12933 100644 (file)
@@ -124,8 +124,8 @@ csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
 
        while (n--) {
                buf->len = sge->sge_fl_buf_size[sreg];
-               buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
-                                                 &buf->paddr);
+               buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
+                                               &buf->paddr, GFP_KERNEL);
                if (!buf->vaddr) {
                        csio_err(hw, "Could only fill %d buffers!\n", n + 1);
                        return -ENOMEM;
@@ -233,7 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
 
        q = wrm->q_arr[free_idx];
 
-       q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
+       q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+                       GFP_KERNEL);
        if (!q->vstart) {
                csio_err(hw,
                         "Failed to allocate DMA memory for "
@@ -1703,14 +1704,14 @@ csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
                                        buf = &q->un.fl.bufs[j];
                                        if (!buf->vaddr)
                                                continue;
-                                       pci_free_consistent(hw->pdev, buf->len,
-                                                           buf->vaddr,
-                                                           buf->paddr);
+                                       dma_free_coherent(&hw->pdev->dev,
+                                                       buf->len, buf->vaddr,
+                                                       buf->paddr);
                                }
                                kfree(q->un.fl.bufs);
                        }
-                       pci_free_consistent(hw->pdev, q->size,
-                                           q->vstart, q->pstart);
+                       dma_free_coherent(&hw->pdev->dev, q->size,
+                                       q->vstart, q->pstart);
                }
                kfree(q);
        }
index 211da1d..064ef57 100644 (file)
@@ -35,6 +35,11 @@ static unsigned int dbg_level;
 
 #include "../libcxgbi.h"
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
 #define        DRV_MODULE_NAME         "cxgb4i"
 #define DRV_MODULE_DESC                "Chelsio T4-T6 iSCSI Driver"
 #define        DRV_MODULE_VERSION      "0.9.5-ko"
@@ -155,6 +160,15 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
        .session_recovery_timedout = iscsi_session_recovery_timedout,
 };
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
+
+static struct notifier_block cxgb4_dcb_change = {
+       .notifier_call = cxgb4_dcb_change_notify,
+};
+#endif
+
 static struct scsi_transport_template *cxgb4i_stt;
 
 /*
@@ -574,6 +588,9 @@ static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
        int nparams, flowclen16, flowclen;
 
        nparams = FLOWC_WR_NPARAMS_MIN;
+#ifdef CONFIG_CHELSIO_T4_DCB
+       nparams++;
+#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
        struct fw_flowc_wr *flowc;
        int nparams, flowclen16, flowclen;
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+       u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
        flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
        flowc->mnemval[8].val = 0;
        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        flowc->mnemval[8].val = 16384;
+#ifdef CONFIG_CHELSIO_T4_DCB
+       flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+       if (vlan == CPL_L2T_VLAN_NONE) {
+               pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
+                                   csk->tid);
+               flowc->mnemval[9].val = cpu_to_be32(0);
+       } else {
+               flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
+                                       VLAN_PRIO_SHIFT);
+       }
+#endif
 
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
 
@@ -1600,6 +1631,46 @@ static void release_offload_resources(struct cxgbi_sock *csk)
        csk->dst = NULL;
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
+{
+       return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int select_priority(int pri_mask)
+{
+       if (!pri_mask)
+               return 0;
+       return (ffs(pri_mask) - 1);
+}
+
+static u8 get_iscsi_dcb_priority(struct net_device *ndev)
+{
+       int rv;
+       u8 caps;
+
+       struct dcb_app iscsi_dcb_app = {
+               .protocol = 3260
+       };
+
+       rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+       if (rv)
+               return 0;
+
+       if (caps & DCB_CAP_DCBX_VER_IEEE) {
+               iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+               rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+       } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+               iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+               rv = dcb_getapp(ndev, &iscsi_dcb_app);
+       }
+
+       log_debug(1 << CXGBI_DBG_ISCSI,
+                 "iSCSI priority is set to %u\n", select_priority(rv));
+       return select_priority(rv);
+}
+#endif
+
 static int init_act_open(struct cxgbi_sock *csk)
 {
        struct cxgbi_device *cdev = csk->cdev;
@@ -1613,7 +1684,9 @@ static int init_act_open(struct cxgbi_sock *csk)
        unsigned int size, size6;
        unsigned int linkspeed;
        unsigned int rcv_winf, snd_winf;
-
+#ifdef CONFIG_CHELSIO_T4_DCB
+       u8 priority = 0;
+#endif
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1720,15 @@ static int init_act_open(struct cxgbi_sock *csk)
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+       if (get_iscsi_dcb_state(ndev))
+               priority = get_iscsi_dcb_priority(ndev);
+
+       csk->dcb_priority = priority;
+       csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
+#else
        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
+#endif
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource_without_clip;
@@ -2146,6 +2227,70 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
        return 0;
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
+                       void *data)
+{
+       int i, port = 0xFF;
+       struct net_device *ndev;
+       struct cxgbi_device *cdev = NULL;
+       struct dcb_app_type *iscsi_app = data;
+       struct cxgbi_ports_map *pmap;
+       u8 priority;
+
+       if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+               if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+                       return NOTIFY_DONE;
+
+               priority = iscsi_app->app.priority;
+       } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+               if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+                       return NOTIFY_DONE;
+
+               if (!iscsi_app->app.priority)
+                       return NOTIFY_DONE;
+
+               priority = ffs(iscsi_app->app.priority) - 1;
+       } else {
+               return NOTIFY_DONE;
+       }
+
+       if (iscsi_app->app.protocol != 3260)
+               return NOTIFY_DONE;
+
+       log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
+                 iscsi_app->ifindex, priority);
+
+       ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+       if (!ndev)
+               return NOTIFY_DONE;
+
+       cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
+
+       dev_put(ndev);
+       if (!cdev)
+               return NOTIFY_DONE;
+
+       pmap = &cdev->pmap;
+
+       for (i = 0; i < pmap->used; i++) {
+               if (pmap->port_csk[i]) {
+                       struct cxgbi_sock *csk = pmap->port_csk[i];
+
+                       if (csk->dcb_priority != priority) {
+                               iscsi_conn_failure(csk->user_data,
+                                                  ISCSI_ERR_CONN_FAILED);
+                               pr_info("Restarting iSCSI connection %p with "
+                                       "priority %u->%u.\n", csk,
+                                       csk->dcb_priority, priority);
+                       }
+               }
+       }
+       return NOTIFY_OK;
+}
+#endif
+
 static int __init cxgb4i_init_module(void)
 {
        int rc;
@@ -2157,11 +2302,18 @@ static int __init cxgb4i_init_module(void)
                return rc;
        cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+       pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
+       register_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
        return 0;
 }
 
 static void __exit cxgb4i_exit_module(void)
 {
+#ifdef CONFIG_CHELSIO_T4_DCB
+       unregister_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
index dcb190e..5d5d8b5 100644 (file)
@@ -120,6 +120,9 @@ struct cxgbi_sock {
        int wr_max_cred;
        int wr_cred;
        int wr_una_cred;
+#ifdef CONFIG_CHELSIO_T4_DCB
+       u8 dcb_priority;
+#endif
        unsigned char hcrc_len;
        unsigned char dcrc_len;
 
index 1ed2cd8..8c55ec6 100644 (file)
@@ -753,105 +753,6 @@ static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
        return NULL;
 }
 
-
-static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
-{
-       struct list_head *head = &acb->srb_free_list;
-       struct ScsiReqBlk *srb = NULL;
-
-       if (!list_empty(head)) {
-               srb = list_entry(head->next, struct ScsiReqBlk, list);
-               list_del(head->next);
-               dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
-       }
-       return srb;
-}
-
-
-static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
-{
-       dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
-       list_add_tail(&srb->list, &acb->srb_free_list);
-}
-
-
-static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
-               struct ScsiReqBlk *srb)
-{
-       dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
-               srb->cmd, dcb->target_id, dcb->target_lun, srb);
-       list_add(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_append(struct DeviceCtlBlk *dcb,
-               struct ScsiReqBlk *srb)
-{
-       dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
-                srb->cmd, dcb->target_id, dcb->target_lun, srb);
-       list_add_tail(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
-       dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
-               srb->cmd, dcb->target_id, dcb->target_lun, srb);
-       list_add_tail(&srb->list, &dcb->srb_going_list);
-}
-
-
-static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
-       struct ScsiReqBlk *i;
-       struct ScsiReqBlk *tmp;
-       dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
-               srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
-       list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
-               if (i == srb) {
-                       list_del(&srb->list);
-                       break;
-               }
-}
-
-
-static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
-               struct ScsiReqBlk *srb)
-{
-       struct ScsiReqBlk *i;
-       struct ScsiReqBlk *tmp;
-       dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
-               srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
-       list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
-               if (i == srb) {
-                       list_del(&srb->list);
-                       break;
-               }
-}
-
-
-static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
-               struct ScsiReqBlk *srb)
-{
-       dprintkdbg(DBG_0,
-               "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
-               srb->cmd, dcb->target_id, dcb->target_lun, srb);
-       list_move(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
-               struct ScsiReqBlk *srb)
-{
-       dprintkdbg(DBG_0,
-               "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
-               srb->cmd, dcb->target_id, dcb->target_lun, srb);
-       list_move(&srb->list, &dcb->srb_going_list);
-}
-
-
 /* Sets the timer to wake us up */
 static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
 {
@@ -923,7 +824,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
 
                        /* Try to send to the bus */
                        if (!start_scsi(acb, pos, srb))
-                               srb_waiting_to_going_move(pos, srb);
+                               list_move(&srb->list, &pos->srb_going_list);
                        else
                                waiting_set_timer(acb, HZ/50);
                        break;
@@ -960,15 +861,15 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
        if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
            acb->active_dcb ||
            (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
-               srb_waiting_append(dcb, srb);
+               list_add_tail(&srb->list, &dcb->srb_waiting_list);
                waiting_process_next(acb);
                return;
        }
 
-       if (!start_scsi(acb, dcb, srb))
-               srb_going_append(dcb, srb);
-       else {
-               srb_waiting_insert(dcb, srb);
+       if (!start_scsi(acb, dcb, srb)) {
+               list_add_tail(&srb->list, &dcb->srb_going_list);
+       } else {
+               list_add(&srb->list, &dcb->srb_waiting_list);
                waiting_set_timer(acb, HZ / 50);
        }
 }
@@ -1045,10 +946,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
                        sgp->length++;
                }
 
-               srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
-                                               srb->segment_x,
-                                               SEGMENTX_LEN,
-                                               PCI_DMA_TODEVICE);
+               srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
+                               srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
 
                dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
                        srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
        cmd->scsi_done = done;
        cmd->result = 0;
 
-       srb = srb_get_free(acb);
-       if (!srb)
-       {
+       srb = list_first_entry_or_null(&acb->srb_free_list,
+                       struct ScsiReqBlk, list);
+       if (!srb) {
                /*
                 * Return 1 since we are unable to queue this command at this
                 * point in time.
@@ -1126,12 +1025,13 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
                dprintkdbg(DBG_0, "queue_command: No free srb's\n");
                return 1;
        }
+       list_del(&srb->list);
 
        build_srb(cmd, dcb, srb);
 
        if (!list_empty(&dcb->srb_waiting_list)) {
                /* append to waiting queue */
-               srb_waiting_append(dcb, srb);
+               list_add_tail(&srb->list, &dcb->srb_waiting_list);
                waiting_process_next(acb);
        } else {
                /* process immediately */
@@ -1376,11 +1276,11 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
 
        srb = find_cmd(cmd, &dcb->srb_waiting_list);
        if (srb) {
-               srb_waiting_remove(dcb, srb);
+               list_del(&srb->list);
                pci_unmap_srb_sense(acb, srb);
                pci_unmap_srb(acb, srb);
                free_tag(dcb, srb);
-               srb_free_insert(acb, srb);
+               list_add_tail(&srb->list, &acb->srb_free_list);
                dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
                cmd->result = DID_ABORT << 16;
                return SUCCESS;
@@ -1969,14 +1869,15 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
                        xferred -= psge->length;
                } else {
                        /* Partial SG entry done */
+                       dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
+                                       srb->sg_bus_addr, SEGMENTX_LEN,
+                                       DMA_TO_DEVICE);
                        psge->length -= xferred;
                        psge->address += xferred;
                        srb->sg_index = idx;
-                       pci_dma_sync_single_for_device(srb->dcb->
-                                           acb->dev,
-                                           srb->sg_bus_addr,
-                                           SEGMENTX_LEN,
-                                           PCI_DMA_TODEVICE);
+                       dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
+                                       srb->sg_bus_addr, SEGMENTX_LEN,
+                                       DMA_TO_DEVICE);
                        break;
                }
                psge++;
@@ -3083,7 +2984,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
                                        goto disc1;
                                }
                                free_tag(dcb, srb);
-                               srb_going_to_waiting_move(dcb, srb);
+                               list_move(&srb->list, &dcb->srb_waiting_list);
                                dprintkdbg(DBG_KG,
                                        "disconnect: (0x%p) Retry\n",
                                        srb->cmd);
@@ -3148,7 +3049,7 @@ static void reselect(struct AdapterCtlBlk *acb)
 
                        srb->state = SRB_READY;
                        free_tag(dcb, srb);
-                       srb_going_to_waiting_move(dcb, srb);
+                       list_move(&srb->list, &dcb->srb_waiting_list);
                        waiting_set_timer(acb, HZ / 20);
 
                        /* return; */
@@ -3271,9 +3172,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
                /* unmap DC395x SG list */
                dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
                        srb->sg_bus_addr, SEGMENTX_LEN);
-               pci_unmap_single(acb->dev, srb->sg_bus_addr,
-                                SEGMENTX_LEN,
-                                PCI_DMA_TODEVICE);
+               dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
+                               DMA_TO_DEVICE);
                dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
                           scsi_sg_count(cmd), scsi_bufflen(cmd));
                /* unmap the sg segments */
@@ -3291,8 +3191,8 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
        /* Unmap sense buffer */
        dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
               srb->segment_x[0].address);
-       pci_unmap_single(acb->dev, srb->segment_x[0].address,
-                        srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
+                        srb->segment_x[0].length, DMA_FROM_DEVICE);
        /* Restore SG stuff */
        srb->total_xfer_length = srb->xferred;
        srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
                                tempcnt--;
                        dcb->max_command = tempcnt;
                        free_tag(dcb, srb);
-                       srb_going_to_waiting_move(dcb, srb);
+                       list_move(&srb->list, &dcb->srb_waiting_list);
                        waiting_set_timer(acb, HZ / 20);
                        srb->adapter_status = 0;
                        srb->target_status = 0;
@@ -3447,14 +3347,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
                }
        }
 
-       if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
-               pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
-                                       scsi_sg_count(cmd), dir);
-
        ckc_only = 0;
 /* Check Error Conditions */
       ckc_e:
 
+       pci_unmap_srb(acb, srb);
+
        if (cmd->cmnd[0] == INQUIRY) {
                unsigned char *base = NULL;
                struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
                                cmd->cmnd[0], srb->total_xfer_length);
        }
 
-       srb_going_remove(dcb, srb);
-       /* Add to free list */
-       if (srb == acb->tmp_srb)
-               dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
-       else {
+       if (srb != acb->tmp_srb) {
+               /* Add to free list */
                dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
                        cmd, cmd->result);
-               srb_free_insert(acb, srb);
+               list_move_tail(&srb->list, &acb->srb_free_list);
+       } else {
+               dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
        }
-       pci_unmap_srb(acb, srb);
 
        cmd->scsi_done(cmd);
        waiting_process_next(acb);
@@ -3535,9 +3431,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
                        result = MK_RES(0, did_flag, 0, 0);
                        printk("G:%p(%02i-%i) ", p,
                               p->device->id, (u8)p->device->lun);
-                       srb_going_remove(dcb, srb);
+                       list_del(&srb->list);
                        free_tag(dcb, srb);
-                       srb_free_insert(acb, srb);
+                       list_add_tail(&srb->list, &acb->srb_free_list);
                        p->result = result;
                        pci_unmap_srb_sense(acb, srb);
                        pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
                        result = MK_RES(0, did_flag, 0, 0);
                        printk("W:%p<%02i-%i>", p, p->device->id,
                               (u8)p->device->lun);
-                       srb_waiting_remove(dcb, srb);
-                       srb_free_insert(acb, srb);
+                       list_move_tail(&srb->list, &acb->srb_free_list);
                        p->result = result;
                        pci_unmap_srb_sense(acb, srb);
                        pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
        srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
        srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
        /* Map sense buffer */
-       srb->segment_x[0].address =
-           pci_map_single(acb->dev, cmd->sense_buffer,
-                          SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+       srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
+                       cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+                       DMA_FROM_DEVICE);
        dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
               cmd->sense_buffer, srb->segment_x[0].address,
               SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
                dprintkl(KERN_DEBUG,
                        "request_sense: (0x%p) failed <%02i-%i>\n",
                        srb->cmd, dcb->target_id, dcb->target_lun);
-               srb_going_to_waiting_move(dcb, srb);
+               list_move(&srb->list, &dcb->srb_waiting_list);
                waiting_set_timer(acb, HZ / 100);
        }
 }
@@ -4392,7 +4287,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
        
        /* link static array of srbs into the srb free list */
        for (i = 0; i < acb->srb_count - 1; i++)
-               srb_free_insert(acb, &acb->srb_array[i]);
+               list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
 }
 
 
index c3fc34b..ac7da9d 100644 (file)
@@ -369,19 +369,28 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
 {
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
        struct scatterlist *sg = scsi_sglist(cmd);
-       int dir = cmd->sc_data_direction;
-       int total, i;
+       int total = 0, i;
 
-       if (dir == DMA_NONE)
+       if (cmd->sc_data_direction == DMA_NONE)
                return;
 
-       spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
+       if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+               /*
+                * For pseudo DMA and PIO we need the virtual address instead of
+                * a dma address, so perform an identity mapping.
+                */
+               spriv->num_sg = scsi_sg_count(cmd);
+               for (i = 0; i < spriv->num_sg; i++) {
+                       sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
+                       total += sg_dma_len(&sg[i]);
+               }
+       } else {
+               spriv->num_sg = scsi_dma_map(cmd);
+               for (i = 0; i < spriv->num_sg; i++)
+                       total += sg_dma_len(&sg[i]);
+       }
        spriv->cur_residue = sg_dma_len(sg);
        spriv->cur_sg = sg;
-
-       total = 0;
-       for (i = 0; i < spriv->u.num_sg; i++)
-               total += sg_dma_len(&sg[i]);
        spriv->tot_residue = total;
 }
 
@@ -441,13 +450,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 
 static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 {
-       struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
-       int dir = cmd->sc_data_direction;
-
-       if (dir == DMA_NONE)
-               return;
-
-       esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+       if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+               scsi_dma_unmap(cmd);
 }
 
 static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -478,17 +482,6 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
        spriv->tot_residue = ent->saved_tot_residue;
 }
 
-static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
-{
-       if (cmd->cmd_len == 6 ||
-           cmd->cmd_len == 10 ||
-           cmd->cmd_len == 12) {
-               esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
-       } else {
-               esp->flags |= ESP_FLAG_DOING_SLOWCMD;
-       }
-}
-
 static void esp_write_tgt_config3(struct esp *esp, int tgt)
 {
        if (esp->rev > ESP100A) {
@@ -624,6 +617,26 @@ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
        }
 }
 
+static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       ent->sense_ptr = ent->cmd->sense_buffer;
+       if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+               ent->sense_dma = (uintptr_t)ent->sense_ptr;
+               return;
+       }
+
+       ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
+                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+}
+
+static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+       if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+               dma_unmap_single(esp->dev, ent->sense_dma,
+                                SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+       ent->sense_ptr = NULL;
+}
+
 /* When a contingent allegiance conditon is created, we force feed a
  * REQUEST_SENSE command to the device to fetch the sense data.  I
  * tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +658,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
        if (!ent->sense_ptr) {
                esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
                                  tgt, lun);
-
-               ent->sense_ptr = cmd->sense_buffer;
-               ent->sense_dma = esp->ops->map_single(esp,
-                                                     ent->sense_ptr,
-                                                     SCSI_SENSE_BUFFERSIZE,
-                                                     DMA_FROM_DEVICE);
+               esp_map_sense(esp, ent);
        }
        ent->saved_sense_ptr = ent->sense_ptr;
 
@@ -717,10 +725,10 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
 static void esp_maybe_execute_command(struct esp *esp)
 {
        struct esp_target_data *tp;
-       struct esp_lun_data *lp;
        struct scsi_device *dev;
        struct scsi_cmnd *cmd;
        struct esp_cmd_entry *ent;
+       bool select_and_stop = false;
        int tgt, lun, i;
        u32 val, start_cmd;
        u8 *p;
@@ -743,7 +751,6 @@ static void esp_maybe_execute_command(struct esp *esp)
        tgt = dev->id;
        lun = dev->lun;
        tp = &esp->target[tgt];
-       lp = dev->hostdata;
 
        list_move(&ent->list, &esp->active_cmds);
 
@@ -752,7 +759,8 @@ static void esp_maybe_execute_command(struct esp *esp)
        esp_map_dma(esp, cmd);
        esp_save_pointers(esp, ent);
 
-       esp_check_command_len(esp, cmd);
+       if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
+               select_and_stop = true;
 
        p = esp->command_block;
 
@@ -793,42 +801,22 @@ static void esp_maybe_execute_command(struct esp *esp)
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                }
 
-               /* Process it like a slow command.  */
-               if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
-                       esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+               /* If there are multiple message bytes, use Select and Stop */
+               if (esp->msg_out_len)
+                       select_and_stop = true;
        }
 
 build_identify:
-       /* If we don't have a lun-data struct yet, we're probing
-        * so do not disconnect.  Also, do not disconnect unless
-        * we have a tag on this command.
-        */
-       if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
-               *p++ = IDENTIFY(1, lun);
-       else
-               *p++ = IDENTIFY(0, lun);
+       *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
 
        if (ent->tag[0] && esp->rev == ESP100) {
                /* ESP100 lacks select w/atn3 command, use select
                 * and stop instead.
                 */
-               esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+               select_and_stop = true;
        }
 
-       if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
-               start_cmd = ESP_CMD_SELA;
-               if (ent->tag[0]) {
-                       *p++ = ent->tag[0];
-                       *p++ = ent->tag[1];
-
-                       start_cmd = ESP_CMD_SA3;
-               }
-
-               for (i = 0; i < cmd->cmd_len; i++)
-                       *p++ = cmd->cmnd[i];
-
-               esp->select_state = ESP_SELECT_BASIC;
-       } else {
+       if (select_and_stop) {
                esp->cmd_bytes_left = cmd->cmd_len;
                esp->cmd_bytes_ptr = &cmd->cmnd[0];
 
@@ -843,6 +831,19 @@ build_identify:
 
                start_cmd = ESP_CMD_SELAS;
                esp->select_state = ESP_SELECT_MSGOUT;
+       } else {
+               start_cmd = ESP_CMD_SELA;
+               if (ent->tag[0]) {
+                       *p++ = ent->tag[0];
+                       *p++ = ent->tag[1];
+
+                       start_cmd = ESP_CMD_SA3;
+               }
+
+               for (i = 0; i < cmd->cmd_len; i++)
+                       *p++ = cmd->cmnd[i];
+
+               esp->select_state = ESP_SELECT_BASIC;
        }
        val = tgt;
        if (esp->rev == FASHME)
@@ -902,9 +903,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
        }
 
        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
-               esp->ops->unmap_single(esp, ent->sense_dma,
-                                      SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-               ent->sense_ptr = NULL;
+               esp_unmap_sense(esp, ent);
 
                /* Restore the message/status bytes to what we actually
                 * saw originally.  Also, report that we are providing
@@ -965,7 +964,7 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
        cmd->scsi_done = done;
 
        spriv = ESP_CMD_PRIV(cmd);
-       spriv->u.dma_addr = ~(dma_addr_t)0x0;
+       spriv->num_sg = 0;
 
        list_add_tail(&ent->list, &esp->queued_cmds);
 
@@ -1252,14 +1251,10 @@ static int esp_finish_select(struct esp *esp)
                        esp_unmap_dma(esp, cmd);
                        esp_free_lun_tag(ent, cmd->device->hostdata);
                        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
-                       esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
                        esp->cmd_bytes_ptr = NULL;
                        esp->cmd_bytes_left = 0;
                } else {
-                       esp->ops->unmap_single(esp, ent->sense_dma,
-                                              SCSI_SENSE_BUFFERSIZE,
-                                              DMA_FROM_DEVICE);
-                       ent->sense_ptr = NULL;
+                       esp_unmap_sense(esp, ent);
                }
 
                /* Now that the state is unwound properly, put back onto
@@ -1303,9 +1298,8 @@ static int esp_finish_select(struct esp *esp)
                                esp_flush_fifo(esp);
                }
 
-               /* If we are doing a slow command, negotiation, etc.
-                * we'll do the right thing as we transition to the
-                * next phase.
+               /* If we are doing a Select And Stop command, negotiation, etc.
+                * we'll do the right thing as we transition to the next phase.
                 */
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                return 0;
@@ -1338,6 +1332,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
 
        bytes_sent = esp->data_dma_len;
        bytes_sent -= ecount;
+       bytes_sent -= esp->send_cmd_residual;
 
        /*
         * The am53c974 has a DMA 'pecularity'. The doc states:
@@ -1358,7 +1353,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
                        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
                        u8 *ptr;
 
-                       ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+                       ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
                                                  &offset, &count);
                        if (likely(ptr)) {
                                *(ptr + offset) = bval;
@@ -2039,11 +2034,8 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
        esp_free_lun_tag(ent, cmd->device->hostdata);
        cmd->result = DID_RESET << 16;
 
-       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
-               esp->ops->unmap_single(esp, ent->sense_dma,
-                                      SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
-               ent->sense_ptr = NULL;
-       }
+       if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+               esp_unmap_sense(esp, ent);
 
        cmd->scsi_done(cmd);
        list_del(&ent->list);
@@ -2382,7 +2374,7 @@ static const char *esp_chip_names[] = {
 
 static struct scsi_transport_template *esp_transport_template;
 
-int scsi_esp_register(struct esp *esp, struct device *dev)
+int scsi_esp_register(struct esp *esp)
 {
        static int instance;
        int err;
@@ -2402,10 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
 
        esp_bootup_reset(esp);
 
-       dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+       dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
                   esp->host->unique_id, esp->regs, esp->dma_regs,
                   esp->host->irq);
-       dev_printk(KERN_INFO, dev,
+       dev_printk(KERN_INFO, esp->dev,
                   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
                   esp->host->unique_id, esp_chip_names[esp->rev],
                   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2413,7 +2405,7 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
        /* Let the SCSI bus reset settle. */
        ssleep(esp_bus_reset_settle);
 
-       err = scsi_add_host(esp->host, dev);
+       err = scsi_add_host(esp->host, esp->dev);
        if (err)
                return err;
 
@@ -2790,3 +2782,131 @@ MODULE_PARM_DESC(esp_debug,
 
 module_init(esp_init);
 module_exit(esp_exit);
+
+#ifdef CONFIG_SCSI_ESP_PIO
+static inline unsigned int esp_wait_for_fifo(struct esp *esp)
+{
+       int i = 500000;
+
+       do {
+               unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+               if (fbytes)
+                       return fbytes;
+
+               udelay(1);
+       } while (--i);
+
+       shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
+                    esp_read8(ESP_STATUS));
+       return 0;
+}
+
+static inline int esp_wait_for_intr(struct esp *esp)
+{
+       int i = 500000;
+
+       do {
+               esp->sreg = esp_read8(ESP_STATUS);
+               if (esp->sreg & ESP_STAT_INTR)
+                       return 0;
+
+               udelay(1);
+       } while (--i);
+
+       shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
+                    esp->sreg);
+       return 1;
+}
+
+#define ESP_FIFO_SIZE 16
+
+void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+                     u32 dma_count, int write, u8 cmd)
+{
+       u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+       cmd &= ~ESP_CMD_DMA;
+       esp->send_cmd_error = 0;
+
+       if (write) {
+               u8 *dst = (u8 *)addr;
+               u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+               scsi_esp_cmd(esp, cmd);
+
+               while (1) {
+                       if (!esp_wait_for_fifo(esp))
+                               break;
+
+                       *dst++ = readb(esp->fifo_reg);
+                       --esp_count;
+
+                       if (!esp_count)
+                               break;
+
+                       if (esp_wait_for_intr(esp)) {
+                               esp->send_cmd_error = 1;
+                               break;
+                       }
+
+                       if ((esp->sreg & ESP_STAT_PMASK) != phase)
+                               break;
+
+                       esp->ireg = esp_read8(ESP_INTRPT);
+                       if (esp->ireg & mask) {
+                               esp->send_cmd_error = 1;
+                               break;
+                       }
+
+                       if (phase == ESP_MIP)
+                               esp_write8(ESP_CMD_MOK, ESP_CMD);
+
+                       esp_write8(ESP_CMD_TI, ESP_CMD);
+               }
+       } else {
+               unsigned int n = ESP_FIFO_SIZE;
+               u8 *src = (u8 *)addr;
+
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+               if (n > esp_count)
+                       n = esp_count;
+               writesb(esp->fifo_reg, src, n);
+               src += n;
+               esp_count -= n;
+
+               scsi_esp_cmd(esp, cmd);
+
+               while (esp_count) {
+                       if (esp_wait_for_intr(esp)) {
+                               esp->send_cmd_error = 1;
+                               break;
+                       }
+
+                       if ((esp->sreg & ESP_STAT_PMASK) != phase)
+                               break;
+
+                       esp->ireg = esp_read8(ESP_INTRPT);
+                       if (esp->ireg & ~ESP_INTR_BSERV) {
+                               esp->send_cmd_error = 1;
+                               break;
+                       }
+
+                       n = ESP_FIFO_SIZE -
+                           (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+
+                       if (n > esp_count)
+                               n = esp_count;
+                       writesb(esp->fifo_reg, src, n);
+                       src += n;
+                       esp_count -= n;
+
+                       esp_write8(ESP_CMD_TI, ESP_CMD);
+               }
+       }
+
+       esp->send_cmd_residual = esp_count;
+}
+EXPORT_SYMBOL(esp_send_pio_cmd);
+#endif
index 8163dca..aa87a6b 100644 (file)
 #define SYNC_DEFP_FAST            0x19   /* 10mb/s */
 
 struct esp_cmd_priv {
-       union {
-               dma_addr_t      dma_addr;
-               int             num_sg;
-       } u;
-
+       int                     num_sg;
        int                     cur_residue;
        struct scatterlist      *cur_sg;
        int                     tot_residue;
@@ -363,19 +359,6 @@ struct esp_driver_ops {
        void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
        u8 (*esp_read8)(struct esp *esp, unsigned long reg);
 
-       /* Map and unmap DMA memory.  Eventually the driver will be
-        * converted to the generic DMA API as soon as SBUS is able to
-        * cope with that.  At such time we can remove this.
-        */
-       dma_addr_t (*map_single)(struct esp *esp, void *buf,
-                                size_t sz, int dir);
-       int (*map_sg)(struct esp *esp, struct scatterlist *sg,
-                     int num_sg, int dir);
-       void (*unmap_single)(struct esp *esp, dma_addr_t addr,
-                            size_t sz, int dir);
-       void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
-                        int num_sg, int dir);
-
        /* Return non-zero if there is an IRQ pending.  Usually this
         * status bit lives in the DMA controller sitting in front of
         * the ESP.  This has to be accurate or else the ESP interrupt
@@ -435,7 +418,7 @@ struct esp {
        const struct esp_driver_ops *ops;
 
        struct Scsi_Host        *host;
-       void                    *dev;
+       struct device           *dev;
 
        struct esp_cmd_entry    *active_cmd;
 
@@ -490,11 +473,11 @@ struct esp {
        u32                     flags;
 #define ESP_FLAG_DIFFERENTIAL  0x00000001
 #define ESP_FLAG_RESETTING     0x00000002
-#define ESP_FLAG_DOING_SLOWCMD 0x00000004
 #define ESP_FLAG_WIDE_CAPABLE  0x00000008
 #define ESP_FLAG_QUICKIRQ_CHECK        0x00000010
 #define ESP_FLAG_DISABLE_SYNC  0x00000020
 #define ESP_FLAG_USE_FIFO      0x00000040
+#define ESP_FLAG_NO_DMA_MAP    0x00000080
 
        u8                      select_state;
 #define ESP_SELECT_NONE                0x00 /* Not selecting */
@@ -532,7 +515,7 @@ struct esp {
        u32                     min_period;
        u32                     radelay;
 
-       /* Slow command state.  */
+       /* ESP_CMD_SELAS command state */
        u8                      *cmd_bytes_ptr;
        int                     cmd_bytes_left;
 
@@ -540,6 +523,11 @@ struct esp {
 
        void                    *dma;
        int                     dmarev;
+
+       /* These are used by esp_send_pio_cmd() */
+       u8 __iomem              *fifo_reg;
+       int                     send_cmd_error;
+       u32                     send_cmd_residual;
 };
 
 /* A front-end driver for the ESP chip should do the following in
@@ -568,16 +556,18 @@ struct esp {
  *     example, the DMA engine has to be reset before ESP can
  *     be programmed.
  * 11) If necessary, call dev_set_drvdata() as needed.
- * 12) Call scsi_esp_register() with prepared 'esp' structure
- *     and a device pointer if possible.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure.
  * 13) Check scsi_esp_register() return value, release all resources
  *     if an error was returned.
  */
 extern struct scsi_host_template scsi_esp_template;
-extern int scsi_esp_register(struct esp *, struct device *);
+extern int scsi_esp_register(struct esp *);
 
 extern void scsi_esp_unregister(struct esp *);
 extern irqreturn_t scsi_esp_intr(int, void *);
 extern void scsi_esp_cmd(struct esp *, u8);
 
+extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
+                            u32 dma_count, int write, u8 cmd);
+
 #endif /* !(_ESP_SCSI_H) */
index c7bf316..844ef68 100644 (file)
@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
        u32 fcp_bytes_written = 0;
        unsigned long flags;
 
-       pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-                        PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                        DMA_FROM_DEVICE);
        skb = buf->os_buf;
        fp = (struct fc_frame *)skb;
        buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        skb_put(skb, len);
-       pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
-       if (pci_dma_mapping_error(fnic->pdev, pa)) {
+       pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+       if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                r = -ENOMEM;
                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
                goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(rq->vdev);
 
-       pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-                        PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                        DMA_FROM_DEVICE);
 
        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
        struct ethhdr *eth_hdr;
        struct vlan_ethhdr *vlan_hdr;
        unsigned long flags;
-       int r;
 
        if (!fnic->vlan_hw_insert) {
                eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
                }
        }
 
-       pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
-       r = pci_dma_mapping_error(fnic->pdev, pa);
-       if (r) {
-               printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+       pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+                       DMA_TO_DEVICE);
+       if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+               printk(KERN_ERR "DMA mapping failed\n");
                goto free_skb;
        }
 
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
 irq_restore:
        spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-       pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
 free_skb:
        kfree_skb(skb);
 }
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
        if (FC_FCOE_VER)
                FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
 
-       pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
-       if (pci_dma_mapping_error(fnic->pdev, pa)) {
+       pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                ret = -ENOMEM;
                printk(KERN_ERR "DMA map failed with error %d\n", ret);
                goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
        spin_lock_irqsave(&fnic->wq_lock[0], flags);
 
        if (!vnic_wq_desc_avail(wq)) {
-               pci_unmap_single(fnic->pdev, pa,
-                                tot_len, PCI_DMA_TODEVICE);
+               dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
                ret = -1;
                goto irq_restore;
        }
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
        struct fc_frame *fp = (struct fc_frame *)skb;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-       pci_unmap_single(fnic->pdev, buf->dma_addr,
-                        buf->len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                        DMA_TO_DEVICE);
        dev_kfree_skb_irq(fp_skb(fp));
        buf->os_buf = NULL;
 }
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        struct fc_frame *fp = buf->os_buf;
        struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-       pci_unmap_single(fnic->pdev, buf->dma_addr,
-                        buf->len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                        DMA_TO_DEVICE);
 
        dev_kfree_skb(fp_skb(fp));
        buf->os_buf = NULL;
index e52599f..cc461fd 100644 (file)
@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * limitation for the device.  Try 64-bit first, and
         * fail to 32-bit.
         */
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        shost_printk(KERN_ERR, fnic->lport->host,
                                     "No usable DMA configuration "
                                     "aborting\n");
                        goto err_out_release_regions;
                }
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (err) {
-                       shost_printk(KERN_ERR, fnic->lport->host,
-                                    "Unable to obtain 32-bit DMA "
-                                    "for consistent allocations, aborting.\n");
-                       goto err_out_release_regions;
-               }
-       } else {
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (err) {
-                       shost_printk(KERN_ERR, fnic->lport->host,
-                                    "Unable to obtain 64-bit DMA "
-                                    "for consistent allocations, aborting.\n");
-                       goto err_out_release_regions;
-               }
        }
 
        /* Map vNIC resources from BAR0 */
index 8cbd3c9..96acfce 100644 (file)
@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
                                   struct scsi_cmnd *sc)
 {
        if (io_req->sgl_list_pa)
-               pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+               dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
-                                PCI_DMA_TODEVICE);
+                                DMA_TO_DEVICE);
        scsi_dma_unmap(sc);
 
        if (io_req->sgl_cnt)
                mempool_free(io_req->sgl_list_alloc,
                             fnic->io_sgl_pool[io_req->sgl_type]);
        if (io_req->sense_buf_pa)
-               pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
-                                SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+                                SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
        int flags;
        u8 exch_flags;
        struct scsi_lun fc_lun;
-       int r;
 
        if (sg_count) {
                /* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                        desc++;
                }
 
-               io_req->sgl_list_pa = pci_map_single
-                       (fnic->pdev,
-                        io_req->sgl_list,
-                        sizeof(io_req->sgl_list[0]) * sg_count,
-                        PCI_DMA_TODEVICE);
-
-               r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
-               if (r) {
-                       printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+               io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+                               io_req->sgl_list,
+                               sizeof(io_req->sgl_list[0]) * sg_count,
+                               DMA_TO_DEVICE);
+               if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+                       printk(KERN_ERR "DMA mapping failed\n");
                        return SCSI_MLQUEUE_HOST_BUSY;
                }
        }
 
-       io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+       io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
                                              sc->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
-                                             PCI_DMA_FROMDEVICE);
-
-       r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
-       if (r) {
-               pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+                                             DMA_FROM_DEVICE);
+       if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+               dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                sizeof(io_req->sgl_list[0]) * sg_count,
-                               PCI_DMA_TODEVICE);
-               printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+                               DMA_TO_DEVICE);
+               printk(KERN_ERR "DMA mapping failed\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
@@ -2272,33 +2266,17 @@ clean_pending_aborts_end:
 static inline int
 fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-       struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-       int tag, ret = SCSI_NO_TAG;
-
-       BUG_ON(!bqt);
-       if (!bqt) {
-               pr_err("Tags are not supported\n");
-               goto end;
-       }
-
-       do {
-               tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
-               if (tag >= bqt->max_depth) {
-                       pr_err("Tag allocation failure\n");
-                       goto end;
-               }
-       } while (test_and_set_bit(tag, bqt->tag_map));
+       struct request_queue *q = sc->request->q;
+       struct request *dummy;
 
-       bqt->tag_index[tag] = sc->request;
-       sc->request->tag = tag;
-       sc->tag = tag;
-       if (!sc->request->special)
-               sc->request->special = sc;
+       dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+       if (IS_ERR(dummy))
+               return SCSI_NO_TAG;
 
-       ret = tag;
+       sc->tag = sc->request->tag = dummy->tag;
+       sc->request->special = sc;
 
-end:
-       return ret;
+       return dummy->tag;
 }
 
 /**
@@ -2308,20 +2286,9 @@ end:
 static inline void
 fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
 {
-       struct blk_queue_tag *bqt = fnic->lport->host->bqt;
-       int tag = sc->request->tag;
+       struct request *dummy = sc->request->special;
 
-       if (tag == SCSI_NO_TAG)
-               return;
-
-       BUG_ON(!bqt || !bqt->tag_index[tag]);
-       if (!bqt)
-               return;
-
-       bqt->tag_index[tag] = NULL;
-       clear_bit(tag, bqt->tag_map);
-
-       return;
+       blk_mq_free_request(dummy);
 }
 
 /*
@@ -2380,19 +2347,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
        tag = sc->request->tag;
        if (unlikely(tag < 0)) {
                /*
-                * XXX(hch): current the midlayer fakes up a struct
-                * request for the explicit reset ioctls, and those
-                * don't have a tag allocated to them.  The below
-                * code pokes into midlayer structures to paper over
-                * this design issue, but that won't work for blk-mq.
-                *
-                * Either someone who can actually test the hardware
-                * will have to come up with a similar hack for the
-                * blk-mq case, or we'll have to bite the bullet and
-                * fix the way the EH ioctls work for real, but until
-                * that happens we fail these explicit requests here.
+                * Really should fix the midlayer to pass in a proper
+                * request for ioctls...
                 */
-
                tag = fnic_scsi_host_start_tag(fnic, sc);
                if (unlikely(tag == SCSI_NO_TAG))
                        goto fnic_device_reset_end;
index ba69d61..434447e 100644 (file)
@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
        vnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-       ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+       ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
                ring->size_unaligned,
-               &ring->base_addr_unaligned);
+               &ring->base_addr_unaligned, GFP_KERNEL);
 
        if (!ring->descs_unaligned) {
                printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
        if (ring->descs) {
-               pci_free_consistent(vdev->pdev,
+               dma_free_coherent(&vdev->pdev->dev,
                        ring->size_unaligned,
                        ring->descs_unaligned,
                        ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
        int err = 0;
 
        if (!vdev->fw_info) {
-               vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+               vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_devcmd_fw_info),
-                       &vdev->fw_info_pa);
+                       &vdev->fw_info_pa, GFP_KERNEL);
                if (!vdev->fw_info)
                        return -ENOMEM;
 
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
        int wait = 1000;
 
        if (!vdev->stats) {
-               vdev->stats = pci_alloc_consistent(vdev->pdev,
-                       sizeof(struct vnic_stats), &vdev->stats_pa);
+               vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+                       sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
                if (!vdev->stats)
                        return -ENOMEM;
        }
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
        int wait = 1000;
 
        if (!vdev->notify) {
-               vdev->notify = pci_alloc_consistent(vdev->pdev,
+               vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_devcmd_notify),
-                       &vdev->notify_pa);
+                       &vdev->notify_pa, GFP_KERNEL);
                if (!vdev->notify)
                        return -ENOMEM;
        }
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 {
        if (vdev) {
                if (vdev->notify)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_devcmd_notify),
                                vdev->notify,
                                vdev->notify_pa);
                if (vdev->linkstatus)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(u32),
                                vdev->linkstatus,
                                vdev->linkstatus_pa);
                if (vdev->stats)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_stats),
                                vdev->stats, vdev->stats_pa);
                if (vdev->fw_info)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_devcmd_fw_info),
                                vdev->fw_info, vdev->fw_info_pa);
                kfree(vdev);
index 6c7d2e2..0ddb53c 100644 (file)
@@ -34,6 +34,7 @@
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT     0
 #define HISI_SAS_REJECT_CMD_BIT        1
+#define HISI_SAS_RESERVED_IPTT_CNT  96
 
 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
 #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -217,7 +218,7 @@ struct hisi_sas_hw {
        int (*hw_init)(struct hisi_hba *hisi_hba);
        void (*setup_itct)(struct hisi_hba *hisi_hba,
                           struct hisi_sas_device *device);
-       int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+       int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
                                struct domain_device *device);
        struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
        void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
index a4e2e6a..b3f01d5 100644 (file)
@@ -183,7 +183,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 
 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
-       hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+       unsigned long flags;
+
+       if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
+           hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+               spin_lock_irqsave(&hisi_hba->lock, flags);
+               hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+               spin_unlock_irqrestore(&hisi_hba->lock, flags);
+       }
 }
 
 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +200,34 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
        set_bit(slot_idx, bitmap);
 }
 
-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+                                    struct scsi_cmnd *scsi_cmnd)
 {
-       unsigned int index;
+       int index;
        void *bitmap = hisi_hba->slot_index_tags;
+       unsigned long flags;
+
+       if (scsi_cmnd)
+               return scsi_cmnd->request->tag;
 
+       spin_lock_irqsave(&hisi_hba->lock, flags);
        index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
-                       hisi_hba->last_slot_index + 1);
+                                  hisi_hba->last_slot_index + 1);
        if (index >= hisi_hba->slot_index_count) {
-               index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
-                                          0);
-               if (index >= hisi_hba->slot_index_count)
+               index = find_next_zero_bit(bitmap,
+                               hisi_hba->slot_index_count,
+                               hisi_hba->hw->max_command_entries -
+                               HISI_SAS_RESERVED_IPTT_CNT);
+               if (index >= hisi_hba->slot_index_count) {
+                       spin_unlock_irqrestore(&hisi_hba->lock, flags);
                        return -SAS_QUEUE_FULL;
+               }
        }
        hisi_sas_slot_index_set(hisi_hba, index);
-       *slot_idx = index;
        hisi_hba->last_slot_index = index;
+       spin_unlock_irqrestore(&hisi_hba->lock, flags);
 
-       return 0;
+       return index;
 }
 
 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -249,9 +266,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 
        memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
 
-       spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_index_free(hisi_hba, slot->idx);
-       spin_unlock_irqrestore(&hisi_hba->lock, flags);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
 
@@ -287,13 +302,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
                              int *pass)
 {
        struct domain_device *device = task->dev;
-       struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+       struct hisi_hba *hisi_hba;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct hisi_sas_port *port;
        struct hisi_sas_slot *slot;
        struct hisi_sas_cmd_hdr *cmd_hdr_base;
        struct asd_sas_port *sas_port = device->port;
-       struct device *dev = hisi_hba->dev;
+       struct device *dev;
        int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
        int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
        struct hisi_sas_dq *dq;
@@ -314,6 +329,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
                return -ECOMM;
        }
 
+       hisi_hba = dev_to_hisi_hba(device);
+       dev = hisi_hba->dev;
+
        if (DEV_IS_GONE(sas_dev)) {
                if (sas_dev)
                        dev_info(dev, "task prep: device %d not ready\n",
@@ -381,16 +399,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
                goto err_out_dma_unmap;
        }
 
-       spin_lock_irqsave(&hisi_hba->lock, flags);
        if (hisi_hba->hw->slot_index_alloc)
-               rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
-                                                   device);
-       else
-               rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-       spin_unlock_irqrestore(&hisi_hba->lock, flags);
-       if (rc)
+               rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+       else {
+               struct scsi_cmnd *scsi_cmnd = NULL;
+
+               if (task->uldd_task) {
+                       struct ata_queued_cmd *qc;
+
+                       if (dev_is_sata(device)) {
+                               qc = task->uldd_task;
+                               scsi_cmnd = qc->scsicmd;
+                       } else {
+                               scsi_cmnd = task->uldd_task;
+                       }
+               }
+               rc  = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+       }
+       if (rc < 0)
                goto err_out_dma_unmap;
 
+       slot_idx = rc;
        slot = &hisi_hba->slot_info[slot_idx];
 
        spin_lock_irqsave(&dq->lock, flags);
@@ -451,9 +480,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
        return 0;
 
 err_out_tag:
-       spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_index_free(hisi_hba, slot_idx);
-       spin_unlock_irqrestore(&hisi_hba->lock, flags);
 err_out_dma_unmap:
        if (!sas_protocol_ata(task->task_proto)) {
                if (task->num_scatter) {
@@ -904,6 +931,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
        _r.maximum_linkrate = max;
        _r.minimum_linkrate = min;
 
+       sas_phy->phy->maximum_linkrate = max;
+       sas_phy->phy->minimum_linkrate = min;
+
        hisi_hba->hw->phy_disable(hisi_hba, phy_no);
        msleep(100);
        hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
@@ -950,8 +980,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
 
 static void hisi_sas_task_done(struct sas_task *task)
 {
-       if (!del_timer(&task->slow_task->timer))
-               return;
+       del_timer(&task->slow_task->timer);
        complete(&task->slow_task->completion);
 }
 
@@ -960,13 +989,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
        struct sas_task_slow *slow = from_timer(slow, t, timer);
        struct sas_task *task = slow->task;
        unsigned long flags;
+       bool is_completed = true;
 
        spin_lock_irqsave(&task->task_state_lock, flags);
-       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+               is_completed = false;
+       }
        spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-       complete(&task->slow_task->completion);
+       if (!is_completed)
+               complete(&task->slow_task->completion);
 }
 
 #define TASK_TIMEOUT 20
@@ -1019,8 +1052,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                                struct hisi_sas_slot *slot = task->lldd_task;
 
                                dev_err(dev, "abort tmf: TMF task timeout and not done\n");
-                               if (slot)
+                               if (slot) {
+                                       struct hisi_sas_cq *cq =
+                                              &hisi_hba->cq[slot->dlvry_queue];
+                                       /*
+                                        * flush tasklet to avoid free'ing task
+                                        * before using task in IO completion
+                                        */
+                                       tasklet_kill(&cq->tasklet);
                                        slot->task = NULL;
+                               }
 
                                goto ex_err;
                        } else
@@ -1396,6 +1437,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
 
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+               struct hisi_sas_slot *slot = task->lldd_task;
+               struct hisi_sas_cq *cq;
+
+               if (slot) {
+                       /*
+                        * flush tasklet to avoid free'ing task
+                        * before using task in IO completion
+                        */
+                       cq = &hisi_hba->cq[slot->dlvry_queue];
+                       tasklet_kill(&cq->tasklet);
+               }
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                rc = TMF_RESP_FUNC_COMPLETE;
                goto out;
@@ -1451,12 +1503,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
                /* SMP */
                struct hisi_sas_slot *slot = task->lldd_task;
                u32 tag = slot->idx;
+               struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
 
                rc = hisi_sas_internal_task_abort(hisi_hba, device,
                             HISI_SAS_INT_ABT_CMD, tag);
                if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
-                                       task->lldd_task)
-                       hisi_sas_do_release_task(hisi_hba, task, slot);
+                                       task->lldd_task) {
+                       /*
+                        * flush tasklet to avoid free'ing task
+                        * before using task in IO completion
+                        */
+                       tasklet_kill(&cq->tasklet);
+                       slot->task = NULL;
+               }
        }
 
 out:
@@ -1705,14 +1764,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
        port = to_hisi_sas_port(sas_port);
 
        /* simply get a slot and send abort command */
-       spin_lock_irqsave(&hisi_hba->lock, flags);
-       rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
-       if (rc) {
-               spin_unlock_irqrestore(&hisi_hba->lock, flags);
+       rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+       if (rc < 0)
                goto err_out;
-       }
-       spin_unlock_irqrestore(&hisi_hba->lock, flags);
 
+       slot_idx = rc;
        slot = &hisi_hba->slot_info[slot_idx];
 
        spin_lock_irqsave(&dq->lock, flags_dq);
@@ -1748,7 +1804,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
        spin_lock_irqsave(&task->task_state_lock, flags);
        task->task_state_flags |= SAS_TASK_AT_INITIATOR;
        spin_unlock_irqrestore(&task->task_state_lock, flags);
-
        WRITE_ONCE(slot->ready, 1);
        /* send abort command to the chip */
        spin_lock_irqsave(&dq->lock, flags);
@@ -1759,9 +1814,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
        return 0;
 
 err_out_tag:
-       spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_index_free(hisi_hba, slot_idx);
-       spin_unlock_irqrestore(&hisi_hba->lock, flags);
 err_out:
        dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
 
@@ -1823,8 +1876,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                        struct hisi_sas_slot *slot = task->lldd_task;
 
-                       if (slot)
+                       if (slot) {
+                               struct hisi_sas_cq *cq =
+                                       &hisi_hba->cq[slot->dlvry_queue];
+                               /*
+                                * flush tasklet to avoid free'ing task
+                                * before using task in IO completion
+                                */
+                               tasklet_kill(&cq->tasklet);
                                slot->task = NULL;
+                       }
                        dev_err(dev, "internal task abort: timeout and not done.\n");
                        res = -EIO;
                        goto exit;
@@ -1861,10 +1922,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
        hisi_sas_port_notify_formed(sas_phy);
 }
 
-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
-}
-
 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
                        u8 reg_index, u8 reg_count, u8 *write_data)
 {
@@ -1954,10 +2011,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
        .lldd_I_T_nexus_reset   = hisi_sas_I_T_nexus_reset,
        .lldd_lu_reset          = hisi_sas_lu_reset,
        .lldd_query_task        = hisi_sas_query_task,
-       .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
+       .lldd_clear_nexus_ha    = hisi_sas_clear_nexus_ha,
        .lldd_port_formed       = hisi_sas_port_formed,
-       .lldd_port_deformed = hisi_sas_port_deformed,
-       .lldd_write_gpio = hisi_sas_write_gpio,
+       .lldd_write_gpio        = hisi_sas_write_gpio,
 };
 
 void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2120,6 +2176,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
        hisi_sas_init_mem(hisi_hba);
 
        hisi_sas_slot_index_init(hisi_hba);
+       hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
+               HISI_SAS_RESERVED_IPTT_CNT;
 
        hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
        if (!hisi_hba->wq) {
@@ -2323,8 +2381,15 @@ int hisi_sas_probe(struct platform_device *pdev,
        shost->max_channel = 1;
        shost->max_cmd_len = 16;
        shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-       shost->can_queue = hisi_hba->hw->max_command_entries;
-       shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+       if (hisi_hba->hw->slot_index_alloc) {
+               shost->can_queue = hisi_hba->hw->max_command_entries;
+               shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+       } else {
+               shost->can_queue = hisi_hba->hw->max_command_entries -
+                       HISI_SAS_RESERVED_IPTT_CNT;
+               shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+                       HISI_SAS_RESERVED_IPTT_CNT;
+       }
 
        sha->sas_ha_name = DRV_NAME;
        sha->dev = hisi_hba->dev;
index 8f60f0e..f0e457e 100644 (file)
@@ -1809,7 +1809,6 @@ static struct scsi_host_template sht_v1_hw = {
        .scan_start             = hisi_sas_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .bios_param             = sas_bios_param,
-       .can_queue              = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
index 9c5c5a6..cc36b64 100644 (file)
@@ -770,7 +770,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 
 /* This function needs to be protected from pre-emption. */
 static int
-slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
                             struct domain_device *device)
 {
        int sata_dev = dev_is_sata(device);
@@ -778,6 +778,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        int sata_idx = sas_dev->sata_idx;
        int start, end;
+       unsigned long flags;
 
        if (!sata_dev) {
                /*
@@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
                end = 64 * (sata_idx + 2);
        }
 
+       spin_lock_irqsave(&hisi_hba->lock, flags);
        while (1) {
                start = find_next_zero_bit(bitmap,
                                        hisi_hba->slot_index_count, start);
-               if (start >= end)
+               if (start >= end) {
+                       spin_unlock_irqrestore(&hisi_hba->lock, flags);
                        return -SAS_QUEUE_FULL;
+               }
                /*
                  * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
                  */
@@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
        }
 
        set_bit(start, bitmap);
-       *slot_idx = start;
-       return 0;
+       spin_unlock_irqrestore(&hisi_hba->lock, flags);
+       return start;
 }
 
 static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -2483,7 +2487,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
        }
 
 out:
-       hisi_sas_slot_task_free(hisi_hba, task, slot);
        sts = ts->stat;
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -2493,6 +2496,7 @@ out:
        }
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);
+       hisi_sas_slot_task_free(hisi_hba, task, slot);
 
        if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
                spin_lock_irqsave(&device->done_lock, flags);
@@ -3560,7 +3564,6 @@ static struct scsi_host_template sht_v2_hw = {
        .scan_start             = hisi_sas_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .bios_param             = sas_bios_param,
-       .can_queue              = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
index 08b503e..bd4ce38 100644 (file)
 #define PHY_CTRL_RESET_OFF             0
 #define PHY_CTRL_RESET_MSK             (0x1 << PHY_CTRL_RESET_OFF)
 #define SL_CFG                         (PORT_BASE + 0x84)
+#define AIP_LIMIT                      (PORT_BASE + 0x90)
 #define SL_CONTROL                     (PORT_BASE + 0x94)
 #define SL_CONTROL_NOTIFY_EN_OFF       0
 #define SL_CONTROL_NOTIFY_EN_MSK       (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -431,6 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
                         (u32)((1ULL << hisi_hba->queue_count) - 1));
        hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
        hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+       hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
        hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
        hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
        hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -441,7 +443,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
        hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
        if (pdev->revision >= 0x21)
-               hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+               hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
        else
                hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
        hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
@@ -495,6 +497,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 
                /* used for 12G negotiate */
                hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+               hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
        }
 
        for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1751,7 +1754,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
        }
 
 out:
-       hisi_sas_slot_task_free(hisi_hba, task, slot);
        sts = ts->stat;
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -1761,6 +1763,7 @@ out:
        }
        task->task_state_flags |= SAS_TASK_STATE_DONE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);
+       hisi_sas_slot_task_free(hisi_hba, task, slot);
 
        if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
                spin_lock_irqsave(&device->done_lock, flags);
@@ -2098,7 +2101,6 @@ static struct scsi_host_template sht_v3_hw = {
        .scan_start             = hisi_sas_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
        .bios_param             = sas_bios_param,
-       .can_queue              = 1,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
@@ -2108,6 +2110,7 @@ static struct scsi_host_template sht_v3_hw = {
        .target_destroy         = sas_target_destroy,
        .ioctl                  = sas_ioctl,
        .shost_attrs            = host_attrs,
+       .tag_alloc_policy       = BLK_TAG_ALLOC_RR,
 };
 
 static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2245,8 +2248,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        shost->max_channel = 1;
        shost->max_cmd_len = 16;
        shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
-       shost->can_queue = hisi_hba->hw->max_command_entries;
-       shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+       shost->can_queue = hisi_hba->hw->max_command_entries -
+               HISI_SAS_RESERVED_IPTT_CNT;
+       shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+               HISI_SAS_RESERVED_IPTT_CNT;
 
        sha->sas_ha_name = DRV_NAME;
        sha->dev = dev;
index c120929..c9cccf3 100644 (file)
@@ -2240,8 +2240,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
 
        chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
        chain_size = le32_to_cpu(cp->sg[0].length);
-       temp64 = pci_map_single(h->pdev, chain_block, chain_size,
-                               PCI_DMA_TODEVICE);
+       temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
+                               DMA_TO_DEVICE);
        if (dma_mapping_error(&h->pdev->dev, temp64)) {
                /* prevent subsequent unmapping */
                cp->sg->address = 0;
@@ -2261,7 +2261,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
        chain_sg = cp->sg;
        temp64 = le64_to_cpu(chain_sg->address);
        chain_size = le32_to_cpu(cp->sg[0].length);
-       pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+       dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
 }
 
 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2277,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
        chain_len = sizeof(*chain_sg) *
                (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
        chain_sg->Len = cpu_to_le32(chain_len);
-       temp64 = pci_map_single(h->pdev, chain_block, chain_len,
-                               PCI_DMA_TODEVICE);
+       temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
+                               DMA_TO_DEVICE);
        if (dma_mapping_error(&h->pdev->dev, temp64)) {
                /* prevent subsequent unmapping */
                chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2297,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
                return;
 
        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
-       pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
-                       le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+       dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
+                       le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
 }
 
 
@@ -2759,13 +2759,13 @@ static void complete_scsi_command(struct CommandList *cp)
        return hpsa_cmd_free_and_done(h, cp, cmd);
 }
 
-static void hpsa_pci_unmap(struct pci_dev *pdev,
-       struct CommandList *c, int sg_used, int data_direction)
+static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
+               int sg_used, enum dma_data_direction data_direction)
 {
        int i;
 
        for (i = 0; i < sg_used; i++)
-               pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+               dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
                                le32_to_cpu(c->SG[i].Len),
                                data_direction);
 }
@@ -2774,17 +2774,17 @@ static int hpsa_map_one(struct pci_dev *pdev,
                struct CommandList *cp,
                unsigned char *buf,
                size_t buflen,
-               int data_direction)
+               enum dma_data_direction data_direction)
 {
        u64 addr64;
 
-       if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+       if (buflen == 0 || data_direction == DMA_NONE) {
                cp->Header.SGList = 0;
                cp->Header.SGTotal = cpu_to_le16(0);
                return 0;
        }
 
-       addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+       addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
        if (dma_mapping_error(&pdev->dev, addr64)) {
                /* Prevent subsequent unmap of something never mapped */
                cp->Header.SGList = 0;
@@ -2845,7 +2845,8 @@ static u32 lockup_detected(struct ctlr_info *h)
 
 #define MAX_DRIVER_CMD_RETRIES 25
 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
-       struct CommandList *c, int data_direction, unsigned long timeout_msecs)
+               struct CommandList *c, enum dma_data_direction data_direction,
+               unsigned long timeout_msecs)
 {
        int backoff_time = 10, retry_count = 0;
        int rc;
@@ -2969,8 +2970,8 @@ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
                rc = -1;
                goto out;
        }
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-               PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if (rc)
                goto out;
        ei = c->err_info;
@@ -3022,8 +3023,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
                rc = -1;
                goto out;
        }
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-                                       PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if (rc)
                goto out;
        ei = c->err_info;
@@ -3306,8 +3307,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
                cmd_free(h, c);
                return -1;
        }
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-                                       PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if (rc)
                goto out;
        ei = c->err_info;
@@ -3349,8 +3350,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
        c->Request.CDB[2] = bmic_device_index & 0xff;
        c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-                               PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if (rc)
                goto out;
        ei = c->err_info;
@@ -3377,8 +3378,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
        if (rc)
                goto out;
 
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-               PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if (rc)
                goto out;
        ei = c->err_info;
@@ -3408,7 +3409,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
        c->Request.CDB[2] = bmic_device_index & 0xff;
        c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
 
-       hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+       hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
                                                NO_TIMEOUT);
        ei = c->err_info;
        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3485,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
        else
                c->Request.CDB[5] = 0;
 
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
                                                NO_TIMEOUT);
        if (rc)
                goto out;
@@ -3731,8 +3732,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
        }
        if (extended_response)
                c->Request.CDB[1] = extended_response;
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-                                       PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if (rc)
                goto out;
        ei = c->err_info;
@@ -6320,8 +6321,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 
        /* Fill in the scatter gather information */
        if (iocommand.buf_size > 0) {
-               temp64 = pci_map_single(h->pdev, buff,
-                       iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+               temp64 = dma_map_single(&h->pdev->dev, buff,
+                       iocommand.buf_size, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
                        c->SG[0].Addr = cpu_to_le64(0);
                        c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6336,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
                                        NO_TIMEOUT);
        if (iocommand.buf_size > 0)
-               hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+               hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
        if (rc) {
                rc = -EIO;
@@ -6381,13 +6382,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                return -EINVAL;
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
-       ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-       if (!ioc) {
-               status = -ENOMEM;
-               goto cleanup1;
-       }
-       if (copy_from_user(ioc, argp, sizeof(*ioc))) {
-               status = -EFAULT;
+       ioc = vmemdup_user(argp, sizeof(*ioc));
+       if (IS_ERR(ioc)) {
+               status = PTR_ERR(ioc);
                goto cleanup1;
        }
        if ((ioc->buf_size < 1) &&
@@ -6447,14 +6444,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        if (ioc->buf_size > 0) {
                int i;
                for (i = 0; i < sg_used; i++) {
-                       temp64 = pci_map_single(h->pdev, buff[i],
-                                   buff_size[i], PCI_DMA_BIDIRECTIONAL);
+                       temp64 = dma_map_single(&h->pdev->dev, buff[i],
+                                   buff_size[i], DMA_BIDIRECTIONAL);
                        if (dma_mapping_error(&h->pdev->dev,
                                                        (dma_addr_t) temp64)) {
                                c->SG[i].Addr = cpu_to_le64(0);
                                c->SG[i].Len = cpu_to_le32(0);
                                hpsa_pci_unmap(h->pdev, c, i,
-                                       PCI_DMA_BIDIRECTIONAL);
+                                       DMA_BIDIRECTIONAL);
                                status = -ENOMEM;
                                goto cleanup0;
                        }
@@ -6467,7 +6464,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
        status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
                                                NO_TIMEOUT);
        if (sg_used)
-               hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+               hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
        check_ioctl_unit_attention(h, c);
        if (status) {
                status = -EIO;
@@ -6505,7 +6502,7 @@ cleanup1:
                kfree(buff);
        }
        kfree(buff_size);
-       kfree(ioc);
+       kvfree(ioc);
        return status;
 }
 
@@ -6579,7 +6576,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type)
 {
-       int pci_dir = XFER_NONE;
+       enum dma_data_direction dir = DMA_NONE;
 
        c->cmd_type = CMD_IOCTL_PEND;
        c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6782,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 
        switch (GET_DIR(c->Request.type_attr_dir)) {
        case XFER_READ:
-               pci_dir = PCI_DMA_FROMDEVICE;
+               dir = DMA_FROM_DEVICE;
                break;
        case XFER_WRITE:
-               pci_dir = PCI_DMA_TODEVICE;
+               dir = DMA_TO_DEVICE;
                break;
        case XFER_NONE:
-               pci_dir = PCI_DMA_NONE;
+               dir = DMA_NONE;
                break;
        default:
-               pci_dir = PCI_DMA_BIDIRECTIONAL;
+               dir = DMA_BIDIRECTIONAL;
        }
-       if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+       if (hpsa_map_one(h->pdev, c, buff, size, dir))
                return -1;
        return 0;
 }
@@ -6992,13 +6989,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
         * CCISS commands, so they must be allocated from the lower 4GiB of
         * memory.
         */
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                iounmap(vaddr);
                return err;
        }
 
-       cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+       cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
        if (cmd == NULL) {
                iounmap(vaddr);
                return -ENOMEM;
@@ -7047,7 +7044,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
                return -ETIMEDOUT;
        }
 
-       pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+       dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
 
        if (tag & HPSA_ERROR_BIT) {
                dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7914,7 +7911,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
        kfree(h->cmd_pool_bits);
        h->cmd_pool_bits = NULL;
        if (h->cmd_pool) {
-               pci_free_consistent(h->pdev,
+               dma_free_coherent(&h->pdev->dev,
                                h->nr_cmds * sizeof(struct CommandList),
                                h->cmd_pool,
                                h->cmd_pool_dhandle);
@@ -7922,7 +7919,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
                h->cmd_pool_dhandle = 0;
        }
        if (h->errinfo_pool) {
-               pci_free_consistent(h->pdev,
+               dma_free_coherent(&h->pdev->dev,
                                h->nr_cmds * sizeof(struct ErrorInfo),
                                h->errinfo_pool,
                                h->errinfo_pool_dhandle);
@@ -7936,12 +7933,12 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
        h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
                                   sizeof(unsigned long),
                                   GFP_KERNEL);
-       h->cmd_pool = pci_alloc_consistent(h->pdev,
+       h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
                    h->nr_cmds * sizeof(*h->cmd_pool),
-                   &(h->cmd_pool_dhandle));
-       h->errinfo_pool = pci_alloc_consistent(h->pdev,
+                   &h->cmd_pool_dhandle, GFP_KERNEL);
+       h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
                    h->nr_cmds * sizeof(*h->errinfo_pool),
-                   &(h->errinfo_pool_dhandle));
+                   &h->errinfo_pool_dhandle, GFP_KERNEL);
        if ((h->cmd_pool_bits == NULL)
            || (h->cmd_pool == NULL)
            || (h->errinfo_pool == NULL)) {
@@ -8068,7 +8065,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
        for (i = 0; i < h->nreply_queues; i++) {
                if (!h->reply_queue[i].head)
                        continue;
-               pci_free_consistent(h->pdev,
+               dma_free_coherent(&h->pdev->dev,
                                        h->reply_queue_size,
                                        h->reply_queue[i].head,
                                        h->reply_queue[i].busaddr);
@@ -8594,11 +8591,11 @@ reinit_after_soft_reset:
        number_of_controllers++;
 
        /* configure PCI DMA stuff */
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (rc == 0) {
                dac = 1;
        } else {
-               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (rc == 0) {
                        dac = 0;
                } else {
@@ -8797,8 +8794,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
                RAID_CTLR_LUNID, TYPE_CMD)) {
                goto out;
        }
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-                                       PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+                       DEFAULT_TIMEOUT);
        if (rc)
                goto out;
        if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8830,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
                RAID_CTLR_LUNID, TYPE_CMD))
                goto errout;
 
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-               PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if ((rc != 0) || (c->err_info->CommandStatus != 0))
                goto errout;
 
@@ -8845,8 +8842,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
                RAID_CTLR_LUNID, TYPE_CMD))
                goto errout;
 
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-               PCI_DMA_TODEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+                       NO_TIMEOUT);
        if ((rc != 0)  || (c->err_info->CommandStatus != 0))
                goto errout;
 
@@ -8855,8 +8852,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
                RAID_CTLR_LUNID, TYPE_CMD))
                goto errout;
 
-       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-               PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+       rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+                       NO_TIMEOUT);
        if ((rc != 0)  || (c->err_info->CommandStatus != 0))
                goto errout;
 
@@ -9228,9 +9225,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
        BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
                        IOACCEL1_COMMANDLIST_ALIGNMENT);
        h->ioaccel_cmd_pool =
-               pci_alloc_consistent(h->pdev,
+               dma_alloc_coherent(&h->pdev->dev,
                        h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
-                       &(h->ioaccel_cmd_pool_dhandle));
+                       &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
 
        h->ioaccel1_blockFetchTable =
                kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9278,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
        BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
                        IOACCEL2_COMMANDLIST_ALIGNMENT);
        h->ioaccel2_cmd_pool =
-               pci_alloc_consistent(h->pdev,
+               dma_alloc_coherent(&h->pdev->dev,
                        h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
-                       &(h->ioaccel2_cmd_pool_dhandle));
+                       &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
 
        h->ioaccel2_blockFetchTable =
                kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9353,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
        h->reply_queue_size = h->max_commands * sizeof(u64);
 
        for (i = 0; i < h->nreply_queues; i++) {
-               h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+               h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
                                                h->reply_queue_size,
-                                               &(h->reply_queue[i].busaddr));
+                                               &h->reply_queue[i].busaddr,
+                                               GFP_KERNEL);
                if (!h->reply_queue[i].head) {
                        rc = -ENOMEM;
                        goto clean1;    /* rq, ioaccel */
index f42a619..e63aadd 100644 (file)
@@ -2266,7 +2266,6 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
        /*
         * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
         */
-       target_wait_for_sess_cmds(se_sess);
        target_remove_session(se_sess);
        tport->ibmv_nexus = NULL;
        kfree(nexus);
index bd6ac6b..ee8a1ec 100644 (file)
@@ -208,7 +208,7 @@ module_param(ips, charp, 0);
 
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
                          DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
-                         PCI_DMA_BIDIRECTIONAL : \
+                         DMA_BIDIRECTIONAL : \
                          scb->scsi_cmd->sc_data_direction)
 
 #ifdef IPS_DEBUG
@@ -1529,11 +1529,12 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
        if (ha->ioctl_data && length <= ha->ioctl_len)
                return 0;
        /* there is no buffer or it's not big enough, allocate a new one */
-       bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+       bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
+                       GFP_KERNEL);
        if (bigger_buf) {
                /* free the old memory */
-               pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
-                                   ha->ioctl_busaddr);
+               dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
+                                 ha->ioctl_data, ha->ioctl_busaddr);
                /* use the new memory */
                ha->ioctl_data = (char *) bigger_buf;
                ha->ioctl_len = length;
@@ -1678,9 +1679,8 @@ ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
                } else if (!ha->flash_data) {
                        datasize = pt->CoppCP.cmd.flashfw.total_packets *
                            pt->CoppCP.cmd.flashfw.count;
-                       ha->flash_data = pci_alloc_consistent(ha->pcidev,
-                                                             datasize,
-                                                             &ha->flash_busaddr);
+                       ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
+                                       datasize, &ha->flash_busaddr, GFP_KERNEL);
                        if (!ha->flash_data){
                                printk(KERN_WARNING "Unable to allocate a flash buffer\n");
                                return IPS_FAILURE;
@@ -1858,7 +1858,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
 
        scb->data_len = ha->flash_datasize;
        scb->data_busaddr =
-           pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+           dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
                           IPS_DMA_DIR(scb));
        scb->flags |= IPS_SCB_MAP_SINGLE;
        scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1880,8 @@ ips_free_flash_copperhead(ips_ha_t * ha)
        if (ha->flash_data == ips_FlashData)
                test_and_clear_bit(0, &ips_FlashDataInUse);
        else if (ha->flash_data)
-               pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
-                                   ha->flash_busaddr);
+               dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
+                                 ha->flash_data, ha->flash_busaddr);
        ha->flash_data = NULL;
 }
 
@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
 
                case START_STOP:
                        scb->scsi_cmd->result = DID_OK << 16;
+                       break;
 
                case TEST_UNIT_READY:
                case INQUIRY:
@@ -4212,7 +4213,7 @@ ips_free(ips_ha_t * ha)
 
        if (ha) {
                if (ha->enq) {
-                       pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+                       dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
                                            ha->enq, ha->enq_busaddr);
                        ha->enq = NULL;
                }
@@ -4221,7 +4222,7 @@ ips_free(ips_ha_t * ha)
                ha->conf = NULL;
 
                if (ha->adapt) {
-                       pci_free_consistent(ha->pcidev,
+                       dma_free_coherent(&ha->pcidev->dev,
                                            sizeof (IPS_ADAPTER) +
                                            sizeof (IPS_IO_CMD), ha->adapt,
                                            ha->adapt->hw_status_start);
@@ -4229,7 +4230,7 @@ ips_free(ips_ha_t * ha)
                }
 
                if (ha->logical_drive_info) {
-                       pci_free_consistent(ha->pcidev,
+                       dma_free_coherent(&ha->pcidev->dev,
                                            sizeof (IPS_LD_INFO),
                                            ha->logical_drive_info,
                                            ha->logical_drive_info_dma_addr);
@@ -4243,7 +4244,7 @@ ips_free(ips_ha_t * ha)
                ha->subsys = NULL;
 
                if (ha->ioctl_data) {
-                       pci_free_consistent(ha->pcidev, ha->ioctl_len,
+                       dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
                                            ha->ioctl_data, ha->ioctl_busaddr);
                        ha->ioctl_data = NULL;
                        ha->ioctl_datasize = 0;
@@ -4276,11 +4277,11 @@ static int
 ips_deallocatescbs(ips_ha_t * ha, int cmds)
 {
        if (ha->scbs) {
-               pci_free_consistent(ha->pcidev,
+               dma_free_coherent(&ha->pcidev->dev,
                                    IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
                                    ha->scbs->sg_list.list,
                                    ha->scbs->sg_busaddr);
-               pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+               dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
                                    ha->scbs, ha->scbs->scb_busaddr);
                ha->scbs = NULL;
        }                       /* end if */
@@ -4307,17 +4308,16 @@ ips_allocatescbs(ips_ha_t * ha)
        METHOD_TRACE("ips_allocatescbs", 1);
 
        /* Allocate memory for the SCBs */
-       ha->scbs =
-           pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
-                                &command_dma);
+       ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
+                       ha->max_cmds * sizeof (ips_scb_t),
+                       &command_dma, GFP_KERNEL);
        if (ha->scbs == NULL)
                return 0;
-       ips_sg.list =
-           pci_alloc_consistent(ha->pcidev,
-                                IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
-                                ha->max_cmds, &sg_dma);
+       ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
+                       IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
+                       &sg_dma, GFP_KERNEL);
        if (ips_sg.list == NULL) {
-               pci_free_consistent(ha->pcidev,
+               dma_free_coherent(&ha->pcidev->dev,
                                    ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
                                    command_dma);
                return 0;
@@ -4446,8 +4446,8 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
        if (scb->flags & IPS_SCB_MAP_SG)
                 scsi_dma_unmap(scb->scsi_cmd);
        else if (scb->flags & IPS_SCB_MAP_SINGLE)
-               pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
-                                IPS_DMA_DIR(scb));
+               dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
+                                scb->data_len, IPS_DMA_DIR(scb));
 
        /* check to make sure this is not our "special" scb */
        if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4559,8 @@ ips_flush_and_reset(ips_ha_t *ha)
        dma_addr_t command_dma;
 
        /* Create a usuable SCB */
-       scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+       scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
+                       &command_dma, GFP_KERNEL);
        if (scb) {
            memset(scb, 0, sizeof(ips_scb_t));
            ips_init_scb(ha, scb);
@@ -4594,7 +4595,7 @@ ips_flush_and_reset(ips_ha_t *ha)
        /* Now RESET and INIT the adapter */
        (*ha->func.reset) (ha);
 
-       pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+       dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
        return;
 }
 
@@ -6926,29 +6927,30 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
         * are guaranteed to be < 4G.
         */
        if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
-           !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+           !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
                (ha)->flags |= IPS_HA_ENH_SG;
        } else {
-               if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+               if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
                        printk(KERN_WARNING "Unable to set DMA Mask\n");
                        return ips_abort_init(ha, index);
                }
        }
        if(ips_cd_boot && !ips_FlashData){
-               ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
-                                                    &ips_flashbusaddr);
+               ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+                               PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
        }
 
-       ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
-                                      &ha->enq_busaddr);
+       ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+                       &ha->enq_busaddr, GFP_KERNEL);
        if (!ha->enq) {
                IPS_PRINTK(KERN_WARNING, pci_dev,
                           "Unable to allocate host inquiry structure\n");
                return ips_abort_init(ha, index);
        }
 
-       ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
-                                        sizeof (IPS_IO_CMD), &dma_address);
+       ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+                       sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+                       &dma_address, GFP_KERNEL);
        if (!ha->adapt) {
                IPS_PRINTK(KERN_WARNING, pci_dev,
                           "Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6961,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
 
 
 
-       ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+       ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+                       sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
        if (!ha->logical_drive_info) {
                IPS_PRINTK(KERN_WARNING, pci_dev,
                           "Unable to allocate logical drive info structure\n");
@@ -6997,8 +7000,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
        if (ips_ioctlsize < PAGE_SIZE)
                ips_ioctlsize = PAGE_SIZE;
 
-       ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
-                                             &ha->ioctl_busaddr);
+       ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
+                       &ha->ioctl_busaddr, GFP_KERNEL);
        ha->ioctl_len = ips_ioctlsize;
        if (!ha->ioctl_data) {
                IPS_PRINTK(KERN_WARNING, pci_dev,
index 1ee3868..7b5deae 100644 (file)
@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
  *    the task management request.
  * @task_request: the handle to the task request object to start.
  */
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
-                                              struct isci_remote_device *idev,
-                                              struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+                                         struct isci_remote_device *idev,
+                                         struct isci_request *ireq)
 {
        enum sci_status status;
 
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
                         "%s: SCIC Controller starting task from invalid "
                         "state\n",
                         __func__);
-               return SCI_TASK_FAILURE_INVALID_STATE;
+               return SCI_FAILURE_INVALID_STATE;
        }
 
        status = sci_remote_device_start_task(ihost, idev, ireq);
index b353992..6bc3f02 100644 (file)
@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
        struct isci_remote_device *idev,
        struct isci_request *ireq);
 
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
        struct isci_host *ihost,
        struct isci_remote_device *idev,
        struct isci_request *ireq);
index ed197bc..2f15170 100644 (file)
@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
 
        if (status == SCI_SUCCESS) {
                if (ireq->stp.rsp.status & ATA_ERR)
-                       status = SCI_IO_FAILURE_RESPONSE_VALID;
+                       status = SCI_FAILURE_IO_RESPONSE_VALID;
        } else {
-               status = SCI_IO_FAILURE_RESPONSE_VALID;
+               status = SCI_FAILURE_IO_RESPONSE_VALID;
        }
 
        if (status != SCI_SUCCESS) {
index 6dcaed0..fb6eba3 100644 (file)
@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
                                 struct isci_tmf *tmf, unsigned long timeout_ms)
 {
        DECLARE_COMPLETION_ONSTACK(completion);
-       enum sci_task_status status = SCI_TASK_FAILURE;
+       enum sci_status status = SCI_FAILURE;
        struct isci_request *ireq;
        int ret = TMF_RESP_FUNC_FAILED;
        unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
        /* start the TMF io. */
        status = sci_controller_start_task(ihost, idev, ireq);
 
-       if (status != SCI_TASK_SUCCESS) {
+       if (status != SCI_SUCCESS) {
                dev_dbg(&ihost->pdev->dev,
                         "%s: start_io failed - status = 0x%x, request = %p\n",
                         __func__,
index b025a0b..23354f2 100644 (file)
@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
                        return rc;
 
                return iscsi_conn_get_addr_param((struct sockaddr_storage *)
-                                                &addr, param, buf);
+                                                &addr,
+                                                (enum iscsi_param)param, buf);
        default:
                return iscsi_host_get_param(shost, param, buf);
        }
index 6eb5ff3..1ad2826 100644 (file)
@@ -38,30 +38,6 @@ static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
        return *(volatile u8 *)(esp->regs + reg);
 }
 
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
-                                     size_t sz, int dir)
-{
-       return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-                                 int num_sg, int dir)
-{
-       return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                                 size_t sz, int dir)
-{
-       dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                             int num_sg, int dir)
-{
-       dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int jazz_esp_irq_pending(struct esp *esp)
 {
        if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +93,6 @@ static int jazz_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops jazz_esp_ops = {
        .esp_write8     =       jazz_esp_write8,
        .esp_read8      =       jazz_esp_read8,
-       .map_single     =       jazz_esp_map_single,
-       .map_sg         =       jazz_esp_map_sg,
-       .unmap_single   =       jazz_esp_unmap_single,
-       .unmap_sg       =       jazz_esp_unmap_sg,
        .irq_pending    =       jazz_esp_irq_pending,
        .reset_dma      =       jazz_esp_reset_dma,
        .dma_drain      =       jazz_esp_dma_drain,
@@ -182,7 +154,7 @@ static int esp_jazz_probe(struct platform_device *dev)
 
        dev_set_drvdata(&dev->dev, esp);
 
-       err = scsi_esp_register(esp, &dev->dev);
+       err = scsi_esp_register(esp);
        if (err)
                goto fail_free_irq;
 
index 4fae253..b1bd283 100644 (file)
@@ -1872,7 +1872,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
        struct fc_lport *lport = shost_priv(shost);
        struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
        struct fc_fcp_pkt *fsp;
-       struct fc_rport_libfc_priv *rpriv;
        int rval;
        int rc = 0;
        struct fc_stats *stats;
@@ -1894,8 +1893,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
                goto out;
        }
 
-       rpriv = rport->dd_data;
-
        if (!fc_fcp_lport_queue_ready(lport)) {
                if (lport->qfull) {
                        if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2292,7 @@ int fc_setup_fcp(void)
 
 void fc_destroy_fcp(void)
 {
-       if (scsi_pkt_cachep)
-               kmem_cache_destroy(scsi_pkt_cachep);
+       kmem_cache_destroy(scsi_pkt_cachep);
 }
 
 /**
index 372387a..1e1c0f1 100644 (file)
@@ -1038,8 +1038,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
                struct fc_els_ls_rjt *rjt;
 
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-               FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
-                            rjt->er_reason, rjt->er_explan);
+               if (!rjt)
+                       FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+               else
+                       FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+                                    rjt->er_reason, rjt->er_explan);
                fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
        }
 out:
@@ -1158,8 +1161,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
        op = fc_frame_payload_op(fp);
        if (op == ELS_LS_ACC) {
                pp = fc_frame_payload_get(fp, sizeof(*pp));
-               if (!pp)
+               if (!pp) {
+                       fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
                        goto out;
+               }
 
                resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
                FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1177,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
                                fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
                        goto out;
                }
-               if (pp->prli.prli_spp_len < sizeof(pp->spp))
+               if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+                       fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
                        goto out;
+               }
 
                fcp_parm = ntohl(pp->spp.spp_params);
                if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1218,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 
        } else {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
-               FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
-                            rjt->er_reason, rjt->er_explan);
+               if (!rjt)
+                       FC_RPORT_DBG(rdata, "PRLI bad response\n");
+               else
+                       FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+                                    rjt->er_reason, rjt->er_explan);
                fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
        }
 
index 64a958a..4f6cdf5 100644 (file)
@@ -654,7 +654,7 @@ void sas_probe_sata(struct asd_sas_port *port)
                /* if libata could not bring the link up, don't surface
                 * the device
                 */
-               if (ata_dev_disabled(sas_to_ata_dev(dev)))
+               if (!ata_dev_enabled(sas_to_ata_dev(dev)))
                        sas_fail_probe(dev, __func__, -ENODEV);
        }
 
index 0148ae6..dde433a 100644 (file)
@@ -260,7 +260,7 @@ static void sas_suspend_devices(struct work_struct *work)
         * phy_list is not being mutated
         */
        list_for_each_entry(phy, &port->phy_list, port_phy_el) {
-               if (si->dft->lldd_port_formed)
+               if (si->dft->lldd_port_deformed)
                        si->dft->lldd_port_deformed(phy);
                phy->suspended = 1;
                port->suspended = 1;
index fadc99c..0d1f727 100644 (file)
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
        unsigned long flags;
 
        spin_lock_irqsave(&task->task_state_lock, flags);
-       if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+               complete(&task->slow_task->completion);
+       }
        spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-       complete(&task->slow_task->completion);
 }
 
 static void smp_task_done(struct sas_task *task)
 {
-       if (!del_timer(&task->slow_task->timer))
-               return;
+       del_timer(&task->slow_task->timer);
        complete(&task->slow_task->completion);
 }
 
@@ -2054,14 +2053,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
                return res;
        }
 
-       /* delete the old link */
-       if (SAS_ADDR(phy->attached_sas_addr) &&
-           SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
-               SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
-                           SAS_ADDR(dev->sas_addr), phy_id,
-                           SAS_ADDR(phy->attached_sas_addr));
-               sas_unregister_devs_sas_addr(dev, phy_id, last);
-       }
+       /* we always have to delete the old device when we went here */
+       SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+                   SAS_ADDR(dev->sas_addr), phy_id,
+                   SAS_ADDR(phy->attached_sas_addr));
+       sas_unregister_devs_sas_addr(dev, phy_id, last);
 
        return sas_discover_new(dev, phy_id);
 }
index 43732e8..c1eb2b0 100644 (file)
@@ -52,7 +52,7 @@ struct lpfc_sli2_slim;
                downloads using bsg */
 
 #define LPFC_MIN_SG_SLI4_BUF_SZ        0x800   /* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512    /* sg element count per scsi cmnd  */
 #define LPFC_MAX_SG_SEG_CNT    4096    /* sg element count per scsi cmnd */
 #define LPFC_MIN_SG_SEG_CNT    32      /* sg element count per scsi cmnd */
@@ -583,6 +583,25 @@ struct lpfc_mbox_ext_buf_ctx {
        struct list_head ext_dmabuf_list;
 };
 
+struct lpfc_ras_fwlog {
+       uint8_t *fwlog_buff;
+       uint32_t fw_buffcount; /* Buffer size posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES  16      /* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
+       uint32_t fw_loglevel; /* Log level set */
+       struct lpfc_dmabuf lwpd;
+       struct list_head fwlog_buff_list;
+
+       /* RAS support status on adapter */
+       bool ras_hwsupport; /* RAS Support available on HW or not */
+       bool ras_enabled;   /* Ras Enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+       bool ras_active;    /* RAS logging running state */
+};
+
 struct lpfc_hba {
        /* SCSI interface function jump table entries */
        int (*lpfc_new_scsi_buf)
@@ -790,6 +809,7 @@ struct lpfc_hba {
        uint32_t cfg_total_seg_cnt;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_nvme_seg_cnt;
+       uint32_t cfg_scsi_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
        uint64_t cfg_soft_wwnn;
        uint64_t cfg_soft_wwpn;
@@ -833,6 +853,9 @@ struct lpfc_hba {
 #define LPFC_FDMI_SUPPORT      1       /* FDMI supported? */
        uint32_t cfg_enable_SmartSAN;
        uint32_t cfg_enable_mds_diags;
+       uint32_t cfg_ras_fwlog_level;
+       uint32_t cfg_ras_fwlog_buffsize;
+       uint32_t cfg_ras_fwlog_func;
        uint32_t cfg_enable_fc4_type;
        uint32_t cfg_enable_bbcr;       /* Enable BB Credit Recovery */
        uint32_t cfg_enable_dpp;        /* Enable Direct Packet Push */
@@ -963,6 +986,7 @@ struct lpfc_hba {
        uint32_t intr_mode;
 #define LPFC_INTR_ERROR        0xFFFFFFFF
        struct list_head port_list;
+       spinlock_t port_list_lock;      /* lock for port_list mutations */
        struct lpfc_vport *pport;       /* physical lpfc_vport pointer */
        uint16_t max_vpi;               /* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF            /* Max number of VPI supported */
@@ -1092,6 +1116,9 @@ struct lpfc_hba {
        struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
        uint32_t ctx_idx;
 
+       /* RAS Support */
+       struct lpfc_ras_fwlog ras_fwlog;
+
        uint8_t menlo_flag;     /* menlo generic flags */
 #define HBA_MENLO_SUPPORT      0x1 /* HBA supports menlo commands */
        uint32_t iocb_cnt;
index 1a6ed9b..dda7f45 100644 (file)
@@ -5358,15 +5358,74 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
 
 /*
  * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
  * Because of the additional overhead involved in setting up T10-DIF,
  * this parameter will be limited to 128 if BlockGuard is enabled under SLI4
  * and will be limited to 512 if BlockGuard is enabled under SLI3.
  */
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
-           LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ *    configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+                    char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+       struct lpfc_hba   *phba = vport->phba;
+       int len;
+
+       len = snprintf(buf, PAGE_SIZE, "SGL sz: %d  total SGEs: %d\n",
+                      phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+       len += snprintf(buf + len, PAGE_SIZE, "Cfg: %d  SCSI: %d  NVME: %d\n",
+                       phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+                       phba->cfg_nvme_seg_cnt);
+       return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+       if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+               phba->cfg_sg_seg_cnt = val;
+               return 0;
+       }
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+                       "be set to %d, allowed range is [%d, %d]\n",
+                       val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+       phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+       return -EINVAL;
+}
 
 /*
  * lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5376,6 +5435,31 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
  */
 LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
 
+/*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ *     0 = Disable firmware logging (default)
+ *     [1-4] = Multiple of 1/4th MB of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * Range: 0 (least verbose) to 4 (most verbose)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: Firmware logging enabled on function number
+ * Default function which has RAS support : 0
+ * Value Range is [0..7].
+ * FW logging is a global action and enablement is via a specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
+
 /*
  * lpfc_enable_bbcr: Enable BB Credit Recovery
  *       0  = BB Credit Recovery disabled
@@ -5501,6 +5585,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_protocol,
        &dev_attr_lpfc_xlane_supported,
        &dev_attr_lpfc_enable_mds_diags,
+       &dev_attr_lpfc_ras_fwlog_buffsize,
+       &dev_attr_lpfc_ras_fwlog_level,
+       &dev_attr_lpfc_ras_fwlog_func,
        &dev_attr_lpfc_enable_bbcr,
        &dev_attr_lpfc_enable_dpp,
        NULL,
@@ -6587,6 +6674,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_sli_mode_init(phba, lpfc_sli_mode);
        phba->cfg_enable_dss = 1;
        lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+       lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+       lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+       lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
+
+       /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+        * accommodate 512K and 1M IOs in a single nvme buf and supply
+        * enough NVME LS iocb buffers for larger connectivity counts.
+        */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+               phba->cfg_iocb_cnt = 5;
+       }
+
        return;
 }
 
index 90745fe..7bd7ae8 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/list.h>
 #include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -2843,9 +2844,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
 
                if (nocopydata) {
                        bpl->tus.f.bdeFlags = 0;
-                       pci_dma_sync_single_for_device(phba->pcidev,
-                               dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
                } else {
                        memset((uint8_t *)dmp->dma.virt, 0, cnt);
                        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -5308,6 +5306,330 @@ job_error:
        return rc;
 }
 
+/**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check whether firmware logging is supported by the adapter.
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+       struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+       ras_fwlog = &phba->ras_fwlog;
+
+       if (ras_fwlog->ras_hwsupport == false)
+               return -EACCES;
+       else if (ras_fwlog->ras_enabled == false)
+               return -EPERM;
+       else
+               return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct lpfc_vport *vport = shost_priv(shost);
+       struct fc_bsg_reply *bsg_reply = job->reply;
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_bsg_get_ras_config_reply *ras_reply;
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+       int rc = 0;
+
+       if (job->request_len <
+           sizeof(struct fc_bsg_request) +
+           sizeof(struct lpfc_bsg_ras_req)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "6181 Received RAS_LOG request "
+                               "below minimum size\n");
+               rc = -EINVAL;
+               goto ras_job_error;
+       }
+
+       /* Check FW log status */
+       rc = lpfc_check_fwlog_support(phba);
+       if (rc == -EACCES || rc == -EPERM)
+               goto ras_job_error;
+
+       ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+               bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+       /* Current logging state */
+       if (ras_fwlog->ras_active == true)
+               ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+       else
+               ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+       ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+       ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+       /* make error code available to userspace */
+       bsg_reply->result = rc;
+
+       /* complete the job back to userspace */
+       bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+       return rc;
+}
+
+/**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. To
+ * be done before reading logs from the host memory.
+ **/
+static void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+       ras_fwlog->ras_active = false;
+
+       /* Disable FW logging to host memory */
+       writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+              phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct lpfc_vport *vport = shost_priv(shost);
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_bsg_set_ras_config_req *ras_req;
+       struct fc_bsg_request *bsg_request = job->request;
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+       struct fc_bsg_reply *bsg_reply = job->reply;
+       uint8_t action = 0, log_level = 0;
+       int rc = 0;
+
+       if (job->request_len <
+           sizeof(struct fc_bsg_request) +
+           sizeof(struct lpfc_bsg_set_ras_config_req)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "6182 Received RAS_LOG request "
+                               "below minimum size\n");
+               rc = -EINVAL;
+               goto ras_job_error;
+       }
+
+       /* Check FW log status */
+       rc = lpfc_check_fwlog_support(phba);
+       if (rc == -EACCES || rc == -EPERM)
+               goto ras_job_error;
+
+       ras_req = (struct lpfc_bsg_set_ras_config_req *)
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
+       action = ras_req->action;
+       log_level = ras_req->log_level;
+
+       if (action == LPFC_RASACTION_STOP_LOGGING) {
+               /* Check if already disabled */
+               if (ras_fwlog->ras_active == false) {
+                       rc = -ESRCH;
+                       goto ras_job_error;
+               }
+
+               /* Disable logging */
+               lpfc_ras_stop_fwlog(phba);
+       } else {
+               /* action == LPFC_RASACTION_START_LOGGING */
+               if (ras_fwlog->ras_active == true) {
+                       rc = -EINPROGRESS;
+                       goto ras_job_error;
+               }
+
+               /* Enable logging */
+               rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+                                             LPFC_RAS_ENABLE_LOGGING);
+               if (rc)
+                       rc = -EINVAL;
+       }
+ras_job_error:
+       /* make error code available to userspace */
+       bsg_reply->result = rc;
+
+       /* complete the job back to userspace */
+       bsg_job_done(job, bsg_reply->result,
+                      bsg_reply->reply_payload_rcv_len);
+
+       return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get the current write offset and wrap count of the
+ * firmware log data written to host memory.
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct lpfc_vport *vport = shost_priv(shost);
+       struct lpfc_bsg_get_ras_lwpd *ras_reply;
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+       struct fc_bsg_reply *bsg_reply = job->reply;
+       uint32_t lwpd_offset = 0;
+       uint64_t wrap_value = 0;
+       int rc = 0;
+
+       rc = lpfc_check_fwlog_support(phba);
+       if (rc == -EACCES || rc == -EPERM)
+               goto ras_job_error;
+
+       if (job->request_len <
+           sizeof(struct fc_bsg_request) +
+           sizeof(struct lpfc_bsg_ras_req)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "6183 Received RAS_LOG request "
+                               "below minimum size\n");
+               rc = -EINVAL;
+               goto ras_job_error;
+       }
+
+       ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+               bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+       lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
+       ras_reply->offset = be32_to_cpu(lwpd_offset);
+
+       wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
+       ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+
+ras_job_error:
+       /* make error code available to userspace */
+       bsg_reply->result = rc;
+
+       /* complete the job back to userspace */
+       bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+       return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+       struct Scsi_Host *shost = fc_bsg_to_shost(job);
+       struct lpfc_vport *vport = shost_priv(shost);
+       struct lpfc_hba *phba = vport->phba;
+       struct fc_bsg_request *bsg_request = job->request;
+       struct fc_bsg_reply *bsg_reply = job->reply;
+       struct lpfc_bsg_get_fwlog_req *ras_req;
+       uint32_t rd_offset, rd_index, offset, pending_wlen;
+       uint32_t boundary = 0, align_len = 0, write_len = 0;
+       void *dest, *src, *fwlog_buff;
+       struct lpfc_ras_fwlog *ras_fwlog = NULL;
+       struct lpfc_dmabuf *dmabuf, *next;
+       int rc = 0;
+
+       ras_fwlog = &phba->ras_fwlog;
+
+       rc = lpfc_check_fwlog_support(phba);
+       if (rc == -EACCES || rc == -EPERM)
+               goto ras_job_error;
+
+       /* Logging to be stopped before reading */
+       if (ras_fwlog->ras_active == true) {
+               rc = -EINPROGRESS;
+               goto ras_job_error;
+       }
+
+       if (job->request_len <
+           sizeof(struct fc_bsg_request) +
+           sizeof(struct lpfc_bsg_get_fwlog_req)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+                               "6184 Received RAS_LOG request "
+                               "below minimum size\n");
+               rc = -EINVAL;
+               goto ras_job_error;
+       }
+
+       ras_req = (struct lpfc_bsg_get_fwlog_req *)
+               bsg_request->rqst_data.h_vendor.vendor_cmd;
+       rd_offset = ras_req->read_offset;
+
+       /* Allocate a host buffer to copy the firmware log into */
+       fwlog_buff = vmalloc(ras_req->read_size);
+       if (!fwlog_buff) {
+               rc = -ENOMEM;
+               goto ras_job_error;
+       }
+
+       rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+       offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+       pending_wlen = ras_req->read_size;
+       dest = fwlog_buff;
+
+       list_for_each_entry_safe(dmabuf, next,
+                             &ras_fwlog->fwlog_buff_list, list) {
+
+               if (dmabuf->buffer_tag < rd_index)
+                       continue;
+
+               /* Align read to buffer size */
+               if (offset) {
+                       boundary = ((dmabuf->buffer_tag + 1) *
+                                   LPFC_RAS_MAX_ENTRY_SIZE);
+
+                       align_len = (boundary - offset);
+                       write_len = min_t(u32, align_len,
+                                         LPFC_RAS_MAX_ENTRY_SIZE);
+               } else {
+                       write_len = min_t(u32, pending_wlen,
+                                         LPFC_RAS_MAX_ENTRY_SIZE);
+                       align_len = 0;
+                       boundary = 0;
+               }
+               src = dmabuf->virt + offset;
+               memcpy(dest, src, write_len);
+
+               pending_wlen -= write_len;
+               if (!pending_wlen)
+                       break;
+
+               dest += write_len;
+               offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+       }
+
+       bsg_reply->reply_payload_rcv_len =
+               sg_copy_from_buffer(job->reply_payload.sg_list,
+                                   job->reply_payload.sg_cnt,
+                                   fwlog_buff, ras_req->read_size);
+
+       vfree(fwlog_buff);
+
+ras_job_error:
+       bsg_reply->result = rc;
+       bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+       return rc;
+}
+
+
 /**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
@@ -5355,6 +5677,18 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
        case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
                rc = lpfc_forced_link_speed(job);
                break;
+       case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+               rc = lpfc_bsg_get_ras_lwpd(job);
+               break;
+       case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+               rc = lpfc_bsg_get_ras_fwlog(job);
+               break;
+       case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+               rc = lpfc_bsg_get_ras_config(job);
+               break;
+       case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+               rc = lpfc_bsg_set_ras_config(job);
+               break;
        default:
                rc = -EINVAL;
                bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5702,7 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
 
 /**
  * lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
  **/
 int
 lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5736,7 @@ lpfc_bsg_request(struct bsg_job *job)
 
 /**
  * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
  *
  * This function just aborts the job's IOCB.  The aborted IOCB will return to
  * the waiting function which will handle passing the error back to userspace
index 32347c8..820323f 100644 (file)
 #define LPFC_BSG_VENDOR_DIAG_MODE_END          10
 #define LPFC_BSG_VENDOR_LINK_DIAG_TEST         11
 #define LPFC_BSG_VENDOR_FORCED_LINK_SPEED      14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD           16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG          17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG         18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG         19
 
 struct set_ct_event {
        uint32_t command;
@@ -296,6 +300,38 @@ struct forced_link_speed_support_reply {
        uint8_t supported;
 };
 
+struct lpfc_bsg_ras_req {
+       uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+       uint32_t command;
+       uint32_t read_size;
+       uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+       uint32_t offset;
+       uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+       uint32_t command;
+       uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING     0x00
+#define LPFC_RASACTION_START_LOGGING    0x01
+       uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+       uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED 0x00
+#define LPFC_RASLOG_STATE_RUNNING 0x01
+       uint8_t log_level;
+       uint32_t log_buff_sz;
+};
+
+
 /* driver only */
 #define SLI_CONFIG_NOT_HANDLED         0
 #define SLI_CONFIG_HANDLED             1
index bea24bc..e011365 100644 (file)
@@ -545,6 +545,13 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
 int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
 void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
 
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int  lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+                        uint32_t fwlog_enable);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
 /* NVME interfaces. */
 void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
                        struct lpfc_nodelist *ndlp);
index 1cbdc89..789ad15 100644 (file)
@@ -445,14 +445,14 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
        struct lpfc_vport *vport_curr;
        unsigned long flags;
 
-       spin_lock_irqsave(&phba->hbalock, flags);
+       spin_lock_irqsave(&phba->port_list_lock, flags);
        list_for_each_entry(vport_curr, &phba->port_list, listentry) {
                if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
-                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       spin_unlock_irqrestore(&phba->port_list_lock, flags);
                        return vport_curr;
                }
        }
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       spin_unlock_irqrestore(&phba->port_list_lock, flags);
        return NULL;
 }
 
@@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
                                "Parse GID_FTrsp: did:x%x flg:x%x x%x",
                                Did, ndlp->nlp_flag, vport->fc_flag);
 
-                       /* Don't assume the rport is always the previous
-                        * FC4 type.
-                        */
-                       ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
-
                        /* By default, the driver expects to support FCP FC4 */
                        if (fc4_type == FC_TYPE_FCP)
                                ndlp->nlp_fc4_type |= NLP_FC4_FCP;
index aec5b10..0c8005b 100644 (file)
@@ -550,7 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        struct lpfc_nodelist *ndlp;
        unsigned char *statep;
        struct nvme_fc_local_port *localport;
-       struct lpfc_nvmet_tgtport *tgtp;
        struct nvme_fc_remote_port *nrport = NULL;
        struct lpfc_nvme_rport *rport;
 
@@ -654,7 +653,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                        "\nOutstanding IO x%x\n",  outio);
 
        if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
-               tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                len += snprintf(buf + len, size - len,
                                "\nNVME Targetport Entry ...\n");
 
index 4dda969..f1c1faa 100644 (file)
@@ -7673,8 +7673,11 @@ void
 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
 {
        struct lpfc_vport *vport;
+
+       spin_lock_irq(&phba->port_list_lock);
        list_for_each_entry(vport, &phba->port_list, listentry)
                lpfc_els_flush_cmd(vport);
+       spin_unlock_irq(&phba->port_list_lock);
 
        return;
 }
index eb71877..f4deb86 100644 (file)
@@ -4193,7 +4193,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
        if (new_state ==  NLP_STE_MAPPED_NODE ||
            new_state == NLP_STE_UNMAPPED_NODE) {
-               if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+               if (ndlp->nlp_fc4_type ||
                    ndlp->nlp_DID == Fabric_DID ||
                    ndlp->nlp_DID == NameServer_DID ||
                    ndlp->nlp_DID == FDMI_DID) {
@@ -5428,12 +5428,10 @@ static void
 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        LIST_HEAD(completions);
-       struct lpfc_sli *psli;
        IOCB_t     *icmd;
        struct lpfc_iocbq    *iocb, *next_iocb;
        struct lpfc_sli_ring *pring;
 
-       psli = &phba->sli;
        pring = lpfc_phba_elsring(phba);
        if (unlikely(!pring))
                return;
@@ -5938,14 +5936,14 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
                }
        }
 
-       spin_lock_irqsave(&phba->hbalock, flags);
+       spin_lock_irqsave(&phba->port_list_lock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport->vpi == i) {
-                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       spin_unlock_irqrestore(&phba->port_list_lock, flags);
                        return vport;
                }
        }
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       spin_unlock_irqrestore(&phba->port_list_lock, flags);
        return NULL;
 }
 
index 083f8c8..bbd0a57 100644 (file)
@@ -186,6 +186,7 @@ struct lpfc_sli_intf {
 #define LPFC_CTL_PDEV_CTL_FRL_ALL      0x00
 #define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE  0x10
 #define LPFC_CTL_PDEV_CTL_FRL_NIC      0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS      0x1000000
 
 #define LPFC_FW_DUMP_REQUEST    (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
 
@@ -964,6 +965,7 @@ struct mbox_header {
 /* Subsystem Definitions */
 #define LPFC_MBOX_SUBSYSTEM_NA         0x0
 #define LPFC_MBOX_SUBSYSTEM_COMMON     0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL   0xB
 #define LPFC_MBOX_SUBSYSTEM_FCOE       0xC
 
 /* Device Specific Definitions */
@@ -1030,6 +1032,9 @@ struct mbox_header {
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE          0x22
 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK       0x23
 
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION           0x37
+
 /* Mailbox command structures */
 struct eq_context {
        uint32_t word0;
@@ -1162,6 +1167,45 @@ struct lpfc_mbx_nop {
        uint32_t context[2];
 };
 
+
+
+struct lpfc_mbx_set_ras_fwlog {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT                0
+#define lpfc_fwlog_enable_MASK         0x00000001
+#define lpfc_fwlog_enable_WORD         word4
+#define lpfc_fwlog_loglvl_SHIFT                8
+#define lpfc_fwlog_loglvl_MASK         0x0000000F
+#define lpfc_fwlog_loglvl_WORD         word4
+#define lpfc_fwlog_ra_SHIFT            15
+#define lpfc_fwlog_ra_WORD             0x00000008
+#define lpfc_fwlog_buffcnt_SHIFT       16
+#define lpfc_fwlog_buffcnt_MASK                0x000000FF
+#define lpfc_fwlog_buffcnt_WORD                word4
+#define lpfc_fwlog_buffsz_SHIFT                24
+#define lpfc_fwlog_buffsz_MASK         0x000000FF
+#define lpfc_fwlog_buffsz_WORD         word4
+                       uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT          0
+#define lpfc_fwlog_acqe_MASK           0x0000FFFF
+#define lpfc_fwlog_acqe_WORD           word5
+#define lpfc_fwlog_cqid_SHIFT          16
+#define lpfc_fwlog_cqid_MASK           0x0000FFFF
+#define lpfc_fwlog_cqid_WORD           word5
+#define LPFC_MAX_FWLOG_PAGE    16
+                       struct dma_address lwpd;
+                       struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+               } response;
+       } u;
+};
+
+
 struct cq_context {
        uint32_t word0;
 #define lpfc_cq_context_event_SHIFT    31
@@ -3868,6 +3912,7 @@ struct lpfc_mqe {
                struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
                struct lpfc_mbx_set_host_data set_host_data;
                struct lpfc_mbx_nop nop;
+               struct lpfc_mbx_set_ras_fwlog ras_fwlog;
        } un;
 };
 
index 0503237..20fa678 100644 (file)
@@ -3956,7 +3956,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        if (phba->sli_rev == LPFC_SLI_REV4) {
                shost->dma_boundary =
                        phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
-               shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+               shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
        }
 
        /*
@@ -3988,9 +3988,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        if (error)
                goto out_put_shost;
 
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irq(&phba->port_list_lock);
        list_add_tail(&vport->listentry, &phba->port_list);
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irq(&phba->port_list_lock);
        return vport;
 
 out_put_shost:
@@ -4016,9 +4016,9 @@ destroy_port(struct lpfc_vport *vport)
        fc_remove_host(shost);
        scsi_remove_host(shost);
 
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irq(&phba->port_list_lock);
 
        lpfc_cleanup(vport);
        return;
@@ -5621,7 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
        /* Initialize ndlp management spinlock */
        spin_lock_init(&phba->ndlp_lock);
 
+       /* Initialize port_list spinlock */
+       spin_lock_init(&phba->port_list_lock);
        INIT_LIST_HEAD(&phba->port_list);
+
        INIT_LIST_HEAD(&phba->work_list);
        init_waitqueue_head(&phba->wait_4_mlo_m_q);
 
@@ -5919,8 +5922,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
         * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
         */
        max_buf_size = (2 * SLI4_PAGE_SIZE);
-       if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
 
        /*
         * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5942,9 +5943,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
                phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
 
-               if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
-                       phba->cfg_sg_seg_cnt =
-                               LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+               /*
+                * If supporting DIF, reduce the seg count for scsi to
+                * allow room for the DIF sges.
+                */
+               if (phba->cfg_enable_bg &&
+                   phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+                       phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+               else
+                       phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
        } else {
                /*
                 * The scsi_buf for a regular I/O holds the FCP cmnd,
@@ -5958,6 +5966,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
                /* Total SGEs for scsi_sg_list */
                phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+               phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
 
                /*
                 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
@@ -5965,10 +5974,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                 */
        }
 
+       /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+       if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+               if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+                                       "6300 Reducing NVME sg segment "
+                                       "cnt to %d\n",
+                                       LPFC_MAX_NVME_SEG_CNT);
+                       phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+               } else
+                       phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+       }
+
        /* Initialize the host templates with the updated values. */
-       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+       lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
 
        if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
                phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5977,9 +5998,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
                        SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
 
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
-                       "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+                       "9087 sg_seg_cnt:%d dmabuf_size:%d "
+                       "total:%d scsi:%d nvme:%d\n",
                        phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
-                       phba->cfg_total_seg_cnt);
+                       phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
+                       phba->cfg_nvme_seg_cnt);
 
        /* Initialize buffer queue management fields */
        INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
@@ -6205,6 +6228,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
        if (phba->cfg_fof)
                fof_vectors = 1;
 
+       /* Verify RAS support on adapter */
+       lpfc_sli4_ras_init(phba);
+
        /* Verify all the SLI4 queues */
        rc = lpfc_sli4_queue_verify(phba);
        if (rc)
@@ -7967,7 +7993,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
        else
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "3028 GET_FUNCTION_CONFIG: failed to find "
-                               "Resrouce Descriptor:x%x\n",
+                               "Resource Descriptor:x%x\n",
                                LPFC_RSRC_DESC_TYPE_FCFCOE);
 
 read_cfg_out:
@@ -10492,6 +10518,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        /* Stop kthread signal shall trigger work_done one more time */
        kthread_stop(phba->worker_thread);
 
+       /* Disable FW logging to host memory */
+       writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+              phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+       /* Free RAS DMA memory */
+       if (phba->ras_fwlog.ras_enabled == true)
+               lpfc_sli4_ras_dma_free(phba);
+
        /* Unset the queues shared with the hardware then release all
         * allocated resources.
         */
@@ -10737,6 +10771,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                phba->mds_diags_support = 1;
        else
                phba->mds_diags_support = 0;
+
        return 0;
 }
 
@@ -10965,9 +11000,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
        kfree(phba->vpi_ids);
 
        lpfc_stop_hba_timers(phba);
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irq(&phba->port_list_lock);
 
        lpfc_debugfs_terminate(vport);
 
@@ -11694,6 +11729,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 
        /* Check if there are static vports to be created. */
        lpfc_create_static_vport(phba);
+
+       /* Enable RAS FW log support */
+       lpfc_sli4_ras_setup(phba);
+
        return 0;
 
 out_disable_intr:
@@ -11773,9 +11812,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
        lpfc_sli4_hba_unset(phba);
 
        lpfc_stop_hba_timers(phba);
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irq(&phba->port_list_lock);
 
        /* Perform scsi free before driver resource_unset since scsi
         * buffers are released to their corresponding pools here.
@@ -12419,6 +12458,30 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks to see if RAS is supported by the adapter. Check the
+ * function through which RAS support enablement is to be done.
+ **/
+void
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
+{
+       switch (phba->pcidev->device) {
+       case PCI_DEVICE_ID_LANCER_G6_FC:
+       case PCI_DEVICE_ID_LANCER_G7_FC:
+               phba->ras_fwlog.ras_hwsupport = true;
+               if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+                       phba->ras_fwlog.ras_enabled = true;
+               else
+                       phba->ras_fwlog.ras_enabled = false;
+               break;
+       default:
+               phba->ras_fwlog.ras_hwsupport = false;
+       }
+}
+
 /**
  * lpfc_fof_queue_setup - Set up all the fof queues
  * @phba: pointer to lpfc hba data structure.
index bd9bce9..269808e 100644 (file)
@@ -2318,6 +2318,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+       ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
        spin_unlock_irq(shost->host_lock);
        lpfc_disc_set_adisc(vport, ndlp);
 
@@ -2395,6 +2396,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+       ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
        spin_unlock_irq(shost->host_lock);
        lpfc_disc_set_adisc(vport, ndlp);
        return ndlp->nlp_state;
@@ -2652,6 +2654,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+       ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
        spin_unlock_irq(shost->host_lock);
        return ndlp->nlp_state;
 }
index 918ae18..ba831de 100644 (file)
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
        vport = lport->vport;
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-                       "6001 ENTER.  lpfc_pnvme %p, qidx x%xi qhandle %p\n",
+                       "6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
                        lport, qidx, handle);
        kfree(handle);
 }
@@ -2235,13 +2235,11 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
        struct sli4_sge *sgl;
        dma_addr_t pdma_phys_sgl;
        uint16_t iotag, lxri = 0;
-       int bcnt, num_posted, sgl_size;
+       int bcnt, num_posted;
        LIST_HEAD(prep_nblist);
        LIST_HEAD(post_nblist);
        LIST_HEAD(nvme_nblist);
 
-       sgl_size = phba->cfg_sg_dma_buf_size;
-
        for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
                lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
                if (!lpfc_ncmd)
@@ -2462,17 +2460,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
        nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
        nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 
-       /* Limit to LPFC_MAX_NVME_SEG_CNT.
-        * For now need + 1 to get around NVME transport logic.
+       /* We need to tell the transport layer + 1 because it takes page
+        * alignment into account. When space for the SGL is allocated we
+        * allocate + 3, one for cmd, one for rsp and one for this alignment
         */
-       if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
-               lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
-                                "6300 Reducing sg segment cnt to %d\n",
-                                LPFC_MAX_NVME_SEG_CNT);
-               phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-       } else {
-               phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
-       }
        lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
        lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
 
index b766afe..6245f44 100644 (file)
@@ -1339,15 +1339,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
                        idx = 0;
        }
 
-       infop = phba->sli4_hba.nvmet_ctx_info;
-       for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
-               for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+       for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+               for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+                       infop = lpfc_get_ctx_list(phba, i, j);
                        lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
                                        "6408 TOTAL NVMET ctx for CPU %d "
                                        "MRQ %d: cnt %d nextcpu %p\n",
                                        i, j, infop->nvmet_ctx_list_cnt,
                                        infop->nvmet_ctx_next_cpu);
-                       infop++;
                }
        }
        return 0;
@@ -1373,17 +1372,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
        pinfo.port_id = vport->fc_myDID;
 
-       /* Limit to LPFC_MAX_NVME_SEG_CNT.
-        * For now need + 1 to get around NVME transport logic.
+       /* We need to tell the transport layer + 1 because it takes page
+        * alignment into account. When space for the SGL is allocated we
+        * allocate + 3, one for cmd, one for rsp and one for this alignment
         */
-       if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
-               lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
-                               "6400 Reducing sg segment cnt to %d\n",
-                               LPFC_MAX_NVME_SEG_CNT);
-               phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-       } else {
-               phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
-       }
        lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
        lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
index 5c7858e..4fa6703 100644 (file)
@@ -202,8 +202,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
 static void
 lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
 {
-       struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
-       struct lpfc_nodelist *pnode = rdata->pnode;
+       struct lpfc_rport_data *rdata;
+       struct lpfc_nodelist *pnode;
        struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
        unsigned long flags;
        struct Scsi_Host  *shost = cmd->device->host;
@@ -211,17 +211,19 @@ lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
        unsigned long latency;
        int i;
 
-       if (cmd->result)
+       if (!vport->stat_data_enabled ||
+           vport->stat_data_blocked ||
+           (cmd->result))
                return;
 
        latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+       rdata = lpfc_cmd->rdata;
+       pnode = rdata->pnode;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       if (!vport->stat_data_enabled ||
-               vport->stat_data_blocked ||
-               !pnode ||
-               !pnode->lat_data ||
-               (phba->bucket_type == LPFC_NO_BUCKET)) {
+       if (!pnode ||
+           !pnode->lat_data ||
+           (phba->bucket_type == LPFC_NO_BUCKET)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return;
        }
@@ -1050,7 +1052,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
        if (!found)
                return NULL;
 
-       if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+       if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
                atomic_inc(&ndlp->cmd_pending);
                lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
        }
@@ -4158,9 +4160,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
        }
        lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 
-       spin_lock_irqsave(&phba->hbalock, flags);
-       lpfc_cmd->pCmd = NULL;
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       /* If pCmd was set to NULL from abort path, do not call scsi_done */
+       if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+                                "0711 FCP cmd already NULL, sid: 0x%06x, "
+                                "did: 0x%06x, oxid: 0x%04x\n",
+                                vport->fc_myDID,
+                                (pnode) ? pnode->nlp_DID : 0,
+                                phba->sli_rev == LPFC_SLI_REV4 ?
+                                lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
+               return;
+       }
 
        /* The sdev is not guaranteed to be valid post scsi_done upcall. */
        cmd->scsi_done(cmd);
index 9830bdb..783a154 100644 (file)
@@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
        struct lpfc_register doorbell;
 
        doorbell.word0 = 0;
-       bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
-       bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
-       bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
-               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
-       bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+       bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
 }
 
@@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
        struct hbq_dmabuf *dmabuf;
        struct lpfc_cq_event *cq_event;
        unsigned long iflag;
+       int count = 0;
 
        spin_lock_irqsave(&phba->hbalock, iflag);
        phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
                        if (irspiocbq)
                                lpfc_sli_sp_handle_rspiocb(phba, pring,
                                                           irspiocbq);
+                       count++;
                        break;
                case CQE_CODE_RECEIVE:
                case CQE_CODE_RECEIVE_V1:
                        dmabuf = container_of(cq_event, struct hbq_dmabuf,
                                              cq_event);
                        lpfc_sli4_handle_received_buffer(phba, dmabuf);
+                       count++;
                        break;
                default:
                        break;
                }
+
+               /* Limit the number of events to 64 to avoid soft lockups */
+               if (count == 64)
+                       break;
        }
 }
 
@@ -6145,6 +6148,271 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
        return;
 }
 
+/**
+ * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called to free memory allocated for RAS FW logging
+ * support in the driver.
+ **/
+void
+lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
+{
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+       struct lpfc_dmabuf *dmabuf, *next;
+
+       if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
+               list_for_each_entry_safe(dmabuf, next,
+                                   &ras_fwlog->fwlog_buff_list,
+                                   list) {
+                       list_del(&dmabuf->list);
+                       dma_free_coherent(&phba->pcidev->dev,
+                                         LPFC_RAS_MAX_ENTRY_SIZE,
+                                         dmabuf->virt, dmabuf->phys);
+                       kfree(dmabuf);
+               }
+       }
+
+       if (ras_fwlog->lwpd.virt) {
+               dma_free_coherent(&phba->pcidev->dev,
+                                 sizeof(uint32_t) * 2,
+                                 ras_fwlog->lwpd.virt,
+                                 ras_fwlog->lwpd.phys);
+               ras_fwlog->lwpd.virt = NULL;
+       }
+
+       ras_fwlog->ras_active = false;
+}
+
+/**
+ * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
+ * @phba: Pointer to HBA context object.
+ * @fwlog_buff_count: Count of buffers to be created.
+ *
+ * This routine allocates DMA memory for the Log Write Position Data (LWPD)
+ * and for the buffers posted to the adapter to receive FW log updates.
+ * Buffer count is calculated based on module param ras_fwlog_buffsize
+ * Size of each buffer posted to FW is 64K.
+ **/
+
+static int
+lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
+                       uint32_t fwlog_buff_count)
+{
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+       struct lpfc_dmabuf *dmabuf;
+       int rc = 0, i = 0;
+
+       /* Initialize List */
+       INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
+
+       /* Allocate memory for the LWPD */
+       ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                           sizeof(uint32_t) * 2,
+                                           &ras_fwlog->lwpd.phys,
+                                           GFP_KERNEL);
+       if (!ras_fwlog->lwpd.virt) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "6185 LWPD Memory Alloc Failed\n");
+
+               return -ENOMEM;
+       }
+
+       ras_fwlog->fw_buffcount = fwlog_buff_count;
+       for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
+               dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+                                GFP_KERNEL);
+               if (!dmabuf) {
+                       rc = -ENOMEM;
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "6186 Memory Alloc failed FW logging");
+                       goto free_mem;
+               }
+
+               dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                                 LPFC_RAS_MAX_ENTRY_SIZE,
+                                                 &dmabuf->phys,
+                                                 GFP_KERNEL);
+               if (!dmabuf->virt) {
+                       kfree(dmabuf);
+                       rc = -ENOMEM;
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "6187 DMA Alloc Failed FW logging");
+                       goto free_mem;
+               }
+               memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+               dmabuf->buffer_tag = i;
+               list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
+       }
+
+free_mem:
+       if (rc)
+               lpfc_sli4_ras_dma_free(phba);
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * Completion handler for driver's RAS MBX command to the device.
+ **/
+static void
+lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+       MAILBOX_t *mb;
+       union lpfc_sli4_cfg_shdr *shdr;
+       uint32_t shdr_status, shdr_add_status;
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+       mb = &pmb->u.mb;
+
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+       if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+                               "6188 FW LOG mailbox "
+                               "completed with status x%x add_status x%x,"
+                               " mbx status x%x\n",
+                               shdr_status, shdr_add_status, mb->mbxStatus);
+               goto disable_ras;
+       }
+
+       ras_fwlog->ras_active = true;
+       mempool_free(pmb, phba->mbox_mem_pool);
+
+       return;
+
+disable_ras:
+       /* Free RAS DMA memory */
+       lpfc_sli4_ras_dma_free(phba);
+       mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @fwlog_level: Logging verbosity level.
+ * @fwlog_enable: Enable/Disable logging.
+ *
+ * Initialize memory and post mailbox command to enable FW logging in host
+ * memory.
+ **/
+int
+lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+                        uint32_t fwlog_level,
+                        uint32_t fwlog_enable)
+{
+       struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+       struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
+       struct lpfc_dmabuf *dmabuf;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
+       int rc = 0;
+
+       fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
+                         phba->cfg_ras_fwlog_buffsize);
+       fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
+
+       /*
+        * If re-enabling FW logging support, use the earlier allocated
+        * DMA buffers while posting the MBX command.
+        */
+       if (!ras_fwlog->lwpd.virt) {
+               rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "6189 RAS FW Log Support Not Enabled");
+                       return rc;
+               }
+       }
+
+       /* Setup Mailbox command */
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "6190 RAS MBX Alloc Failed");
+               rc = -ENOMEM;
+               goto mem_free;
+       }
+
+       ras_fwlog->fw_loglevel = fwlog_level;
+       len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
+               sizeof(struct lpfc_sli4_cfg_mhdr));
+
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
+                        LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
+                        len, LPFC_SLI4_MBX_EMBED);
+
+       mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
+       bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
+              fwlog_enable);
+       bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
+              ras_fwlog->fw_loglevel);
+       bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
+              ras_fwlog->fw_buffcount);
+       bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
+              LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
+
+       /* Update DMA buffer address */
+       list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
+               memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+
+               mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
+                       putPaddrLow(dmabuf->phys);
+
+               mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
+                       putPaddrHigh(dmabuf->phys);
+       }
+
+       /* Update LWPD address */
+       mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
+       mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+
+       mbox->vport = phba->pport;
+       mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
+
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "6191 RAS Mailbox failed. "
+                               "status %d mbxStatus : x%x", rc,
+                               bf_get(lpfc_mqe_status, &mbox->u.mqe));
+               mempool_free(mbox, phba->mbox_mem_pool);
+               rc = -EIO;
+               goto mem_free;
+       } else
+               rc = 0;
+mem_free:
+       if (rc)
+               lpfc_sli4_ras_dma_free(phba);
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if RAS is supported on the adapter and initialize it.
+ **/
+void
+lpfc_sli4_ras_setup(struct lpfc_hba *phba)
+{
+       /* Check RAS FW Log needs to be enabled or not */
+       if (lpfc_check_fwlog_support(phba))
+               return;
+
+       lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+                                LPFC_RAS_ENABLE_LOGGING);
+}
+
 /**
  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
  * @phba: Pointer to HBA context object.
@@ -10266,8 +10534,12 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
        LPFC_MBOXQ_t *pmb;
        unsigned long iflag;
 
+       /* Disable softirqs, including timers from obtaining phba->hbalock */
+       local_bh_disable();
+
        /* Flush all the mailbox commands in the mbox system */
        spin_lock_irqsave(&phba->hbalock, iflag);
+
        /* The pending mailbox command queue */
        list_splice_init(&phba->sli.mboxq, &completions);
        /* The outstanding active mailbox command */
@@ -10280,6 +10552,9 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
        list_splice_init(&phba->sli.mboxq_cmpl, &completions);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
 
+       /* Enable softirqs again, done with phba->hbalock */
+       local_bh_enable();
+
        /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
        while (!list_empty(&completions)) {
                list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
@@ -10419,6 +10694,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 
        lpfc_hba_down_prep(phba);
 
+       /* Disable softirqs, including timers from obtaining phba->hbalock */
+       local_bh_disable();
+
        lpfc_fabric_abort_hba(phba);
 
        spin_lock_irqsave(&phba->hbalock, flags);
@@ -10472,6 +10750,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
                kfree(buf_ptr);
        }
 
+       /* Enable softirqs again, done with phba->hbalock */
+       local_bh_enable();
+
        /* Return any active mbox cmds */
        del_timer_sync(&psli->mbox_tmo);
 
@@ -11775,6 +12056,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
        }
        timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
 
+       /* Disable softirqs, including timers from obtaining phba->hbalock */
+       local_bh_disable();
+
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
 
@@ -11788,6 +12072,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
                                                1000) + jiffies;
                spin_unlock_irq(&phba->hbalock);
 
+               /* Enable softirqs again, done with phba->hbalock */
+               local_bh_enable();
+
                while (phba->sli.mbox_active) {
                        /* Check active mailbox complete status every 2ms */
                        msleep(2);
@@ -11797,9 +12084,13 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
                                 */
                                break;
                }
-       } else
+       } else {
                spin_unlock_irq(&phba->hbalock);
 
+               /* Enable softirqs again, done with phba->hbalock */
+               local_bh_enable();
+       }
+
        lpfc_sli_mbox_sys_flush(phba);
 }
 
@@ -13136,7 +13427,6 @@ static bool
 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 {
        bool workposted = false;
-       struct fc_frame_header *fc_hdr;
        struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
        struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
        struct lpfc_nvmet_tgtport *tgtp;
@@ -13173,9 +13463,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
                hrq->RQ_buf_posted--;
                memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
 
-               /* If a NVME LS event (type 0x28), treat it as Fast path */
-               fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
-
                /* save off the frame for the word thread to process */
                list_add_tail(&dma_buf->cq_event.list,
                              &phba->sli4_hba.sp_queue_event);
@@ -14558,13 +14845,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
        int rc, length, status = 0;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
-       uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
 
        /* sanity check on queue memory */
        if (!cq || !eq)
                return -ENODEV;
-       if (!phba->sli4_hba.pc_sli4_params.supported)
-               hw_page_size = cq->page_size;
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
index 399c001..e76c380 100644 (file)
@@ -886,3 +886,4 @@ int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
 int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
 uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
index 5012495..5a0d512 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "12.0.0.6"
+#define LPFC_DRIVER_VERSION "12.0.0.7"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 1ff0f7d..c340e0e 100644 (file)
@@ -207,7 +207,7 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
        struct lpfc_vport *vport;
        unsigned long flags;
 
-       spin_lock_irqsave(&phba->hbalock, flags);
+       spin_lock_irqsave(&phba->port_list_lock, flags);
        list_for_each_entry(vport, &phba->port_list, listentry) {
                if (vport == new_vport)
                        continue;
@@ -215,11 +215,11 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
                if (memcmp(&vport->fc_sparam.portName,
                           &new_vport->fc_sparam.portName,
                           sizeof(struct lpfc_name)) == 0) {
-                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       spin_unlock_irqrestore(&phba->port_list_lock, flags);
                        return 0;
                }
        }
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       spin_unlock_irqrestore(&phba->port_list_lock, flags);
        return 1;
 }
 
@@ -825,9 +825,9 @@ skip_logo:
 
        lpfc_free_vpi(phba, vport->vpi);
        vport->work_port_events = 0;
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irq(&phba->port_list_lock);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1828 Vport Deleted.\n");
        scsi_host_put(shost);
@@ -844,7 +844,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
-       spin_lock_irq(&phba->hbalock);
+       spin_lock_irq(&phba->port_list_lock);
        list_for_each_entry(port_iterator, &phba->port_list, listentry) {
                if (port_iterator->load_flag & FC_UNLOADING)
                        continue;
@@ -856,7 +856,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
                }
                vports[index++] = port_iterator;
        }
-       spin_unlock_irq(&phba->hbalock);
+       spin_unlock_irq(&phba->port_list_lock);
        return vports;
 }
 
index eb551f3..764d320 100644 (file)
@@ -52,14 +52,12 @@ struct mac_esp_priv {
        struct esp *esp;
        void __iomem *pdma_regs;
        void __iomem *pdma_io;
-       int error;
 };
 static struct esp *esp_chips[2];
 static DEFINE_SPINLOCK(esp_chips_lock);
 
 #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
-                              platform_get_drvdata((struct platform_device *) \
-                                                   (esp->dev)))
+                              dev_get_drvdata((esp)->dev))
 
 static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
 {
@@ -71,38 +69,6 @@ static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
        return nubus_readb(esp->regs + reg * 16);
 }
 
-/* For pseudo DMA and PIO we need the virtual address
- * so this address mapping is the identity mapping.
- */
-
-static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
-                                    size_t sz, int dir)
-{
-       return (dma_addr_t)buf;
-}
-
-static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-                         int num_sg, int dir)
-{
-       int i;
-
-       for (i = 0; i < num_sg; i++)
-               sg[i].dma_address = (u32)sg_virt(&sg[i]);
-       return num_sg;
-}
-
-static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                                size_t sz, int dir)
-{
-       /* Nothing to do. */
-}
-
-static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                            int num_sg, int dir)
-{
-       /* Nothing to do. */
-}
-
 static void mac_esp_reset_dma(struct esp *esp)
 {
        /* Nothing to do. */
@@ -120,12 +86,11 @@ static void mac_esp_dma_invalidate(struct esp *esp)
 
 static int mac_esp_dma_error(struct esp *esp)
 {
-       return MAC_ESP_GET_PRIV(esp)->error;
+       return esp->send_cmd_error;
 }
 
 static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
 {
-       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
        int i = 500000;
 
        do {
@@ -140,7 +105,7 @@ static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
 
        printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
               esp_read8(ESP_STATUS));
-       mep->error = 1;
+       esp->send_cmd_error = 1;
        return 1;
 }
 
@@ -166,7 +131,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
 
        printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
               esp_read8(ESP_STATUS));
-       mep->error = 1;
+       esp->send_cmd_error = 1;
        return 1;
 }
 
@@ -233,7 +198,7 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
 {
        struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
 
-       mep->error = 0;
+       esp->send_cmd_error = 0;
 
        if (!write)
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -271,164 +236,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
        } while (esp_count);
 }
 
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
-{
-       int i = 500000;
-
-       do {
-               unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
-               if (fbytes)
-                       return fbytes;
-
-               udelay(2);
-       } while (--i);
-
-       printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
-              esp_read8(ESP_STATUS));
-       return 0;
-}
-
-static inline int mac_esp_wait_for_intr(struct esp *esp)
-{
-       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
-       int i = 500000;
-
-       do {
-               esp->sreg = esp_read8(ESP_STATUS);
-               if (esp->sreg & ESP_STAT_INTR)
-                       return 0;
-
-               udelay(2);
-       } while (--i);
-
-       printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
-       mep->error = 1;
-       return 1;
-}
-
-#define MAC_ESP_PIO_LOOP(operands, reg1) \
-       asm volatile ( \
-            "1:     moveb " operands " \n" \
-            "       subqw #1,%1        \n" \
-            "       jbne 1b            \n" \
-            : "+a" (addr), "+r" (reg1) \
-            : "a" (fifo))
-
-#define MAC_ESP_PIO_FILL(operands, reg1) \
-       asm volatile ( \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       moveb " operands " \n" \
-            "       subqw #8,%1        \n" \
-            "       subqw #8,%1        \n" \
-            : "+a" (addr), "+r" (reg1) \
-            : "a" (fifo))
-
-#define MAC_ESP_FIFO_SIZE 16
-
-static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
-                                u32 dma_count, int write, u8 cmd)
-{
-       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
-       u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
-       u8 phase = esp->sreg & ESP_STAT_PMASK;
-
-       cmd &= ~ESP_CMD_DMA;
-       mep->error = 0;
-
-       if (write) {
-               u8 *dst = (u8 *)addr;
-               u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
-               scsi_esp_cmd(esp, cmd);
-
-               while (1) {
-                       if (!mac_esp_wait_for_fifo(esp))
-                               break;
-
-                       *dst++ = esp_read8(ESP_FDATA);
-                       --esp_count;
-
-                       if (!esp_count)
-                               break;
-
-                       if (mac_esp_wait_for_intr(esp))
-                               break;
-
-                       if ((esp->sreg & ESP_STAT_PMASK) != phase)
-                               break;
-
-                       esp->ireg = esp_read8(ESP_INTRPT);
-                       if (esp->ireg & mask) {
-                               mep->error = 1;
-                               break;
-                       }
-
-                       if (phase == ESP_MIP)
-                               scsi_esp_cmd(esp, ESP_CMD_MOK);
-
-                       scsi_esp_cmd(esp, ESP_CMD_TI);
-               }
-       } else {
-               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
-               if (esp_count >= MAC_ESP_FIFO_SIZE)
-                       MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
-               else
-                       MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
-
-               scsi_esp_cmd(esp, cmd);
-
-               while (esp_count) {
-                       unsigned int n;
-
-                       if (mac_esp_wait_for_intr(esp))
-                               break;
-
-                       if ((esp->sreg & ESP_STAT_PMASK) != phase)
-                               break;
-
-                       esp->ireg = esp_read8(ESP_INTRPT);
-                       if (esp->ireg & ~ESP_INTR_BSERV) {
-                               mep->error = 1;
-                               break;
-                       }
-
-                       n = MAC_ESP_FIFO_SIZE -
-                           (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
-                       if (n > esp_count)
-                               n = esp_count;
-
-                       if (n == MAC_ESP_FIFO_SIZE) {
-                               MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
-                       } else {
-                               esp_count -= n;
-                               MAC_ESP_PIO_LOOP("%0@+,%2@", n);
-                       }
-
-                       scsi_esp_cmd(esp, ESP_CMD_TI);
-               }
-       }
-}
-
 static int mac_esp_irq_pending(struct esp *esp)
 {
        if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
@@ -470,10 +277,6 @@ static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
 static struct esp_driver_ops mac_esp_ops = {
        .esp_write8       = mac_esp_write8,
        .esp_read8        = mac_esp_read8,
-       .map_single       = mac_esp_map_single,
-       .map_sg           = mac_esp_map_sg,
-       .unmap_single     = mac_esp_unmap_single,
-       .unmap_sg         = mac_esp_unmap_sg,
        .irq_pending      = mac_esp_irq_pending,
        .dma_length_limit = mac_esp_dma_length_limit,
        .reset_dma        = mac_esp_reset_dma,
@@ -508,7 +311,7 @@ static int esp_mac_probe(struct platform_device *dev)
        esp = shost_priv(host);
 
        esp->host = host;
-       esp->dev = dev;
+       esp->dev = &dev->dev;
 
        esp->command_block = kzalloc(16, GFP_KERNEL);
        if (!esp->command_block)
@@ -551,14 +354,16 @@ static int esp_mac_probe(struct platform_device *dev)
                mep->pdma_regs = NULL;
                break;
        }
+       esp->fifo_reg = esp->regs + ESP_FDATA * 16;
 
        esp->ops = &mac_esp_ops;
+       esp->flags = ESP_FLAG_NO_DMA_MAP;
        if (mep->pdma_io == NULL) {
                printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
                esp_write8(0, ESP_TCLOW);
                esp_write8(0, ESP_TCMED);
-               esp->flags = ESP_FLAG_DISABLE_SYNC;
-               mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+               esp->flags |= ESP_FLAG_DISABLE_SYNC;
+               mac_esp_ops.send_dma_cmd = esp_send_pio_cmd;
        } else {
                printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
        }
@@ -577,7 +382,7 @@ static int esp_mac_probe(struct platform_device *dev)
        esp_chips[dev->id] = esp;
        spin_unlock(&esp_chips_lock);
 
-       err = scsi_esp_register(esp, &dev->dev);
+       err = scsi_esp_register(esp);
        if (err)
                goto fail_free_irq;
 
index 530358c..3b7abe5 100644 (file)
@@ -201,13 +201,6 @@ int mraid_debug_level = CL_ANN;
 module_param_named(debug_level, mraid_debug_level, int, 0);
 MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
 
-/*
- * ### global data ###
- */
-static uint8_t megaraid_mbox_version[8] =
-       { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
-
-
 /*
  * PCI table for all supported controllers.
  */
@@ -457,10 +450,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        // Setup the default DMA mask. This would be changed later on
        // depending on hardware capabilities
-       if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
-
+       if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
                con_log(CL_ANN, (KERN_WARNING
-                       "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+                       "megaraid: dma_set_mask failed:%d\n", __LINE__));
 
                goto out_free_adapter;
        }
@@ -484,7 +476,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        // Start the mailbox based controller
        if (megaraid_init_mbox(adapter) != 0) {
                con_log(CL_ANN, (KERN_WARNING
-                       "megaraid: maibox adapter did not initialize\n"));
+                       "megaraid: mailbox adapter did not initialize\n"));
 
                goto out_free_adapter;
        }
@@ -878,11 +870,12 @@ megaraid_init_mbox(adapter_t *adapter)
                adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
                (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
                adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
-               if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+               if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
                        con_log(CL_ANN, (KERN_WARNING
                                "megaraid: DMA mask for 64-bit failed\n"));
 
-                       if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+                       if (dma_set_mask(&adapter->pdev->dev,
+                                               DMA_BIT_MASK(32))) {
                                con_log(CL_ANN, (KERN_WARNING
                                        "megaraid: 32-bit DMA mask failed\n"));
                                goto out_free_sysfs_res;
@@ -950,7 +943,7 @@ megaraid_fini_mbox(adapter_t *adapter)
  * megaraid_alloc_cmd_packets - allocate shared mailbox
  * @adapter            : soft state of the raid controller
  *
- * Allocate and align the shared mailbox. This maibox is used to issue
+ * Allocate and align the shared mailbox. This mailbox is used to issue
  * all the commands. For IO based controllers, the mailbox is also registered
  * with the FW. Allocate memory for all commands as well.
  * This is our big allocator.
@@ -975,9 +968,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
         * Allocate the common 16-byte aligned memory for the handshake
         * mailbox.
         */
-       raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
-                                                    sizeof(mbox64_t),
-                                                    &raid_dev->una_mbox64_dma);
+       raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
+                       sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
+                       GFP_KERNEL);
 
        if (!raid_dev->una_mbox64) {
                con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
                        align;
 
        // Allocate memory for commands issued internally
-       adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
-                                             &adapter->ibuf_dma_h);
+       adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+                       &adapter->ibuf_dma_h, GFP_KERNEL);
        if (!adapter->ibuf) {
 
                con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1075,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
 
                scb->scp                = NULL;
                scb->state              = SCB_FREE;
-               scb->dma_direction      = PCI_DMA_NONE;
+               scb->dma_direction      = DMA_NONE;
                scb->dma_type           = MRAID_DMA_NONE;
                scb->dev_channel        = -1;
                scb->dev_target         = -1;
@@ -1098,10 +1091,10 @@ out_teardown_dma_pools:
 out_free_scb_list:
        kfree(adapter->kscb_list);
 out_free_ibuf:
-       pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+       dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
                adapter->ibuf_dma_h);
 out_free_common_mbox:
-       pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+       dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
                (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
 
        return -1;
@@ -1123,10 +1116,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
 
        kfree(adapter->kscb_list);
 
-       pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+       dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
                (void *)adapter->ibuf, adapter->ibuf_dma_h);
 
-       pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+       dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
                (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
        return;
 }
@@ -1428,12 +1421,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
 
        adapter->outstanding_cmds++;
 
-       if (scb->dma_direction == PCI_DMA_TODEVICE)
-               pci_dma_sync_sg_for_device(adapter->pdev,
-                                          scsi_sglist(scb->scp),
-                                          scsi_sg_count(scb->scp),
-                                          PCI_DMA_TODEVICE);
-
        mbox->busy      = 1;    // Set busy
        mbox->poll      = 0;
        mbox->ack       = 0;
@@ -2180,31 +2167,6 @@ megaraid_isr(int irq, void *devp)
 }
 
 
-/**
- * megaraid_mbox_sync_scb - sync kernel buffers
- * @adapter    : controller's soft state
- * @scb                : pointer to the resource packet
- *
- * DMA sync if required.
- */
-static void
-megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
-{
-       mbox_ccb_t      *ccb;
-
-       ccb     = (mbox_ccb_t *)scb->ccb;
-
-       if (scb->dma_direction == PCI_DMA_FROMDEVICE)
-               pci_dma_sync_sg_for_cpu(adapter->pdev,
-                                       scsi_sglist(scb->scp),
-                                       scsi_sg_count(scb->scp),
-                                       PCI_DMA_FROMDEVICE);
-
-       scsi_dma_unmap(scb->scp);
-       return;
-}
-
-
 /**
  * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
  * @devp       : pointer to HBA soft state
@@ -2403,9 +2365,7 @@ megaraid_mbox_dpc(unsigned long devp)
                        megaraid_mbox_display_scb(adapter, scb);
                }
 
-               // Free our internal resources and call the mid-layer callback
-               // routine
-               megaraid_mbox_sync_scb(adapter, scb);
+               scsi_dma_unmap(scp);
 
                // remove from local clist
                list_del_init(&scb->list);
@@ -2577,7 +2537,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
        uint8_t         raw_mbox[sizeof(mbox_t)];
        int             rval;
        int             recovery_window;
-       int             recovering;
        int             i;
        uioc_t          *kioc;
 
@@ -2591,7 +2550,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
                return FAILED;
        }
 
-
        // Under exceptional conditions, FW can take up to 3 minutes to
        // complete command processing. Wait for additional 2 minutes for the
        // pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2598,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
 
        recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
 
-       recovering = adapter->outstanding_cmds;
-
        for (i = 0; i < recovery_window; i++) {
 
                megaraid_ack_sequence(adapter);
@@ -2725,13 +2681,10 @@ static int
 mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
 {
        mraid_device_t  *raid_dev = ADAP2RAIDDEV(adapter);
-       mbox64_t        *mbox64;
        mbox_t          *mbox;
        uint8_t         status;
        int             i;
 
-
-       mbox64  = raid_dev->mbox64;
        mbox    = raid_dev->mbox;
 
        /*
@@ -2948,9 +2901,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
         * Issue an ENQUIRY3 command to find out certain adapter parameters,
         * e.g., max channels, max commands etc.
         */
-       pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
-                                     &pinfo_dma_h);
-
+       pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+                       &pinfo_dma_h, GFP_KERNEL);
        if (pinfo == NULL) {
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2923,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
 
                con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
 
-               pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+               dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
                        pinfo, pinfo_dma_h);
 
                return -1;
@@ -3002,7 +2954,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
                con_log(CL_ANN, (KERN_WARNING
                        "megaraid: product info failed\n"));
 
-               pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+               dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
                        pinfo, pinfo_dma_h);
 
                return -1;
@@ -3038,7 +2990,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
                "megaraid: fw version:[%s] bios version:[%s]\n",
                adapter->fw_version, adapter->bios_version));
 
-       pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+       dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
                        pinfo_dma_h);
 
        return 0;
@@ -3135,7 +3087,6 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
 static int
 megaraid_mbox_support_random_del(adapter_t *adapter)
 {
-       mbox_t          *mbox;
        uint8_t         raw_mbox[sizeof(mbox_t)];
        int             rval;
 
@@ -3157,8 +3108,6 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
                return 0;
        }
 
-       mbox = (mbox_t *)raw_mbox;
-
        memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
 
        raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3212,8 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
 static void
 megaraid_mbox_flush_cache(adapter_t *adapter)
 {
-       mbox_t  *mbox;
        uint8_t raw_mbox[sizeof(mbox_t)];
 
-
-       mbox = (mbox_t *)raw_mbox;
-
        memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
 
        raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3244,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
        mbox_t  *mbox;
        uint8_t raw_mbox[sizeof(mbox_t)];
        mraid_device_t  *raid_dev = ADAP2RAIDDEV(adapter);
-       mbox64_t *mbox64;
        int     status = 0;
        int i;
        uint32_t dword;
@@ -3310,7 +3254,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
 
        raw_mbox[0] = 0xFF;
 
-       mbox64  = raid_dev->mbox64;
        mbox    = raid_dev->mbox;
 
        /* Wait until mailbox is free */
@@ -3515,7 +3458,7 @@ megaraid_cmm_register(adapter_t *adapter)
 
                scb->scp                = NULL;
                scb->state              = SCB_FREE;
-               scb->dma_direction      = PCI_DMA_NONE;
+               scb->dma_direction      = DMA_NONE;
                scb->dma_type           = MRAID_DMA_NONE;
                scb->dev_channel        = -1;
                scb->dev_target         = -1;
@@ -3653,7 +3596,7 @@ megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
 
        scb->state              = SCB_ACTIVE;
        scb->dma_type           = MRAID_DMA_NONE;
-       scb->dma_direction      = PCI_DMA_NONE;
+       scb->dma_direction      = DMA_NONE;
 
        ccb             = (mbox_ccb_t *)scb->ccb;
        mbox64          = (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3737,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
 static int
 gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
 {
-       uint8_t dmajor;
-
-       dmajor                  = megaraid_mbox_version[0];
-
        hinfo->pci_vendor_id    = adapter->pdev->vendor;
        hinfo->pci_device_id    = adapter->pdev->device;
        hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
@@ -3843,8 +3782,8 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
 
        raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
 
-       raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
-                       PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+       raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+                       PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
 
        if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
                !raid_dev->sysfs_buffer) {
@@ -3881,7 +3820,7 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
        kfree(raid_dev->sysfs_mbox64);
 
        if (raid_dev->sysfs_buffer) {
-               pci_free_consistent(adapter->pdev, PAGE_SIZE,
+               dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
                        raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
        }
 }
index c1d86d9..e075aeb 100644 (file)
  * @raw_mbox           : raw mailbox pointer
  * @mbox               : mailbox
  * @mbox64             : extended mailbox
- * @mbox_dma_h         : maibox dma address
+ * @mbox_dma_h         : mailbox dma address
  * @sgl64              : 64-bit scatter-gather list
  * @sgl32              : 32-bit scatter-gather list
  * @sgl_dma_h          : dma handle for the scatter-gather list
index 9aa9590..9b90c71 100644 (file)
@@ -1330,11 +1330,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
        device_id = MEGASAS_DEV_INDEX(scp);
        pthru = (struct megasas_pthru_frame *)cmd->frame;
 
-       if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+       if (scp->sc_data_direction == DMA_TO_DEVICE)
                flags = MFI_FRAME_DIR_WRITE;
-       else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+       else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                flags = MFI_FRAME_DIR_READ;
-       else if (scp->sc_data_direction == PCI_DMA_NONE)
+       else if (scp->sc_data_direction == DMA_NONE)
                flags = MFI_FRAME_DIR_NONE;
 
        if (instance->flag_ieee == 1) {
@@ -1428,9 +1428,9 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
        device_id = MEGASAS_DEV_INDEX(scp);
        ldio = (struct megasas_io_frame *)cmd->frame;
 
-       if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+       if (scp->sc_data_direction == DMA_TO_DEVICE)
                flags = MFI_FRAME_DIR_WRITE;
-       else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+       else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                flags = MFI_FRAME_DIR_READ;
 
        if (instance->flag_ieee == 1) {
@@ -2240,9 +2240,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
                               sizeof(struct MR_LD_VF_AFFILIATION_111));
        else {
                new_affiliation_111 =
-                       pci_zalloc_consistent(instance->pdev,
+                       dma_zalloc_coherent(&instance->pdev->dev,
                                              sizeof(struct MR_LD_VF_AFFILIATION_111),
-                                             &new_affiliation_111_h);
+                                             &new_affiliation_111_h, GFP_KERNEL);
                if (!new_affiliation_111) {
                        dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
                               "memory for new affiliation for scsi%d\n",
@@ -2302,7 +2302,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
        }
 out:
        if (new_affiliation_111) {
-               pci_free_consistent(instance->pdev,
+               dma_free_coherent(&instance->pdev->dev,
                                    sizeof(struct MR_LD_VF_AFFILIATION_111),
                                    new_affiliation_111,
                                    new_affiliation_111_h);
@@ -2347,10 +2347,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
                       sizeof(struct MR_LD_VF_AFFILIATION));
        else {
                new_affiliation =
-                       pci_zalloc_consistent(instance->pdev,
+                       dma_zalloc_coherent(&instance->pdev->dev,
                                              (MAX_LOGICAL_DRIVES + 1) *
                                              sizeof(struct MR_LD_VF_AFFILIATION),
-                                             &new_affiliation_h);
+                                             &new_affiliation_h, GFP_KERNEL);
                if (!new_affiliation) {
                        dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
                               "memory for new affiliation for scsi%d\n",
@@ -2470,7 +2470,7 @@ out:
        }
 
        if (new_affiliation)
-               pci_free_consistent(instance->pdev,
+               dma_free_coherent(&instance->pdev->dev,
                                    (MAX_LOGICAL_DRIVES + 1) *
                                    sizeof(struct MR_LD_VF_AFFILIATION),
                                    new_affiliation, new_affiliation_h);
@@ -2513,9 +2513,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
 
        if (initial) {
                instance->hb_host_mem =
-                       pci_zalloc_consistent(instance->pdev,
+                       dma_zalloc_coherent(&instance->pdev->dev,
                                              sizeof(struct MR_CTRL_HB_HOST_MEM),
-                                             &instance->hb_host_mem_h);
+                                             &instance->hb_host_mem_h, GFP_KERNEL);
                if (!instance->hb_host_mem) {
                        dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
                               " memory for heartbeat host memory for scsi%d\n",
@@ -4995,9 +4995,8 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
        context_sz = sizeof(u32);
        reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
 
-       instance->reply_queue = pci_alloc_consistent(instance->pdev,
-                                                    reply_q_sz,
-                                                    &instance->reply_queue_h);
+       instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
+                       reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
 
        if (!instance->reply_queue) {
                dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5029,7 +5028,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
 
 fail_fw_init:
 
-       pci_free_consistent(instance->pdev, reply_q_sz,
+       dma_free_coherent(&instance->pdev->dev, reply_q_sz,
                            instance->reply_queue, instance->reply_queue_h);
 fail_reply_queue:
        megasas_free_cmds(instance);
@@ -5533,7 +5532,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
        else {
                if (instance->crash_dump_buf)
-                       pci_free_consistent(instance->pdev,
+                       dma_free_coherent(&instance->pdev->dev,
                                CRASH_DMA_BUF_SIZE,
                                instance->crash_dump_buf,
                                instance->crash_dump_h);
@@ -5616,7 +5615,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
        u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
 
        if (instance->reply_queue)
-               pci_free_consistent(instance->pdev, reply_q_sz,
+               dma_free_coherent(&instance->pdev->dev, reply_q_sz,
                            instance->reply_queue, instance->reply_queue_h);
 
        megasas_free_cmds(instance);
@@ -5655,10 +5654,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
        }
 
        dcmd = &cmd->frame->dcmd;
-       el_info = pci_zalloc_consistent(instance->pdev,
-                                       sizeof(struct megasas_evt_log_info),
-                                       &el_info_h);
-
+       el_info = dma_zalloc_coherent(&instance->pdev->dev,
+                       sizeof(struct megasas_evt_log_info), &el_info_h,
+                       GFP_KERNEL);
        if (!el_info) {
                megasas_return_cmd(instance, cmd);
                return -ENOMEM;
@@ -5695,8 +5693,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
        eli->boot_seq_num = el_info->boot_seq_num;
 
 dcmd_failed:
-       pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
-                           el_info, el_info_h);
+       dma_free_coherent(&instance->pdev->dev,
+                       sizeof(struct megasas_evt_log_info),
+                       el_info, el_info_h);
 
        megasas_return_cmd(instance, cmd);
 
@@ -6134,10 +6133,10 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
 
 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
 {
-       instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
-                                                 &instance->producer_h);
-       instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
-                                                 &instance->consumer_h);
+       instance->producer = dma_alloc_coherent(&instance->pdev->dev,
+                       sizeof(u32), &instance->producer_h, GFP_KERNEL);
+       instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
+                       sizeof(u32), &instance->consumer_h, GFP_KERNEL);
 
        if (!instance->producer || !instance->consumer) {
                dev_err(&instance->pdev->dev,
@@ -6199,11 +6198,11 @@ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
        kfree(instance->reply_map);
        if (instance->adapter_type == MFI_SERIES) {
                if (instance->producer)
-                       pci_free_consistent(instance->pdev, sizeof(u32),
+                       dma_free_coherent(&instance->pdev->dev, sizeof(u32),
                                            instance->producer,
                                            instance->producer_h);
                if (instance->consumer)
-                       pci_free_consistent(instance->pdev, sizeof(u32),
+                       dma_free_coherent(&instance->pdev->dev, sizeof(u32),
                                            instance->consumer,
                                            instance->consumer_h);
        } else {
@@ -6224,10 +6223,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
        struct pci_dev *pdev = instance->pdev;
        struct fusion_context *fusion = instance->ctrl_context;
 
-       instance->evt_detail =
-               pci_alloc_consistent(pdev,
-                                    sizeof(struct megasas_evt_detail),
-                                    &instance->evt_detail_h);
+       instance->evt_detail = dma_alloc_coherent(&pdev->dev,
+                       sizeof(struct megasas_evt_detail),
+                       &instance->evt_detail_h, GFP_KERNEL);
 
        if (!instance->evt_detail) {
                dev_err(&instance->pdev->dev,
@@ -6250,9 +6248,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
        }
 
        instance->pd_list_buf =
-               pci_alloc_consistent(pdev,
+               dma_alloc_coherent(&pdev->dev,
                                     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
-                                    &instance->pd_list_buf_h);
+                                    &instance->pd_list_buf_h, GFP_KERNEL);
 
        if (!instance->pd_list_buf) {
                dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +6258,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
        }
 
        instance->ctrl_info_buf =
-               pci_alloc_consistent(pdev,
+               dma_alloc_coherent(&pdev->dev,
                                     sizeof(struct megasas_ctrl_info),
-                                    &instance->ctrl_info_buf_h);
+                                    &instance->ctrl_info_buf_h, GFP_KERNEL);
 
        if (!instance->ctrl_info_buf) {
                dev_err(&pdev->dev,
@@ -6271,9 +6269,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
        }
 
        instance->ld_list_buf =
-               pci_alloc_consistent(pdev,
+               dma_alloc_coherent(&pdev->dev,
                                     sizeof(struct MR_LD_LIST),
-                                    &instance->ld_list_buf_h);
+                                    &instance->ld_list_buf_h, GFP_KERNEL);
 
        if (!instance->ld_list_buf) {
                dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +6279,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
        }
 
        instance->ld_targetid_list_buf =
-               pci_alloc_consistent(pdev,
-                                    sizeof(struct MR_LD_TARGETID_LIST),
-                                    &instance->ld_targetid_list_buf_h);
+               dma_alloc_coherent(&pdev->dev,
+                               sizeof(struct MR_LD_TARGETID_LIST),
+                               &instance->ld_targetid_list_buf_h, GFP_KERNEL);
 
        if (!instance->ld_targetid_list_buf) {
                dev_err(&pdev->dev,
@@ -6293,21 +6291,20 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
 
        if (!reset_devices) {
                instance->system_info_buf =
-                       pci_alloc_consistent(pdev,
-                                            sizeof(struct MR_DRV_SYSTEM_INFO),
-                                            &instance->system_info_h);
+                       dma_alloc_coherent(&pdev->dev,
+                                       sizeof(struct MR_DRV_SYSTEM_INFO),
+                                       &instance->system_info_h, GFP_KERNEL);
                instance->pd_info =
-                       pci_alloc_consistent(pdev,
-                                            sizeof(struct MR_PD_INFO),
-                                            &instance->pd_info_h);
+                       dma_alloc_coherent(&pdev->dev,
+                                       sizeof(struct MR_PD_INFO),
+                                       &instance->pd_info_h, GFP_KERNEL);
                instance->tgt_prop =
-                       pci_alloc_consistent(pdev,
-                                            sizeof(struct MR_TARGET_PROPERTIES),
-                                            &instance->tgt_prop_h);
+                       dma_alloc_coherent(&pdev->dev,
+                                       sizeof(struct MR_TARGET_PROPERTIES),
+                                       &instance->tgt_prop_h, GFP_KERNEL);
                instance->crash_dump_buf =
-                       pci_alloc_consistent(pdev,
-                                            CRASH_DMA_BUF_SIZE,
-                                            &instance->crash_dump_h);
+                       dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
+                                       &instance->crash_dump_h, GFP_KERNEL);
 
                if (!instance->system_info_buf)
                        dev_err(&instance->pdev->dev,
@@ -6343,7 +6340,7 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
        struct fusion_context *fusion = instance->ctrl_context;
 
        if (instance->evt_detail)
-               pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+               dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
                                    instance->evt_detail,
                                    instance->evt_detail_h);
 
@@ -6354,41 +6351,41 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
                                  fusion->ioc_init_request_phys);
 
        if (instance->pd_list_buf)
-               pci_free_consistent(pdev,
+               dma_free_coherent(&pdev->dev,
                                    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
                                    instance->pd_list_buf,
                                    instance->pd_list_buf_h);
 
        if (instance->ld_list_buf)
-               pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+               dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
                                    instance->ld_list_buf,
                                    instance->ld_list_buf_h);
 
        if (instance->ld_targetid_list_buf)
-               pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+               dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
                                    instance->ld_targetid_list_buf,
                                    instance->ld_targetid_list_buf_h);
 
        if (instance->ctrl_info_buf)
-               pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+               dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
                                    instance->ctrl_info_buf,
                                    instance->ctrl_info_buf_h);
 
        if (instance->system_info_buf)
-               pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+               dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
                                    instance->system_info_buf,
                                    instance->system_info_h);
 
        if (instance->pd_info)
-               pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+               dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
                                    instance->pd_info, instance->pd_info_h);
 
        if (instance->tgt_prop)
-               pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+               dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
                                    instance->tgt_prop, instance->tgt_prop_h);
 
        if (instance->crash_dump_buf)
-               pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+               dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
                                    instance->crash_dump_buf,
                                    instance->crash_dump_h);
 }
@@ -6516,17 +6513,20 @@ static int megasas_probe_one(struct pci_dev *pdev,
        if (instance->requestorId) {
                if (instance->PlasmaFW111) {
                        instance->vf_affiliation_111 =
-                               pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
-                                                    &instance->vf_affiliation_111_h);
+                               dma_alloc_coherent(&pdev->dev,
+                                       sizeof(struct MR_LD_VF_AFFILIATION_111),
+                                       &instance->vf_affiliation_111_h,
+                                       GFP_KERNEL);
                        if (!instance->vf_affiliation_111)
                                dev_warn(&pdev->dev, "Can't allocate "
                                       "memory for VF affiliation buffer\n");
                } else {
                        instance->vf_affiliation =
-                               pci_alloc_consistent(pdev,
-                                                    (MAX_LOGICAL_DRIVES + 1) *
-                                                    sizeof(struct MR_LD_VF_AFFILIATION),
-                                                    &instance->vf_affiliation_h);
+                               dma_alloc_coherent(&pdev->dev,
+                                       (MAX_LOGICAL_DRIVES + 1) *
+                                       sizeof(struct MR_LD_VF_AFFILIATION),
+                                       &instance->vf_affiliation_h,
+                                       GFP_KERNEL);
                        if (!instance->vf_affiliation)
                                dev_warn(&pdev->dev, "Can't allocate "
                                       "memory for VF affiliation buffer\n");
@@ -6994,19 +6994,19 @@ skip_firing_dcmds:
        }
 
        if (instance->vf_affiliation)
-               pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+               dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
                                    sizeof(struct MR_LD_VF_AFFILIATION),
                                    instance->vf_affiliation,
                                    instance->vf_affiliation_h);
 
        if (instance->vf_affiliation_111)
-               pci_free_consistent(pdev,
+               dma_free_coherent(&pdev->dev,
                                    sizeof(struct MR_LD_VF_AFFILIATION_111),
                                    instance->vf_affiliation_111,
                                    instance->vf_affiliation_111_h);
 
        if (instance->hb_host_mem)
-               pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+               dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
                                    instance->hb_host_mem,
                                    instance->hb_host_mem_h);
 
@@ -7254,7 +7254,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
 
                /*
                 * We don't change the dma_coherent_mask, so
-                * pci_alloc_consistent only returns 32bit addresses
+                * dma_alloc_coherent only returns 32bit addresses
                 */
                if (instance->consistent_mask_64bit) {
                        kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
                get_user(user_sense_off, &cioc->sense_off))
                return -EFAULT;
 
+       if (local_sense_off != user_sense_off)
+               return -EINVAL;
+
        if (local_sense_len) {
                void __user **sense_ioc_ptr =
                        (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
index c7f95ba..f74b5ea 100644 (file)
@@ -684,8 +684,8 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
        array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
                     MAX_MSIX_QUEUES_FUSION;
 
-       fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
-                                                 &fusion->rdpq_phys);
+       fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
+                       array_size, &fusion->rdpq_phys, GFP_KERNEL);
        if (!fusion->rdpq_virt) {
                dev_err(&instance->pdev->dev,
                        "Failed from %s %d\n",  __func__, __LINE__);
@@ -813,7 +813,7 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
                dma_pool_destroy(fusion->reply_frames_desc_pool_align);
 
        if (fusion->rdpq_virt)
-               pci_free_consistent(instance->pdev,
+               dma_free_coherent(&instance->pdev->dev,
                        sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
                        fusion->rdpq_virt, fusion->rdpq_phys);
 }
@@ -2209,7 +2209,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
                cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
 
-               if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+               if (scp->sc_data_direction == DMA_FROM_DEVICE)
                        cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
                else
                        cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2238,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                cdb[31] = (u8)(num_blocks & 0xff);
 
                /* set SCSI IO EEDPFlags */
-               if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+               if (scp->sc_data_direction == DMA_FROM_DEVICE) {
                        io_request->EEDPFlags = cpu_to_le16(
                                MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2621,7 +2621,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
        scsi_buff_len = scsi_bufflen(scp);
        io_request->DataLength = cpu_to_le32(scsi_buff_len);
 
-       if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+       if (scp->sc_data_direction == DMA_FROM_DEVICE)
                io_info.isRead = 1;
 
        local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -3088,9 +3088,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
 
        io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 
-       if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+       if (scp->sc_data_direction == DMA_TO_DEVICE)
                io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
-       else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+       else if (scp->sc_data_direction == DMA_FROM_DEVICE)
                io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
 
        io_request->SGLOffset0 =
index 82e01db..ec6940f 100644 (file)
@@ -1915,8 +1915,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
        /* We use the PCI APIs for now until the generic one gets fixed
         * enough or until we get some macio-specific versions
         */
-       dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
-                                             ms->dma_cmd_size, &dma_cmd_bus);
+       dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
+                       ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
        if (dma_cmd_space == NULL) {
                printk(KERN_ERR "mesh: can't allocate DMA table\n");
                goto out_unmap;
@@ -1974,7 +1974,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
         */
        mesh_shutdown(mdev);
        set_mesh_power(ms, 0);
-       pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+       dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
                            ms->dma_cmd_space, ms->dma_cmd_bus);
  out_unmap:
        iounmap(ms->dma);
@@ -2007,7 +2007,7 @@ static int mesh_remove(struct macio_dev *mdev)
                iounmap(ms->dma);
 
        /* Free DMA commands memory */
-       pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+       dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
                            ms->dma_cmd_space, ms->dma_cmd_bus);
 
        /* Release memory resources */
index 59d7844..2500377 100644 (file)
@@ -122,8 +122,8 @@ mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
        if (!(status & MPT3_CMD_RESET))
                issue_reset = 1;
 
-       pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
-           ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+       ioc_err(ioc, "Command %s\n",
+               issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
        _debug_dump_mf(mpi_request, sz);
 
        return issue_reset;
@@ -336,9 +336,7 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
                                return ct->chain_buffer;
                }
        }
-       pr_info(MPT3SAS_FMT
-           "Provided chain_buffer_dma address is not in the lookup list\n",
-           ioc->name);
+       ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
        return NULL;
 }
 
@@ -394,7 +392,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
                /* Get scsi_cmd using smid */
                scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
                if (scmd == NULL) {
-                       pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+                       ioc_err(ioc, "scmd is NULL\n");
                        return;
                }
 
@@ -532,11 +530,11 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
        struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
        struct pci_dev *pdev;
 
-       if ((ioc == NULL))
+       if (!ioc)
                return -1;
 
        pdev = ioc->pdev;
-       if ((pdev == NULL))
+       if (!pdev)
                return -1;
        pci_stop_and_remove_bus_device_locked(pdev);
        return 0;
@@ -566,8 +564,7 @@ _base_fault_reset_work(struct work_struct *work)
 
        doorbell = mpt3sas_base_get_iocstate(ioc, 0);
        if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
-               pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
-                   ioc->name);
+               ioc_err(ioc, "SAS host is non-operational !!!!\n");
 
                /* It may be possible that EEH recovery can resolve some of
                 * pci bus failure issues rather removing the dead ioc function
@@ -600,13 +597,11 @@ _base_fault_reset_work(struct work_struct *work)
                p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
                    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
                if (IS_ERR(p))
-                       pr_err(MPT3SAS_FMT
-                       "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
-                       ioc->name, __func__);
+                       ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+                               __func__);
                else
-                       pr_err(MPT3SAS_FMT
-                       "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
-                       ioc->name, __func__);
+                       ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+                               __func__);
                return; /* don't rearm timer */
        }
 
@@ -614,8 +609,8 @@ _base_fault_reset_work(struct work_struct *work)
 
        if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-               pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
-                   __func__, (rc == 0) ? "success" : "failed");
+               ioc_warn(ioc, "%s: hard reset: %s\n",
+                        __func__, rc == 0 ? "success" : "failed");
                doorbell = mpt3sas_base_get_iocstate(ioc, 0);
                if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
                        mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +652,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
        ioc->fault_reset_work_q =
                create_singlethread_workqueue(ioc->fault_reset_work_q_name);
        if (!ioc->fault_reset_work_q) {
-               pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
-                   ioc->name, __func__, __LINE__);
+               ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
                return;
        }
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +694,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
 {
-       pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
-           ioc->name, fault_code);
+       ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
 }
 
 /**
@@ -728,8 +721,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
                mpt3sas_base_fault_info(ioc , doorbell);
        else {
                writel(0xC0FFEE00, &ioc->chip->Doorbell);
-               pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
-                       ioc->name);
+               ioc_err(ioc, "Firmware is halted due to command timeout\n");
        }
 
        if (ioc->fwfault_debug == 2)
@@ -956,8 +948,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
                break;
        }
 
-       pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
-               ioc->name, desc, ioc_status, request_hdr, func_str);
+       ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+                desc, ioc_status, request_hdr, func_str);
 
        _debug_dump_mf(request_hdr, frame_sz/4);
 }
@@ -1003,9 +995,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        {
                Mpi2EventDataSasDiscovery_t *event_data =
                    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
-               pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
-                   (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
-                   "start" : "stop");
+               ioc_info(ioc, "Discovery: (%s)",
+                        event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+                        "start" : "stop");
                if (event_data->DiscoveryStatus)
                        pr_cont(" discovery_status(0x%08x)",
                            le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1051,13 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        {
                Mpi26EventDataPCIeEnumeration_t *event_data =
                        (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
-               pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
-                          (event_data->ReasonCode ==
-                               MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
-                               "start" : "stop");
+               ioc_info(ioc, "PCIE Enumeration: (%s)",
+                        event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
+                        "start" : "stop");
                if (event_data->EnumerationStatus)
-                       pr_info("enumeration_status(0x%08x)",
-                                  le32_to_cpu(event_data->EnumerationStatus));
-               pr_info("\n");
+                       pr_cont("enumeration_status(0x%08x)",
+                               le32_to_cpu(event_data->EnumerationStatus));
+               pr_cont("\n");
                return;
        }
        case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
@@ -1077,7 +1068,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
        if (!desc)
                return;
 
-       pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+       ioc_info(ioc, "%s\n", desc);
 }
 
 /**
@@ -1128,11 +1119,9 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
                break;
        }
 
-       pr_warn(MPT3SAS_FMT
-               "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
-               ioc->name, log_info,
-            originator_str, sas_loginfo.dw.code,
-            sas_loginfo.dw.subcode);
+       ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+                log_info,
+                originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
 }
 
 /**
@@ -1152,8 +1141,8 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 
        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
        if (unlikely(!mpi_reply)) {
-               pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1238,9 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
                delayed_event_ack->EventContext = mpi_reply->EventContext;
                list_add_tail(&delayed_event_ack->list,
                                &ioc->delayed_event_ack_list);
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                               "DELAYED: EVENT ACK: event (0x%04x)\n",
-                               ioc->name, le16_to_cpu(mpi_reply->Event)));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+                                   le16_to_cpu(mpi_reply->Event)));
                goto out;
        }
 
@@ -2270,7 +2259,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
        sges_left = scsi_dma_map(scmd);
        if (sges_left < 0) {
                sdev_printk(KERN_ERR, scmd->device,
-                "pci_map_sg failed: request for %d bytes!\n",
+                "scsi_dma_map failed: request for %d bytes!\n",
                 scsi_bufflen(scmd));
                return -ENOMEM;
        }
@@ -2418,7 +2407,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
        sges_left = scsi_dma_map(scmd);
        if (sges_left < 0) {
                sdev_printk(KERN_ERR, scmd->device,
-                       "pci_map_sg failed: request for %d bytes!\n",
+                       "scsi_dma_map failed: request for %d bytes!\n",
                        scsi_bufflen(scmd));
                return -ENOMEM;
        }
@@ -2563,44 +2552,41 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
 static int
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
+       u64 required_mask, coherent_mask;
        struct sysinfo s;
-       u64 consistent_dma_mask;
 
        if (ioc->is_mcpu_endpoint)
                goto try_32bit;
 
+       required_mask = dma_get_required_mask(&pdev->dev);
+       if (sizeof(dma_addr_t) == 4 || required_mask == 32)
+               goto try_32bit;
+
        if (ioc->dma_mask)
-               consistent_dma_mask = DMA_BIT_MASK(64);
+               coherent_mask = DMA_BIT_MASK(64);
        else
-               consistent_dma_mask = DMA_BIT_MASK(32);
-
-       if (sizeof(dma_addr_t) > 4) {
-               const uint64_t required_mask =
-                   dma_get_required_mask(&pdev->dev);
-               if ((required_mask > DMA_BIT_MASK(32)) &&
-                   !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-                   !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
-                       ioc->base_add_sg_single = &_base_add_sg_single_64;
-                       ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-                       ioc->dma_mask = 64;
-                       goto out;
-               }
-       }
+               coherent_mask = DMA_BIT_MASK(32);
+
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+           dma_set_coherent_mask(&pdev->dev, coherent_mask))
+               goto try_32bit;
+
+       ioc->base_add_sg_single = &_base_add_sg_single_64;
+       ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+       ioc->dma_mask = 64;
+       goto out;
 
  try_32bit:
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-           && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-               ioc->base_add_sg_single = &_base_add_sg_single_32;
-               ioc->sge_size = sizeof(Mpi2SGESimple32_t);
-               ioc->dma_mask = 32;
-       } else
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return -ENODEV;
 
+       ioc->base_add_sg_single = &_base_add_sg_single_32;
+       ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+       ioc->dma_mask = 32;
  out:
        si_meminfo(&s);
-       pr_info(MPT3SAS_FMT
-               "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-               ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+       ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+                ioc->dma_mask, convert_to_kb(s.totalram));
 
        return 0;
 }
@@ -2639,8 +2625,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 
        base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
        if (!base) {
-               dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
-                       ioc->name));
+               dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
                return -EINVAL;
        }
 
@@ -2658,9 +2643,8 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
                pci_read_config_word(ioc->pdev, base + 2, &message_control);
                ioc->msix_vector_count = (message_control & 0x3FF) + 1;
        }
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "msix is supported, vector_count(%d)\n",
-               ioc->name, ioc->msix_vector_count));
+       dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
+                                 ioc->msix_vector_count));
        return 0;
 }
 
@@ -2702,8 +2686,8 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
 
        reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
        if (!reply_q) {
-               pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
-                   ioc->name, (int)sizeof(struct adapter_reply_queue));
+               ioc_err(ioc, "unable to allocate memory %zu!\n",
+                       sizeof(struct adapter_reply_queue));
                return -ENOMEM;
        }
        reply_q->ioc = ioc;
@@ -2719,7 +2703,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
        r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
                        IRQF_SHARED, reply_q->name, reply_q);
        if (r) {
-               pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+               pr_err("%s: unable to allocate interrupt %d!\n",
                       reply_q->name, pci_irq_vector(pdev, index));
                kfree(reply_q);
                return -EBUSY;
@@ -2761,8 +2745,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
                        const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
                                                        reply_q->msix_index);
                        if (!mask) {
-                               pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
-                                       ioc->name, reply_q->msix_index);
+                               ioc_warn(ioc, "no affinity for msi %x\n",
+                                        reply_q->msix_index);
                                continue;
                        }
 
@@ -2833,9 +2817,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        ioc->reply_queue_count = min_t(int, ioc->cpu_count,
                ioc->msix_vector_count);
 
-       printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
-         ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
-         ioc->cpu_count, max_msix_vectors);
+       ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+                ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
 
        if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
                local_max_msix_vectors = (reset_devices) ? 1 : 8;
@@ -2857,9 +2840,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
                                  irq_flags);
        if (r < 0) {
-               dfailprintk(ioc, pr_info(MPT3SAS_FMT
-                       "pci_alloc_irq_vectors failed (r=%d) !!!\n",
-                       ioc->name, r));
+               dfailprintk(ioc,
+                           ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+                                    r));
                goto try_ioapic;
        }
 
@@ -2882,9 +2865,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
        ioc->reply_queue_count = 1;
        r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
        if (r < 0) {
-               dfailprintk(ioc, pr_info(MPT3SAS_FMT
-                       "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
-                       ioc->name, r));
+               dfailprintk(ioc,
+                           ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+                                    r));
        } else
                r = _base_request_irq(ioc, 0);
 
@@ -2900,8 +2883,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 {
        struct pci_dev *pdev = ioc->pdev;
 
-       dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
-               ioc->name, __func__));
+       dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        _base_free_irq(ioc);
        _base_disable_msix(ioc);
@@ -2939,13 +2921,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        phys_addr_t chip_phys = 0;
        struct adapter_reply_queue *reply_q;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
-           ioc->name, __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
        if (pci_enable_device_mem(pdev)) {
-               pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
-                       ioc->name);
+               ioc_warn(ioc, "pci_enable_device_mem: failed\n");
                ioc->bars = 0;
                return -ENODEV;
        }
@@ -2953,8 +2933,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 
        if (pci_request_selected_regions(pdev, ioc->bars,
            ioc->driver_name)) {
-               pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
-                       ioc->name);
+               ioc_warn(ioc, "pci_request_selected_regions: failed\n");
                ioc->bars = 0;
                r = -ENODEV;
                goto out_fail;
@@ -2967,8 +2946,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 
 
        if (_base_config_dma_addressing(ioc, pdev) != 0) {
-               pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
-                   ioc->name, pci_name(pdev));
+               ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
                r = -ENODEV;
                goto out_fail;
        }
@@ -2991,8 +2969,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        }
 
        if (ioc->chip == NULL) {
-               pr_err(MPT3SAS_FMT "unable to map adapter memory! "
-                       " or resource not found\n", ioc->name);
+               ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
                r = -EINVAL;
                goto out_fail;
        }
@@ -3026,9 +3003,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
                     ioc->combined_reply_index_count,
                     sizeof(resource_size_t *), GFP_KERNEL);
                if (!ioc->replyPostRegisterIndex) {
-                       dfailprintk(ioc, printk(MPT3SAS_FMT
-                       "allocation for reply Post Register Index failed!!!\n",
-                                                                  ioc->name));
+                       dfailprintk(ioc,
+                                   ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
                        r = -ENOMEM;
                        goto out_fail;
                }
@@ -3053,15 +3029,15 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
        }
 
        list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
-               pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
-                   reply_q->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
-                   "IO-APIC enabled"),
-                   pci_irq_vector(ioc->pdev, reply_q->msix_index));
+               pr_info("%s: %s enabled: IRQ %d\n",
+                       reply_q->name,
+                       ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
+                       pci_irq_vector(ioc->pdev, reply_q->msix_index));
 
-       pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
-           ioc->name, &chip_phys, ioc->chip, memap_sz);
-       pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
-           ioc->name, (unsigned long long)pio_chip, pio_sz);
+       ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+                &chip_phys, ioc->chip, memap_sz);
+       ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+                (unsigned long long)pio_chip, pio_sz);
 
        /* Save PCI configuration state for recovery from PCI AER/EEH errors */
        pci_save_state(pdev);
@@ -3176,8 +3152,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
        if (list_empty(&ioc->internal_free_list)) {
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-               pr_err(MPT3SAS_FMT "%s: smid not available\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: smid not available\n", __func__);
                return 0;
        }
 
@@ -3545,89 +3520,85 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
                case MPI2_MFGPAGE_DEVID_SAS2008:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT2SAS_INTEL_RMS2LL080_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS2LL080_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS2LL080_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS2LL040_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS2LL040_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS2LL040_BRANDING);
                                break;
                        case MPT2SAS_INTEL_SSD910_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_SSD910_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_SSD910_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                "Intel(R) Controller: Subsystem ID: 0x%X\n",
-                                ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                case MPI2_MFGPAGE_DEVID_SAS2308_2:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT2SAS_INTEL_RS25GB008_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RS25GB008_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RS25GB008_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS25JB080_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS25JB080_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS25JB080_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS25JB040_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS25JB040_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS25JB040_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS25KB080_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS25KB080_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS25KB080_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS25KB040_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS25KB040_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS25KB040_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS25LB040_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS25LB040_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS25LB040_BRANDING);
                                break;
                        case MPT2SAS_INTEL_RMS25LB080_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_INTEL_RMS25LB080_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_INTEL_RMS25LB080_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                "Intel(R) Controller: Subsystem ID: 0x%X\n",
-                                ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                case MPI25_MFGPAGE_DEVID_SAS3008:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT3SAS_INTEL_RMS3JC080_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_INTEL_RMS3JC080_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_INTEL_RMS3JC080_BRANDING);
                                break;
 
                        case MPT3SAS_INTEL_RS3GC008_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_INTEL_RS3GC008_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_INTEL_RS3GC008_BRANDING);
                                break;
                        case MPT3SAS_INTEL_RS3FC044_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_INTEL_RS3FC044_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_INTEL_RS3FC044_BRANDING);
                                break;
                        case MPT3SAS_INTEL_RS3UC080_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_INTEL_RS3UC080_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_INTEL_RS3UC080_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                "Intel(R) Controller: Subsystem ID: 0x%X\n",
-                                ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                        break;
                default:
-                       pr_info(MPT3SAS_FMT
-                        "Intel(R) Controller: Subsystem ID: 0x%X\n",
-                        ioc->name, ioc->pdev->subsystem_device);
+                       ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+                                ioc->pdev->subsystem_device);
                        break;
                }
                break;
@@ -3636,57 +3607,54 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
                case MPI2_MFGPAGE_DEVID_SAS2008:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
                                break;
                        case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
                                break;
                        case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
                                break;
                        case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
                                break;
                        case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
                                break;
                        case MPT2SAS_DELL_PERC_H200_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_PERC_H200_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_PERC_H200_BRANDING);
                                break;
                        case MPT2SAS_DELL_6GBPS_SAS_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_DELL_6GBPS_SAS_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_DELL_6GBPS_SAS_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                  "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
-                                  ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                        break;
                case MPI25_MFGPAGE_DEVID_SAS3008:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT3SAS_DELL_12G_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_DELL_12G_HBA_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_DELL_12G_HBA_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                  "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
-                                  ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                        break;
                default:
-                       pr_info(MPT3SAS_FMT
-                          "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
-                          ioc->pdev->subsystem_device);
+                       ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+                                ioc->pdev->subsystem_device);
                        break;
                }
                break;
@@ -3695,46 +3663,42 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
                case MPI25_MFGPAGE_DEVID_SAS3008:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
                                break;
                        case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
                                break;
                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                       MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-                                 ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                        break;
                case MPI25_MFGPAGE_DEVID_SAS3108_1:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                               MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
                                break;
                        case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                               MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
-                               );
+                               ioc_info(ioc, "%s\n",
+                                        MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
-                                ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                        break;
                default:
-                       pr_info(MPT3SAS_FMT
-                          "Cisco SAS HBA: Subsystem ID: 0x%X\n",
-                          ioc->name, ioc->pdev->subsystem_device);
+                       ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+                                ioc->pdev->subsystem_device);
                        break;
                }
                break;
@@ -3743,43 +3707,40 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
                case MPI2_MFGPAGE_DEVID_SAS2004:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                  "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-                                  ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                case MPI2_MFGPAGE_DEVID_SAS2308_2:
                        switch (ioc->pdev->subsystem_device) {
                        case MPT2SAS_HP_2_4_INTERNAL_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_HP_2_4_INTERNAL_BRANDING);
                                break;
                        case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
                                break;
                        case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
                                break;
                        case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
-                               pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                                   MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+                               ioc_info(ioc, "%s\n",
+                                        MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
                                break;
                        default:
-                               pr_info(MPT3SAS_FMT
-                                  "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
-                                  ioc->name, ioc->pdev->subsystem_device);
+                               ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+                                        ioc->pdev->subsystem_device);
                                break;
                        }
                default:
-                       pr_info(MPT3SAS_FMT
-                          "HP SAS HBA: Subsystem ID: 0x%X\n",
-                          ioc->name, ioc->pdev->subsystem_device);
+                       ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+                                ioc->pdev->subsystem_device);
                        break;
                }
        default:
@@ -3806,28 +3767,25 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
        u16 smid, ioc_status;
        size_t data_length;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-                               __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-               pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-                               ioc->name, __func__);
+               ioc_err(ioc, "%s: internal command already in use\n", __func__);
                return -EAGAIN;
        }
 
        data_length = sizeof(Mpi2FWImageHeader_t);
-       fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
-                       &fwpkg_data_dma);
+       fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+                       &fwpkg_data_dma, GFP_KERNEL);
        if (!fwpkg_data) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                               ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -ENOMEM;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                               ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                r = -EAGAIN;
                goto out;
        }
@@ -3846,11 +3804,9 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
        /* Wait for 15 seconds */
        wait_for_completion_timeout(&ioc->base_cmds.done,
                        FW_IMG_HDR_READ_TIMEOUT*HZ);
-       pr_info(MPT3SAS_FMT "%s: complete\n",
-                       ioc->name, __func__);
+       ioc_info(ioc, "%s: complete\n", __func__);
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                               ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                                sizeof(Mpi25FWUploadRequest_t)/4);
                r = -ETIME;
@@ -3864,13 +3820,11 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
                        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
                                FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
                                if (FWImgHdr->PackageVersion.Word) {
-                                       pr_info(MPT3SAS_FMT "FW Package Version"
-                                       "(%02d.%02d.%02d.%02d)\n",
-                                       ioc->name,
-                                       FWImgHdr->PackageVersion.Struct.Major,
-                                       FWImgHdr->PackageVersion.Struct.Minor,
-                                       FWImgHdr->PackageVersion.Struct.Unit,
-                                       FWImgHdr->PackageVersion.Struct.Dev);
+                                       ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+                                                FWImgHdr->PackageVersion.Struct.Major,
+                                                FWImgHdr->PackageVersion.Struct.Minor,
+                                                FWImgHdr->PackageVersion.Struct.Unit,
+                                                FWImgHdr->PackageVersion.Struct.Dev);
                                }
                        } else {
                                _debug_dump_mf(&mpi_reply,
@@ -3881,7 +3835,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 out:
        if (fwpkg_data)
-               pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+               dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
                                fwpkg_data_dma);
        return r;
 }
@@ -3900,18 +3854,17 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
 
        bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
        strncpy(desc, ioc->manu_pg0.ChipName, 16);
-       pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
-          "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
-           ioc->name, desc,
-          (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
-          (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
-          (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
-          ioc->facts.FWVersion.Word & 0x000000FF,
-          ioc->pdev->revision,
-          (bios_version & 0xFF000000) >> 24,
-          (bios_version & 0x00FF0000) >> 16,
-          (bios_version & 0x0000FF00) >> 8,
-           bios_version & 0x000000FF);
+       ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+                desc,
+                (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+                (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+                (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+                ioc->facts.FWVersion.Word & 0x000000FF,
+                ioc->pdev->revision,
+                (bios_version & 0xFF000000) >> 24,
+                (bios_version & 0x00FF0000) >> 16,
+                (bios_version & 0x0000FF00) >> 8,
+                bios_version & 0x000000FF);
 
        _base_display_OEMs_branding(ioc);
 
@@ -3920,82 +3873,81 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
                i++;
        }
 
-       pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+       ioc_info(ioc, "Protocol=(");
 
        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
-               pr_info("Initiator");
+               pr_cont("Initiator");
                i++;
        }
 
        if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
-               pr_info("%sTarget", i ? "," : "");
+               pr_cont("%sTarget", i ? "," : "");
                i++;
        }
 
        i = 0;
-       pr_info("), ");
-       pr_info("Capabilities=(");
+       pr_cont("), Capabilities=(");
 
        if (!ioc->hide_ir_msg) {
                if (ioc->facts.IOCCapabilities &
                    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
-                       pr_info("Raid");
+                       pr_cont("Raid");
                        i++;
                }
        }
 
        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
-               pr_info("%sTLR", i ? "," : "");
+               pr_cont("%sTLR", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
-               pr_info("%sMulticast", i ? "," : "");
+               pr_cont("%sMulticast", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
-               pr_info("%sBIDI Target", i ? "," : "");
+               pr_cont("%sBIDI Target", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
-               pr_info("%sEEDP", i ? "," : "");
+               pr_cont("%sEEDP", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
-               pr_info("%sSnapshot Buffer", i ? "," : "");
+               pr_cont("%sSnapshot Buffer", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
-               pr_info("%sDiag Trace Buffer", i ? "," : "");
+               pr_cont("%sDiag Trace Buffer", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
-               pr_info("%sDiag Extended Buffer", i ? "," : "");
+               pr_cont("%sDiag Extended Buffer", i ? "," : "");
                i++;
        }
 
        if (ioc->facts.IOCCapabilities &
            MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
-               pr_info("%sTask Set Full", i ? "," : "");
+               pr_cont("%sTask Set Full", i ? "," : "");
                i++;
        }
 
        iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
        if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
-               pr_info("%sNCQ", i ? "," : "");
+               pr_cont("%sNCQ", i ? "," : "");
                i++;
        }
 
-       pr_info(")\n");
+       pr_cont(")\n");
 }
 
 /**
@@ -4028,21 +3980,21 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
            sizeof(Mpi2SasIOUnit1PhyData_t));
        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg1) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
            sas_iounit_pg1, sz))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
 
@@ -4074,11 +4026,11 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
                else
                        dmd_new =
                    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
-               pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
-                       ioc->name, dmd_orignal, dmd_new);
-               pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
-                       ioc->name, io_missing_delay_original,
-                   io_missing_delay);
+               ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+                        dmd_orignal, dmd_new);
+               ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+                        io_missing_delay_original,
+                        io_missing_delay);
                ioc->device_missing_delay = dmd_new;
                ioc->io_missing_delay = io_missing_delay;
        }
@@ -4189,33 +4141,32 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        struct chain_tracker *ct;
        struct reply_post_struct *rps;
 
-       dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        if (ioc->request) {
-               pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+               dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
                    ioc->request,  ioc->request_dma);
-               dexitprintk(ioc, pr_info(MPT3SAS_FMT
-                       "request_pool(0x%p): free\n",
-                       ioc->name, ioc->request));
+               dexitprintk(ioc,
+                           ioc_info(ioc, "request_pool(0x%p): free\n",
+                                    ioc->request));
                ioc->request = NULL;
        }
 
        if (ioc->sense) {
                dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
                dma_pool_destroy(ioc->sense_dma_pool);
-               dexitprintk(ioc, pr_info(MPT3SAS_FMT
-                       "sense_pool(0x%p): free\n",
-                       ioc->name, ioc->sense));
+               dexitprintk(ioc,
+                           ioc_info(ioc, "sense_pool(0x%p): free\n",
+                                    ioc->sense));
                ioc->sense = NULL;
        }
 
        if (ioc->reply) {
                dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
                dma_pool_destroy(ioc->reply_dma_pool);
-               dexitprintk(ioc, pr_info(MPT3SAS_FMT
-                       "reply_pool(0x%p): free\n",
-                       ioc->name, ioc->reply));
+               dexitprintk(ioc,
+                           ioc_info(ioc, "reply_pool(0x%p): free\n",
+                                    ioc->reply));
                ioc->reply = NULL;
        }
 
@@ -4223,9 +4174,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
                    ioc->reply_free_dma);
                dma_pool_destroy(ioc->reply_free_dma_pool);
-               dexitprintk(ioc, pr_info(MPT3SAS_FMT
-                       "reply_free_pool(0x%p): free\n",
-                       ioc->name, ioc->reply_free));
+               dexitprintk(ioc,
+                           ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+                                    ioc->reply_free));
                ioc->reply_free = NULL;
        }
 
@@ -4237,9 +4188,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                                    ioc->reply_post_free_dma_pool,
                                    rps->reply_post_free,
                                    rps->reply_post_free_dma);
-                               dexitprintk(ioc, pr_info(MPT3SAS_FMT
-                                   "reply_post_free_pool(0x%p): free\n",
-                                   ioc->name, rps->reply_post_free));
+                               dexitprintk(ioc,
+                                           ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+                                                    rps->reply_post_free));
                                rps->reply_post_free = NULL;
                        }
                } while (ioc->rdpq_array_enable &&
@@ -4267,10 +4218,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        }
 
        if (ioc->config_page) {
-               dexitprintk(ioc, pr_info(MPT3SAS_FMT
-                   "config_page(0x%p): free\n", ioc->name,
-                   ioc->config_page));
-               pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+               dexitprintk(ioc,
+                           ioc_info(ioc, "config_page(0x%p): free\n",
+                                    ioc->config_page));
+               dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
                    ioc->config_page, ioc->config_page_dma);
        }
 
@@ -4338,8 +4289,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        int i, j;
        struct chain_tracker *ct;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
 
        retry_sz = 0;
@@ -4368,10 +4318,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
                        sg_tablesize = min_t(unsigned short, sg_tablesize,
                                        SG_MAX_SEGMENTS);
-                       pr_warn(MPT3SAS_FMT
-                               "sg_tablesize(%u) is bigger than kernel "
-                               "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
-                               sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+                       ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+                                sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
                }
                ioc->shost->sg_tablesize = sg_tablesize;
        }
@@ -4381,9 +4329,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
                if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
                                INTERNAL_SCSIIO_CMDS_COUNT)) {
-                       pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
-                           Credits, it has just %d number of credits\n",
-                           ioc->name, facts->RequestCredit);
+                       ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+                               facts->RequestCredit);
                        return -ENOMEM;
                }
                ioc->internal_depth = 10;
@@ -4482,11 +4429,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
        }
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
-           "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
-           "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
-           ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
-           ioc->chains_needed_per_io));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+                            ioc->max_sges_in_main_message,
+                            ioc->max_sges_in_chain_message,
+                            ioc->shost->sg_tablesize,
+                            ioc->chains_needed_per_io));
 
        /* reply post queue, 16 byte align */
        reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +4449,40 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
            sizeof(struct reply_post_struct), GFP_KERNEL);
 
        if (!ioc->reply_post) {
-               pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
-                       ioc->name);
+               ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
                goto out;
        }
        ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
            &ioc->pdev->dev, sz, 16, 0);
        if (!ioc->reply_post_free_dma_pool) {
-               pr_err(MPT3SAS_FMT
-                "reply_post_free pool: dma_pool_create failed\n",
-                ioc->name);
+               ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
                goto out;
        }
        i = 0;
        do {
                ioc->reply_post[i].reply_post_free =
-                   dma_pool_alloc(ioc->reply_post_free_dma_pool,
+                   dma_pool_zalloc(ioc->reply_post_free_dma_pool,
                    GFP_KERNEL,
                    &ioc->reply_post[i].reply_post_free_dma);
                if (!ioc->reply_post[i].reply_post_free) {
-                       pr_err(MPT3SAS_FMT
-                       "reply_post_free pool: dma_pool_alloc failed\n",
-                       ioc->name);
+                       ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
                        goto out;
                }
-               memset(ioc->reply_post[i].reply_post_free, 0, sz);
-               dinitprintk(ioc, pr_info(MPT3SAS_FMT
-                   "reply post free pool (0x%p): depth(%d),"
-                   "element_size(%d), pool_size(%d kB)\n", ioc->name,
-                   ioc->reply_post[i].reply_post_free,
-                   ioc->reply_post_queue_depth, 8, sz/1024));
-               dinitprintk(ioc, pr_info(MPT3SAS_FMT
-                   "reply_post_free_dma = (0x%llx)\n", ioc->name,
-                   (unsigned long long)
-                   ioc->reply_post[i].reply_post_free_dma));
+               dinitprintk(ioc,
+                           ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+                                    ioc->reply_post[i].reply_post_free,
+                                    ioc->reply_post_queue_depth,
+                                    8, sz / 1024));
+               dinitprintk(ioc,
+                           ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+                                    (u64)ioc->reply_post[i].reply_post_free_dma));
                total_sz += sz;
        } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
        if (ioc->dma_mask == 64) {
                if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
-                       pr_warn(MPT3SAS_FMT
-                           "no suitable consistent DMA mask for %s\n",
-                           ioc->name, pci_name(ioc->pdev));
+                       ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+                                pci_name(ioc->pdev));
                        goto out;
                }
        }
@@ -4554,9 +4494,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
         * with some internal commands that could be outstanding
         */
        ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "scsi host: can_queue depth (%d)\n",
-               ioc->name, ioc->shost->can_queue));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+                            ioc->shost->can_queue));
 
 
        /* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +4512,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        sz += (ioc->internal_depth * ioc->request_sz);
 
        ioc->request_dma_sz = sz;
-       ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+       ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+                       &ioc->request_dma, GFP_KERNEL);
        if (!ioc->request) {
-               pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-                   "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-                   "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
-                   ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+               ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+                       ioc->hba_queue_depth, ioc->chains_needed_per_io,
+                       ioc->request_sz, sz / 1024);
                if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
                        goto out;
                retry_sz = 64;
@@ -4587,10 +4527,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        }
 
        if (retry_sz)
-               pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
-                   "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
-                   "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
-                   ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+               ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+                       ioc->hba_queue_depth, ioc->chains_needed_per_io,
+                       ioc->request_sz, sz / 1024);
 
        /* hi-priority queue */
        ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +4543,26 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
            ioc->request_sz);
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-               ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
-           (ioc->hba_queue_depth * ioc->request_sz)/1024));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+                            ioc->request, ioc->hba_queue_depth,
+                            ioc->request_sz,
+                            (ioc->hba_queue_depth * ioc->request_sz) / 1024));
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
-           ioc->name, (unsigned long long) ioc->request_dma));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "request pool: dma(0x%llx)\n",
+                            (unsigned long long)ioc->request_dma));
        total_sz += sz;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
-               ioc->name, ioc->request, ioc->scsiio_depth));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+                            ioc->request, ioc->scsiio_depth));
 
        ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
        sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
        ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
        if (!ioc->chain_lookup) {
-               pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
-                               "failed\n", ioc->name);
+               ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
                goto out;
        }
 
@@ -4629,8 +4570,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        for (i = 0; i < ioc->scsiio_depth; i++) {
                ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
                if (!ioc->chain_lookup[i].chains_per_smid) {
-                       pr_err(MPT3SAS_FMT "chain_lookup: "
-                                       " kzalloc failed\n", ioc->name);
+                       ioc_err(ioc, "chain_lookup: kzalloc failed\n");
                        goto out;
                }
        }
@@ -4639,29 +4579,27 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
            sizeof(struct request_tracker), GFP_KERNEL);
        if (!ioc->hpr_lookup) {
-               pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
-                   ioc->name);
+               ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
                goto out;
        }
        ioc->hi_priority_smid = ioc->scsiio_depth + 1;
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "hi_priority(0x%p): depth(%d), start smid(%d)\n",
-               ioc->name, ioc->hi_priority,
-           ioc->hi_priority_depth, ioc->hi_priority_smid));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+                            ioc->hi_priority,
+                            ioc->hi_priority_depth, ioc->hi_priority_smid));
 
        /* initialize internal queue smid's */
        ioc->internal_lookup = kcalloc(ioc->internal_depth,
            sizeof(struct request_tracker), GFP_KERNEL);
        if (!ioc->internal_lookup) {
-               pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
-                   ioc->name);
+               ioc_err(ioc, "internal_lookup: kcalloc failed\n");
                goto out;
        }
        ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "internal(0x%p): depth(%d), start smid(%d)\n",
-               ioc->name, ioc->internal,
-           ioc->internal_depth, ioc->internal_smid));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+                            ioc->internal,
+                            ioc->internal_depth, ioc->internal_smid));
        /*
         * The number of NVMe page sized blocks needed is:
         *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +4623,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
                ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
                if (!ioc->pcie_sg_lookup) {
-                       pr_info(MPT3SAS_FMT
-                           "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+                       ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
                        goto out;
                }
                sz = nvme_blocks_needed * ioc->page_size;
                ioc->pcie_sgl_dma_pool =
                        dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
                if (!ioc->pcie_sgl_dma_pool) {
-                       pr_info(MPT3SAS_FMT
-                           "PCIe SGL pool: dma_pool_create failed\n",
-                           ioc->name);
+                       ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
                        goto out;
                }
 
@@ -4708,9 +4643,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                                ioc->pcie_sgl_dma_pool, GFP_KERNEL,
                                &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
                        if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
-                               pr_info(MPT3SAS_FMT
-                                   "PCIe SGL pool: dma_pool_alloc failed\n",
-                                   ioc->name);
+                               ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
                                goto out;
                        }
                        for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +4657,20 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                        }
                }
 
-               dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
-                       "element_size(%d), pool_size(%d kB)\n", ioc->name,
-                       ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
-               dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
-                   "fit in a PRP page(%d)\n", ioc->name,
-                   ioc->chains_per_prp_buffer));
+               dinitprintk(ioc,
+                           ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+                                    ioc->scsiio_depth, sz,
+                                    (sz * ioc->scsiio_depth) / 1024));
+               dinitprintk(ioc,
+                           ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+                                    ioc->chains_per_prp_buffer));
                total_sz += sz * ioc->scsiio_depth;
        }
 
        ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
            ioc->chain_segment_sz, 16, 0);
        if (!ioc->chain_dma_pool) {
-               pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
-                       ioc->name);
+               ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
                goto out;
        }
        for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +4681,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                                        ioc->chain_dma_pool, GFP_KERNEL,
                                        &ct->chain_buffer_dma);
                        if (!ct->chain_buffer) {
-                               pr_err(MPT3SAS_FMT "chain_lookup: "
-                               " pci_pool_alloc failed\n", ioc->name);
+                               ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
                                _base_release_memory_pools(ioc);
                                goto out;
                        }
@@ -4757,25 +4689,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                total_sz += ioc->chain_segment_sz;
        }
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
-               ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
-               ((ioc->chain_depth *  ioc->chain_segment_sz))/1024));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+                            ioc->chain_depth, ioc->chain_segment_sz,
+                            (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
 
        /* sense buffers, 4 byte align */
        sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
        ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
                                              4, 0);
        if (!ioc->sense_dma_pool) {
-               pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
-                   ioc->name);
+               ioc_err(ioc, "sense pool: dma_pool_create failed\n");
                goto out;
        }
        ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
            &ioc->sense_dma);
        if (!ioc->sense) {
-               pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
-                   ioc->name);
+               ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
                goto out;
        }
        /* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +4727,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                        dma_pool_create("sense pool", &ioc->pdev->dev, sz,
                                                roundup_pow_of_two(sz), 0);
                if (!ioc->sense_dma_pool) {
-                       pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
-                                       ioc->name);
+                       ioc_err(ioc, "sense pool: pci_pool_create failed\n");
                        goto out;
                }
                ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
                                &ioc->sense_dma);
                if (!ioc->sense) {
-                       pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
-                                       ioc->name);
+                       ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
                        goto out;
                }
        }
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-           "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
-           "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
-           SCSI_SENSE_BUFFERSIZE, sz/1024));
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
-           ioc->name, (unsigned long long)ioc->sense_dma));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+                            ioc->sense, ioc->scsiio_depth,
+                            SCSI_SENSE_BUFFERSIZE, sz / 1024));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "sense_dma(0x%llx)\n",
+                            (unsigned long long)ioc->sense_dma));
        total_sz += sz;
 
        /* reply pool, 4 byte align */
@@ -4822,25 +4751,24 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
                                              4, 0);
        if (!ioc->reply_dma_pool) {
-               pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
-                   ioc->name);
+               ioc_err(ioc, "reply pool: dma_pool_create failed\n");
                goto out;
        }
        ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
            &ioc->reply_dma);
        if (!ioc->reply) {
-               pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
-                   ioc->name);
+               ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
                goto out;
        }
        ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
        ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
-               ioc->name, ioc->reply,
-           ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
-           ioc->name, (unsigned long long)ioc->reply_dma));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+                            ioc->reply, ioc->reply_free_queue_depth,
+                            ioc->reply_sz, sz / 1024));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "reply_dma(0x%llx)\n",
+                            (unsigned long long)ioc->reply_dma));
        total_sz += sz;
 
        /* reply free queue, 16 byte align */
@@ -4848,24 +4776,22 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
        ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
            &ioc->pdev->dev, sz, 16, 0);
        if (!ioc->reply_free_dma_pool) {
-               pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
-                       ioc->name);
+               ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
                goto out;
        }
-       ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+       ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
            &ioc->reply_free_dma);
        if (!ioc->reply_free) {
-               pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
-                       ioc->name);
+               ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
                goto out;
        }
-       memset(ioc->reply_free, 0, sz);
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
-           "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
-           ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "reply_free_dma (0x%llx)\n",
-               ioc->name, (unsigned long long)ioc->reply_free_dma));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+                            ioc->reply_free, ioc->reply_free_queue_depth,
+                            4, sz / 1024));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "reply_free_dma (0x%llx)\n",
+                            (unsigned long long)ioc->reply_free_dma));
        total_sz += sz;
 
        if (ioc->rdpq_array_enable) {
@@ -4876,8 +4802,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
                if (!ioc->reply_post_free_array_dma_pool) {
                        dinitprintk(ioc,
-                           pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-                           "dma_pool_create failed\n", ioc->name));
+                                   ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
                        goto out;
                }
                ioc->reply_post_free_array =
@@ -4885,34 +4810,31 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                    GFP_KERNEL, &ioc->reply_post_free_array_dma);
                if (!ioc->reply_post_free_array) {
                        dinitprintk(ioc,
-                           pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
-                           "dma_pool_alloc failed\n", ioc->name));
+                                   ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
                        goto out;
                }
        }
        ioc->config_page_sz = 512;
-       ioc->config_page = pci_alloc_consistent(ioc->pdev,
-           ioc->config_page_sz, &ioc->config_page_dma);
+       ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+                       ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
        if (!ioc->config_page) {
-               pr_err(MPT3SAS_FMT
-                       "config page: dma_pool_alloc failed\n",
-                       ioc->name);
+               ioc_err(ioc, "config page: dma_pool_alloc failed\n");
                goto out;
        }
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "config page(0x%p): size(%d)\n",
-               ioc->name, ioc->config_page, ioc->config_page_sz));
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
-               ioc->name, (unsigned long long)ioc->config_page_dma));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "config page(0x%p): size(%d)\n",
+                            ioc->config_page, ioc->config_page_sz));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "config_page_dma(0x%llx)\n",
+                            (unsigned long long)ioc->config_page_dma));
        total_sz += ioc->config_page_sz;
 
-       pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
-           ioc->name, total_sz/1024);
-       pr_info(MPT3SAS_FMT
-               "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
-           ioc->name, ioc->shost->can_queue, facts->RequestCredit);
-       pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
-           ioc->name, ioc->shost->sg_tablesize);
+       ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+                total_sz / 1024);
+       ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+                ioc->shost->can_queue, facts->RequestCredit);
+       ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+                ioc->shost->sg_tablesize);
        return 0;
 
  out:
@@ -4990,9 +4912,9 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
        do {
                int_status = readl(&ioc->chip->HostInterruptStatus);
                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-                       dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                               "%s: successful count(%d), timeout(%d)\n",
-                               ioc->name, __func__, count, timeout));
+                       dhsprintk(ioc,
+                                 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+                                          __func__, count, timeout));
                        return 0;
                }
 
@@ -5000,9 +4922,8 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
                count++;
        } while (--cntdn);
 
-       pr_err(MPT3SAS_FMT
-               "%s: failed due to timeout count(%d), int_status(%x)!\n",
-               ioc->name, __func__, count, int_status);
+       ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+               __func__, count, int_status);
        return -EFAULT;
 }
 
@@ -5017,9 +4938,9 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
        do {
                int_status = readl(&ioc->chip->HostInterruptStatus);
                if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
-                       dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                               "%s: successful count(%d), timeout(%d)\n",
-                               ioc->name, __func__, count, timeout));
+                       dhsprintk(ioc,
+                                 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+                                          __func__, count, timeout));
                        return 0;
                }
 
@@ -5027,9 +4948,8 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
                count++;
        } while (--cntdn);
 
-       pr_err(MPT3SAS_FMT
-               "%s: failed due to timeout count(%d), int_status(%x)!\n",
-               ioc->name, __func__, count, int_status);
+       ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+               __func__, count, int_status);
        return -EFAULT;
 
 }
@@ -5056,9 +4976,9 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
        do {
                int_status = readl(&ioc->chip->HostInterruptStatus);
                if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
-                       dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                               "%s: successful count(%d), timeout(%d)\n",
-                               ioc->name, __func__, count, timeout));
+                       dhsprintk(ioc,
+                                 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+                                          __func__, count, timeout));
                        return 0;
                } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
                        doorbell = readl(&ioc->chip->Doorbell);
@@ -5075,9 +4995,8 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
        } while (--cntdn);
 
  out:
-       pr_err(MPT3SAS_FMT
-        "%s: failed due to timeout count(%d), int_status(%x)!\n",
-        ioc->name, __func__, count, int_status);
+       ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+               __func__, count, int_status);
        return -EFAULT;
 }
 
@@ -5099,9 +5018,9 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
        do {
                doorbell_reg = readl(&ioc->chip->Doorbell);
                if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
-                       dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                               "%s: successful count(%d), timeout(%d)\n",
-                               ioc->name, __func__, count, timeout));
+                       dhsprintk(ioc,
+                                 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+                                          __func__, count, timeout));
                        return 0;
                }
 
@@ -5109,9 +5028,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
                count++;
        } while (--cntdn);
 
-       pr_err(MPT3SAS_FMT
-               "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
-               ioc->name, __func__, count, doorbell_reg);
+       ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+               __func__, count, doorbell_reg);
        return -EFAULT;
 }
 
@@ -5130,8 +5048,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
        int r = 0;
 
        if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
-               pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: unknown reset_type\n", __func__);
                return -EFAULT;
        }
 
@@ -5139,7 +5056,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
           MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
                return -EFAULT;
 
-       pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+       ioc_info(ioc, "sending message unit reset !!\n");
 
        writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
            &ioc->chip->Doorbell);
@@ -5149,15 +5066,14 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
        }
        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
        if (ioc_state) {
-               pr_err(MPT3SAS_FMT
-                       "%s: failed going to ready state (ioc_state=0x%x)\n",
-                       ioc->name, __func__, ioc_state);
+               ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+                       __func__, ioc_state);
                r = -EFAULT;
                goto out;
        }
  out:
-       pr_info(MPT3SAS_FMT "message unit reset: %s\n",
-           ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+       ioc_info(ioc, "message unit reset: %s\n",
+                r == 0 ? "SUCCESS" : "FAILED");
        return r;
 }
 
@@ -5183,9 +5099,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        /* make sure doorbell is not in use */
        if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
-               pr_err(MPT3SAS_FMT
-                       "doorbell is in use (line=%d)\n",
-                       ioc->name, __LINE__);
+               ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
                return -EFAULT;
        }
 
@@ -5200,17 +5114,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
            &ioc->chip->Doorbell);
 
        if ((_base_spin_on_doorbell_int(ioc, 5))) {
-               pr_err(MPT3SAS_FMT
-                       "doorbell handshake int failed (line=%d)\n",
-                       ioc->name, __LINE__);
+               ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+                       __LINE__);
                return -EFAULT;
        }
        writel(0, &ioc->chip->HostInterruptStatus);
 
        if ((_base_wait_for_doorbell_ack(ioc, 5))) {
-               pr_err(MPT3SAS_FMT
-                       "doorbell handshake ack failed (line=%d)\n",
-                       ioc->name, __LINE__);
+               ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+                       __LINE__);
                return -EFAULT;
        }
 
@@ -5222,17 +5134,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
 
        if (failed) {
-               pr_err(MPT3SAS_FMT
-                       "doorbell handshake sending request failed (line=%d)\n",
-                       ioc->name, __LINE__);
+               ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+                       __LINE__);
                return -EFAULT;
        }
 
        /* now wait for the reply */
        if ((_base_wait_for_doorbell_int(ioc, timeout))) {
-               pr_err(MPT3SAS_FMT
-                       "doorbell handshake int failed (line=%d)\n",
-                       ioc->name, __LINE__);
+               ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+                       __LINE__);
                return -EFAULT;
        }
 
@@ -5241,9 +5151,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
        if ((_base_wait_for_doorbell_int(ioc, 5))) {
-               pr_err(MPT3SAS_FMT
-                       "doorbell handshake int failed (line=%d)\n",
-                       ioc->name, __LINE__);
+               ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+                       __LINE__);
                return -EFAULT;
        }
        reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
@@ -5252,9 +5161,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        for (i = 2; i < default_reply->MsgLength * 2; i++)  {
                if ((_base_wait_for_doorbell_int(ioc, 5))) {
-                       pr_err(MPT3SAS_FMT
-                               "doorbell handshake int failed (line=%d)\n",
-                               ioc->name, __LINE__);
+                       ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+                               __LINE__);
                        return -EFAULT;
                }
                if (i >=  reply_bytes/2) /* overflow case */
@@ -5267,8 +5175,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        _base_wait_for_doorbell_int(ioc, 5);
        if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
-               dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                       "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+               dhsprintk(ioc,
+                         ioc_info(ioc, "doorbell is in use (line=%d)\n",
+                                  __LINE__));
        }
        writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5308,14 +5217,12 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
        void *request;
        u16 wait_state_count;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        mutex_lock(&ioc->base_cmds.mutex);
 
        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: base_cmd in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -5324,23 +5231,20 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        rc = -EFAULT;
                        goto out;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name, __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -5408,14 +5312,12 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
        void *request;
        u16 wait_state_count;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        mutex_lock(&ioc->base_cmds.mutex);
 
        if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: base_cmd in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -5424,24 +5326,20 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        rc = -EFAULT;
                        goto out;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name,
-                   __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -5495,8 +5393,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
        struct mpt3sas_port_facts *pfacts;
        int mpi_reply_sz, mpi_request_sz, r;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
        mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5404,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
        if (r != 0) {
-               pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-                   ioc->name, __func__, r);
+               ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
                return r;
        }
 
@@ -5536,26 +5432,26 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
        u32 ioc_state;
        int rc;
 
-       dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        if (ioc->pci_error_recovery) {
-               dfailprintk(ioc, printk(MPT3SAS_FMT
-                   "%s: host in pci error recovery\n", ioc->name, __func__));
+               dfailprintk(ioc,
+                           ioc_info(ioc, "%s: host in pci error recovery\n",
+                                    __func__));
                return -EFAULT;
        }
 
        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-       dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-           ioc->name, __func__, ioc_state));
+       dhsprintk(ioc,
+                 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+                          __func__, ioc_state));
 
        if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
            (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
                return 0;
 
        if (ioc_state & MPI2_DOORBELL_USED) {
-               dhsprintk(ioc, printk(MPT3SAS_FMT
-                   "unexpected doorbell active!\n", ioc->name));
+               dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
                goto issue_diag_reset;
        }
 
@@ -5567,9 +5463,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
 
        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
        if (ioc_state) {
-               dfailprintk(ioc, printk(MPT3SAS_FMT
-                   "%s: failed going to ready state (ioc_state=0x%x)\n",
-                   ioc->name, __func__, ioc_state));
+               dfailprintk(ioc,
+                           ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+                                    __func__, ioc_state));
                return -EFAULT;
        }
 
@@ -5592,14 +5488,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
        struct mpt3sas_facts *facts;
        int mpi_reply_sz, mpi_request_sz, r;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        r = _base_wait_for_iocstate(ioc, 10);
        if (r) {
-               dfailprintk(ioc, printk(MPT3SAS_FMT
-                   "%s: failed getting to correct state\n",
-                   ioc->name, __func__));
+               dfailprintk(ioc,
+                           ioc_info(ioc, "%s: failed getting to correct state\n",
+                                    __func__));
                return r;
        }
        mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +5505,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
            (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
        if (r != 0) {
-               pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-                   ioc->name, __func__, r);
+               ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
                return r;
        }
 
@@ -5663,20 +5557,20 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
         */
        ioc->page_size = 1 << facts->CurrentHostPageSize;
        if (ioc->page_size == 1) {
-               pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
-                       "default host page size to 4k\n", ioc->name);
+               ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
                ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
        }
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
-               ioc->name, facts->CurrentHostPageSize));
-
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "hba queue depth(%d), max chains per io(%d)\n",
-               ioc->name, facts->RequestCredit,
-           facts->MaxChainDepth));
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-               "request frame size(%d), reply frame size(%d)\n", ioc->name,
-           facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+                            facts->CurrentHostPageSize));
+
+       dinitprintk(ioc,
+                   ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+                            facts->RequestCredit, facts->MaxChainDepth));
+       dinitprintk(ioc,
+                   ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+                            facts->IOCRequestFrameSize * 4,
+                            facts->ReplyFrameSize * 4));
        return 0;
 }
 
@@ -5696,8 +5590,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
        u16 ioc_status;
        u32 reply_post_free_array_sz = 0;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
        mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +5656,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
            sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
 
        if (r != 0) {
-               pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
-                   ioc->name, __func__, r);
+               ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
                return r;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
            mpi_reply.IOCLogInfo) {
-               pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+               ioc_err(ioc, "%s: failed\n", __func__);
                r = -EIO;
        }
 
@@ -5842,18 +5734,16 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
        u16 smid;
        u16 ioc_status;
 
-       pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+       ioc_info(ioc, "sending port enable !!\n");
 
        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-               pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: internal command already in use\n", __func__);
                return -EAGAIN;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                return -EAGAIN;
        }
 
@@ -5867,8 +5757,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
        if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2PortEnableRequest_t)/4);
                if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +5770,15 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
        mpi_reply = ioc->port_enable_cmds.reply;
        ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
-                   ioc->name, __func__, ioc_status);
+               ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+                       __func__, ioc_status);
                r = -EFAULT;
                goto out;
        }
 
  out:
        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
-       pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
-           "SUCCESS" : "FAILED"));
+       ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
        return r;
 }
 
@@ -5906,18 +5794,16 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
        Mpi2PortEnableRequest_t *mpi_request;
        u16 smid;
 
-       pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+       ioc_info(ioc, "sending port enable !!\n");
 
        if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
-               pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: internal command already in use\n", __func__);
                return -EAGAIN;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                return -EAGAIN;
        }
 
@@ -6020,19 +5906,16 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
        int r = 0;
        int i;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
-               pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: internal command already in use\n", __func__);
                return -EAGAIN;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                return -EAGAIN;
        }
        ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6049,8 +5932,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
        if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2EventNotificationRequest_t)/4);
                if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +5940,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
                else
                        r = -ETIME;
        } else
-               dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
-                   ioc->name, __func__));
+               dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
        ioc->base_cmds.status = MPT3_CMD_NOT_USED;
        return r;
 }
@@ -6115,18 +5996,16 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
        u32 count;
        u32 hcb_size;
 
-       pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+       ioc_info(ioc, "sending diag reset !!\n");
 
-       drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
-           ioc->name));
+       drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
 
        count = 0;
        do {
                /* Write magic sequence to WriteSequence register
                 * Loop until in diagnostic mode
                 */
-               drsprintk(ioc, pr_info(MPT3SAS_FMT
-                       "write magic sequence\n", ioc->name));
+               drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
                writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
                writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
                writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6142,16 +6021,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
                        goto out;
 
                host_diagnostic = readl(&ioc->chip->HostDiagnostic);
-               drsprintk(ioc, pr_info(MPT3SAS_FMT
-                       "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
-                   ioc->name, count, host_diagnostic));
+               drsprintk(ioc,
+                         ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+                                  count, host_diagnostic));
 
        } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
 
        hcb_size = readl(&ioc->chip->HCBSize);
 
-       drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
-           ioc->name));
+       drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
        writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
             &ioc->chip->HostDiagnostic);
 
@@ -6174,43 +6052,38 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 
        if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
 
-               drsprintk(ioc, pr_info(MPT3SAS_FMT
-               "restart the adapter assuming the HCB Address points to good F/W\n",
-                   ioc->name));
+               drsprintk(ioc,
+                         ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
                host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
                host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
                writel(host_diagnostic, &ioc->chip->HostDiagnostic);
 
-               drsprintk(ioc, pr_info(MPT3SAS_FMT
-                   "re-enable the HCDW\n", ioc->name));
+               drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
                writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
                    &ioc->chip->HCBSize);
        }
 
-       drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
-           ioc->name));
+       drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
        writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
            &ioc->chip->HostDiagnostic);
 
-       drsprintk(ioc, pr_info(MPT3SAS_FMT
-               "disable writes to the diagnostic register\n", ioc->name));
+       drsprintk(ioc,
+                 ioc_info(ioc, "disable writes to the diagnostic register\n"));
        writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
 
-       drsprintk(ioc, pr_info(MPT3SAS_FMT
-               "Wait for FW to go to the READY state\n", ioc->name));
+       drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
        ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
        if (ioc_state) {
-               pr_err(MPT3SAS_FMT
-                       "%s: failed going to ready state (ioc_state=0x%x)\n",
-                       ioc->name, __func__, ioc_state);
+               ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+                       __func__, ioc_state);
                goto out;
        }
 
-       pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+       ioc_info(ioc, "diag reset: SUCCESS\n");
        return 0;
 
  out:
-       pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+       ioc_err(ioc, "diag reset: FAILED\n");
        return -EFAULT;
 }
 
@@ -6228,15 +6101,15 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
        int rc;
        int count;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        if (ioc->pci_error_recovery)
                return 0;
 
        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
-       dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
-           ioc->name, __func__, ioc_state));
+       dhsprintk(ioc,
+                 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+                          __func__, ioc_state));
 
        /* if in RESET state, it should move to READY state shortly */
        count = 0;
@@ -6244,9 +6117,8 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
                while ((ioc_state & MPI2_IOC_STATE_MASK) !=
                    MPI2_IOC_STATE_READY) {
                        if (count++ == 10) {
-                               pr_err(MPT3SAS_FMT
-                                       "%s: failed going to ready state (ioc_state=0x%x)\n",
-                                   ioc->name, __func__, ioc_state);
+                               ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+                                       __func__, ioc_state);
                                return -EFAULT;
                        }
                        ssleep(1);
@@ -6258,9 +6130,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
                return 0;
 
        if (ioc_state & MPI2_DOORBELL_USED) {
-               dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                       "unexpected doorbell active!\n",
-                       ioc->name));
+               dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
                goto issue_diag_reset;
        }
 
@@ -6304,8 +6174,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
        struct adapter_reply_queue *reply_q;
        Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        /* clean the delayed target reset list */
        list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6465,8 +6334,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 {
-       dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        /* synchronizing freeing resource with pci_access_mutex lock */
        mutex_lock(&ioc->pci_access_mutex);
@@ -6494,8 +6362,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        int r, i;
        int cpu_id, last_cpu_id = 0;
 
-       dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        /* setup cpu_msix_table */
        ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6372,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
        ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
        ioc->reply_queue_count = 1;
        if (!ioc->cpu_msix_table) {
-               dfailprintk(ioc, pr_info(MPT3SAS_FMT
-                       "allocation for cpu_msix_table failed!!!\n",
-                       ioc->name));
+               dfailprintk(ioc,
+                           ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
                r = -ENOMEM;
                goto out_free_resources;
        }
@@ -6516,9 +6382,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
                ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
                    sizeof(resource_size_t *), GFP_KERNEL);
                if (!ioc->reply_post_host_index) {
-                       dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
-                               "for reply_post_host_index failed!!!\n",
-                               ioc->name));
+                       dfailprintk(ioc,
+                                   ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
                        r = -ENOMEM;
                        goto out_free_resources;
                }
@@ -6747,8 +6612,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
 {
-       dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
 
        mpt3sas_base_stop_watchdog(ioc);
        mpt3sas_base_free_resources(ioc);
@@ -6781,8 +6645,7 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
        mpt3sas_scsih_pre_reset_handler(ioc);
        mpt3sas_ctl_pre_reset_handler(ioc);
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 }
 
 /**
@@ -6793,8 +6656,7 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
        mpt3sas_scsih_after_reset_handler(ioc);
        mpt3sas_ctl_after_reset_handler(ioc);
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
        if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
                ioc->transport_cmds.status |= MPT3_CMD_RESET;
                mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +6697,7 @@ static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 {
        mpt3sas_scsih_reset_done_handler(ioc);
        mpt3sas_ctl_reset_done_handler(ioc);
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 }
 
 /**
@@ -6883,12 +6744,10 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
        u32 ioc_state;
        u8 is_fault = 0, is_trigger = 0;
 
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-           __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
        if (ioc->pci_error_recovery) {
-               pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
                r = 0;
                goto out_unlocked;
        }
@@ -6942,8 +6801,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
                _base_reset_done_handler(ioc);
 
  out:
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
-           ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+       dtmprintk(ioc,
+                 ioc_info(ioc, "%s: %s\n",
+                          __func__, r == 0 ? "SUCCESS" : "FAILED"));
 
        spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
        ioc->shost_recovery = 0;
@@ -6959,7 +6819,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
                        mpt3sas_trigger_master(ioc,
                            MASTER_TRIGGER_ADAPTER_RESET);
        }
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
        return r;
 }
index 96dc15e..8f1d6b0 100644 (file)
@@ -158,7 +158,14 @@ struct mpt3sas_nvme_cmd {
 /*
  * logging format
  */
-#define MPT3SAS_FMT                    "%s: "
+#define ioc_err(ioc, fmt, ...)                                         \
+       pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...)                                      \
+       pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...)                                                \
+       pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...)                                                \
+       pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
 
 /*
  *  WarpDrive Specific Log codes
index d29a2dc..0220944 100644 (file)
@@ -175,20 +175,18 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        if (!desc)
                return;
 
-       pr_info(MPT3SAS_FMT
-               "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
-               ioc->name, calling_function_name, desc,
-           mpi_request->Header.PageNumber, mpi_request->Action,
-           le32_to_cpu(mpi_request->PageAddress), smid);
+       ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+                calling_function_name, desc,
+                mpi_request->Header.PageNumber, mpi_request->Action,
+                le32_to_cpu(mpi_request->PageAddress), smid);
 
        if (!mpi_reply)
                return;
 
        if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
-               pr_info(MPT3SAS_FMT
-                   "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
-                   ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-                   le32_to_cpu(mpi_reply->IOCLogInfo));
+               ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+                        le16_to_cpu(mpi_reply->IOCStatus),
+                        le32_to_cpu(mpi_reply->IOCLogInfo));
 }
 
 /**
@@ -210,9 +208,8 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
                mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
                    &mem->page_dma, GFP_KERNEL);
                if (!mem->page) {
-                       pr_err(MPT3SAS_FMT
-                               "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
-                           ioc->name, __func__, mem->sz);
+                       ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+                               __func__, mem->sz);
                        r = -ENOMEM;
                }
        } else { /* use tmp buffer if less than 512 bytes */
@@ -313,8 +310,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 
        mutex_lock(&ioc->config_cmds.mutex);
        if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: config_cmd in use\n", __func__);
                mutex_unlock(&ioc->config_cmds.mutex);
                return -EAGAIN;
        }
@@ -362,34 +358,30 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                        r = -EFAULT;
                        goto free_mem;
                }
-               pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
-                   ioc->name, __func__, retry_count);
+               ioc_info(ioc, "%s: attempting retry (%d)\n",
+                        __func__, retry_count);
        }
        wait_state_count = 0;
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        ioc->config_cmds.status = MPT3_CMD_NOT_USED;
                        r = -EFAULT;
                        goto free_mem;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name, __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
        if (wait_state_count)
-               pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-                   ioc->name, __func__);
+               ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
        smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                ioc->config_cmds.status = MPT3_CMD_NOT_USED;
                r = -EAGAIN;
                goto free_mem;
@@ -429,12 +421,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                    (mpi_reply->Header.PageType & 0xF)) {
                        _debug_dump_mf(mpi_request, ioc->request_sz/4);
                        _debug_dump_reply(mpi_reply, ioc->request_sz/4);
-                       panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
-                           " mpi_reply mismatch: Requested PageType(0x%02x)" \
-                           " Reply PageType(0x%02x)\n", \
-                           ioc->name, __func__,
-                           (mpi_request->Header.PageType & 0xF),
-                           (mpi_reply->Header.PageType & 0xF));
+                       panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+                             ioc->name, __func__,
+                             mpi_request->Header.PageType & 0xF,
+                             mpi_reply->Header.PageType & 0xF);
                }
 
                if (((mpi_request->Header.PageType & 0xF) ==
@@ -442,19 +432,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                    mpi_request->ExtPageType != mpi_reply->ExtPageType) {
                        _debug_dump_mf(mpi_request, ioc->request_sz/4);
                        _debug_dump_reply(mpi_reply, ioc->request_sz/4);
-                       panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
-                           " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
-                           " Reply ExtPageType(0x%02x)\n",
-                           ioc->name, __func__, mpi_request->ExtPageType,
-                           mpi_reply->ExtPageType);
+                       panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+                             ioc->name, __func__,
+                             mpi_request->ExtPageType,
+                             mpi_reply->ExtPageType);
                }
                ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
                    & MPI2_IOCSTATUS_MASK;
        }
 
        if (retry_count)
-               pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
-                   ioc->name, __func__, retry_count);
+               ioc_info(ioc, "%s: retry (%d) completed!!\n",
+                        __func__, retry_count);
 
        if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
            config_page && mpi_request->Action ==
@@ -469,14 +458,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                                _debug_dump_reply(mpi_reply, ioc->request_sz/4);
                                _debug_dump_config(p, min_t(u16, mem.sz,
                                    config_page_sz)/4);
-                               panic(KERN_WARNING MPT3SAS_FMT
-                                       "%s: Firmware BUG:" \
-                                   " config page mismatch:"
-                                   " Requested PageType(0x%02x)"
-                                   " Reply PageType(0x%02x)\n",
-                                   ioc->name, __func__,
-                                   (mpi_request->Header.PageType & 0xF),
-                                   (p[3] & 0xF));
+                               panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+                                     ioc->name, __func__,
+                                     mpi_request->Header.PageType & 0xF,
+                                     p[3] & 0xF);
                        }
 
                        if (((mpi_request->Header.PageType & 0xF) ==
@@ -486,13 +471,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                                _debug_dump_reply(mpi_reply, ioc->request_sz/4);
                                _debug_dump_config(p, min_t(u16, mem.sz,
                                    config_page_sz)/4);
-                               panic(KERN_WARNING MPT3SAS_FMT
-                                       "%s: Firmware BUG:" \
-                                   " config page mismatch:"
-                                   " Requested ExtPageType(0x%02x)"
-                                   " Reply ExtPageType(0x%02x)\n",
-                                   ioc->name, __func__,
-                                   mpi_request->ExtPageType, p[6]);
+                               panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+                                     ioc->name, __func__,
+                                     mpi_request->ExtPageType, p[6]);
                        }
                }
                memcpy(config_page, mem.page, min_t(u16, mem.sz,
index 5e8c059..4afa597 100644 (file)
@@ -185,17 +185,15 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
        if (!desc)
                return;
 
-       pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
-           ioc->name, calling_function_name, desc, smid);
+       ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
 
        if (!mpi_reply)
                return;
 
        if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
-               pr_info(MPT3SAS_FMT
-                   "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
-                   ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-                   le32_to_cpu(mpi_reply->IOCLogInfo));
+               ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+                        le16_to_cpu(mpi_reply->IOCStatus),
+                        le32_to_cpu(mpi_reply->IOCLogInfo));
 
        if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
            mpi_request->Function ==
@@ -208,38 +206,32 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
                sas_device = mpt3sas_get_sdev_by_handle(ioc,
                    le16_to_cpu(scsi_reply->DevHandle));
                if (sas_device) {
-                       pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
-                               ioc->name, (unsigned long long)
-                           sas_device->sas_address, sas_device->phy);
-                       pr_warn(MPT3SAS_FMT
-                           "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-                           ioc->name, (unsigned long long)
-                           sas_device->enclosure_logical_id, sas_device->slot);
+                       ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+                                (u64)sas_device->sas_address,
+                                sas_device->phy);
+                       ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+                                (u64)sas_device->enclosure_logical_id,
+                                sas_device->slot);
                        sas_device_put(sas_device);
                }
                if (!sas_device) {
                        pcie_device = mpt3sas_get_pdev_by_handle(ioc,
                                le16_to_cpu(scsi_reply->DevHandle));
                        if (pcie_device) {
-                               pr_warn(MPT3SAS_FMT
-                                   "\tWWID(0x%016llx), port(%d)\n", ioc->name,
-                                   (unsigned long long)pcie_device->wwid,
-                                   pcie_device->port_num);
+                               ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+                                        (unsigned long long)pcie_device->wwid,
+                                        pcie_device->port_num);
                                if (pcie_device->enclosure_handle != 0)
-                                       pr_warn(MPT3SAS_FMT
-                                           "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
-                                           ioc->name, (unsigned long long)
-                                           pcie_device->enclosure_logical_id,
-                                           pcie_device->slot);
+                                       ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+                                                (u64)pcie_device->enclosure_logical_id,
+                                                pcie_device->slot);
                                pcie_device_put(pcie_device);
                        }
                }
                if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
-                       pr_info(MPT3SAS_FMT
-                           "\tscsi_state(0x%02x), scsi_status"
-                           "(0x%02x)\n", ioc->name,
-                           scsi_reply->SCSIState,
-                           scsi_reply->SCSIStatus);
+                       ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+                                scsi_reply->SCSIState,
+                                scsi_reply->SCSIStatus);
        }
 }
 
@@ -466,8 +458,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
        int i;
        u8 issue_reset;
 
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
        for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
                if (!(ioc->diag_buffer_status[i] &
                      MPT3_DIAG_BUFFER_IS_REGISTERED))
@@ -487,8 +478,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
  */
 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
        if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
                ioc->ctl_cmds.status |= MPT3_CMD_RESET;
                mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -506,8 +496,7 @@ void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 {
        int i;
 
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
 
        for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
                if (!(ioc->diag_buffer_status[i] &
@@ -612,10 +601,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
        }
 
        if (!found) {
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: handle(0x%04x), lun(%d), no active mid!!\n",
-                       ioc->name,
-                   desc, le16_to_cpu(tm_request->DevHandle), lun));
+               dctlprintk(ioc,
+                          ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+                                   desc, le16_to_cpu(tm_request->DevHandle),
+                                   lun));
                tm_reply = ioc->ctl_cmds.reply;
                tm_reply->DevHandle = tm_request->DevHandle;
                tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -631,10 +620,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
                return 1;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
-           desc, le16_to_cpu(tm_request->DevHandle), lun,
-            le16_to_cpu(tm_request->TaskMID)));
+       dctlprintk(ioc,
+                  ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+                           desc, le16_to_cpu(tm_request->DevHandle), lun,
+                           le16_to_cpu(tm_request->TaskMID)));
        return 0;
 }
 
@@ -672,8 +661,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        issue_reset = 0;
 
        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
                ret = -EAGAIN;
                goto out;
        }
@@ -682,28 +670,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        ret = -EFAULT;
                        goto out;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name,
-                   __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
        if (wait_state_count)
-               pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-                   ioc->name, __func__);
+               ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
        mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
        if (!mpi_request) {
-               pr_err(MPT3SAS_FMT
-                       "%s: failed obtaining a memory for mpi_request\n",
-                       ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
+                       __func__);
                ret = -ENOMEM;
                goto out;
        }
@@ -726,8 +709,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
                smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
                if (!smid) {
-                       pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                        ret = -EAGAIN;
                        goto out;
                }
@@ -762,8 +744,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 
        /* obtain dma-able memory for data transfer */
        if (data_out_sz) /* WRITE */ {
-               data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
-                   &data_out_dma);
+               data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+                               &data_out_dma, GFP_KERNEL);
                if (!data_out) {
                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
                            __LINE__, __func__);
@@ -782,8 +764,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        }
 
        if (data_in_sz) /* READ */ {
-               data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
-                   &data_in_dma);
+               data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+                               &data_in_dma, GFP_KERNEL);
                if (!data_in) {
                        pr_err("failure at %s:%d/%s()!\n", __FILE__,
                            __LINE__, __func__);
@@ -823,9 +805,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
                    data_out_dma, data_out_sz, data_in_dma, data_in_sz);
                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-                       dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
-                           "ioctl failed due to device removal in progress\n",
-                           ioc->name, device_handle));
+                       dtmprintk(ioc,
+                                 ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+                                          device_handle));
                        mpt3sas_base_free_smid(ioc, smid);
                        ret = -EINVAL;
                        goto out;
@@ -843,9 +825,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
                memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-                       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                               "handle(0x%04x) :ioctl failed due to device removal in progress\n",
-                               ioc->name, device_handle));
+                       dtmprintk(ioc,
+                                 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+                                          device_handle));
                        mpt3sas_base_free_smid(ioc, smid);
                        ret = -EINVAL;
                        goto out;
@@ -863,10 +845,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                Mpi2SCSITaskManagementRequest_t *tm_request =
                    (Mpi2SCSITaskManagementRequest_t *)request;
 
-               dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
-                       ioc->name,
-                   le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+               dtmprintk(ioc,
+                         ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+                                  le16_to_cpu(tm_request->DevHandle),
+                                  tm_request->TaskType));
                ioc->got_task_abort_from_ioctl = 1;
                if (tm_request->TaskType ==
                    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
@@ -881,9 +863,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                ioc->got_task_abort_from_ioctl = 0;
 
                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-                       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                               "handle(0x%04x) :ioctl failed due to device removal in progress\n",
-                               ioc->name, device_handle));
+                       dtmprintk(ioc,
+                                 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+                                          device_handle));
                        mpt3sas_base_free_smid(ioc, smid);
                        ret = -EINVAL;
                        goto out;
@@ -929,9 +911,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
        case MPI2_FUNCTION_SATA_PASSTHROUGH:
        {
                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
-                       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                               "handle(0x%04x) :ioctl failed due to device removal in progress\n",
-                               ioc->name, device_handle));
+                       dtmprintk(ioc,
+                                 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+                                          device_handle));
                        mpt3sas_base_free_smid(ioc, smid);
                        ret = -EINVAL;
                        goto out;
@@ -1017,12 +999,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                Mpi2SCSITaskManagementReply_t *tm_reply =
                    (Mpi2SCSITaskManagementReply_t *)mpi_reply;
 
-               pr_info(MPT3SAS_FMT "TASK_MGMT: " \
-                   "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
-                   "TerminationCount(0x%08x)\n", ioc->name,
-                   le16_to_cpu(tm_reply->IOCStatus),
-                   le32_to_cpu(tm_reply->IOCLogInfo),
-                   le32_to_cpu(tm_reply->TerminationCount));
+               ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+                        le16_to_cpu(tm_reply->IOCStatus),
+                        le32_to_cpu(tm_reply->IOCLogInfo),
+                        le32_to_cpu(tm_reply->TerminationCount));
        }
 
        /* copy out xdata to user */
@@ -1054,9 +1034,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
            MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
            MPI2_FUNCTION_NVME_ENCAPSULATED)) {
                if (karg.sense_data_ptr == NULL) {
-                       pr_info(MPT3SAS_FMT "Response buffer provided"
-                           " by application is NULL; Response data will"
-                           " not be returned.\n", ioc->name);
+                       ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
                        goto out;
                }
                sz_arg = (mpi_request->Function ==
@@ -1079,9 +1057,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                    mpi_request->Function ==
                    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
                    mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
-                       pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
-                               ioc->name,
-                               le16_to_cpu(mpi_request->FunctionDependent1));
+                       ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
+                                le16_to_cpu(mpi_request->FunctionDependent1));
                        mpt3sas_halt_firmware(ioc);
                        pcie_device = mpt3sas_get_pdev_by_handle(ioc,
                                le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1106,11 +1083,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 
        /* free memory associated with sg buffers */
        if (data_in)
-               pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+               dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
                    data_in_dma);
 
        if (data_out)
-               pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+               dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
                    data_out_dma);
 
        kfree(mpi_request);
@@ -1128,8 +1105,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 {
        struct mpt3_ioctl_iocinfo karg;
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+                                __func__));
 
        memset(&karg, 0 , sizeof(karg));
        if (ioc->pfacts)
@@ -1188,8 +1165,8 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+                                __func__));
 
        karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
        memcpy(karg.event_types, ioc->event_type,
@@ -1219,8 +1196,8 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+                                __func__));
 
        memcpy(ioc->event_type, karg.event_types,
            MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
@@ -1259,8 +1236,8 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+                                __func__));
 
        number_bytes = karg.hdr.max_data_size -
            sizeof(struct mpt3_ioctl_header);
@@ -1306,12 +1283,11 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
            ioc->is_driver_loading)
                return -EAGAIN;
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+                                __func__));
 
        retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
-       pr_info(MPT3SAS_FMT "host reset: %s\n",
-           ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+       ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
        return 0;
 }
 
@@ -1440,8 +1416,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        rc = _ctl_btdh_search_sas_device(ioc, &karg);
        if (!rc)
@@ -1512,53 +1488,46 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
        u32 ioc_state;
        u8 issue_reset = 0;
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-               pr_err(MPT3SAS_FMT
-                   "%s: failed due to ioc not operational\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                       __func__);
                rc = -EAGAIN;
                goto out;
        }
 
        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
 
        buffer_type = diag_register->buffer_type;
        if (!_ctl_diag_capability(ioc, buffer_type)) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have capability for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -EPERM;
        }
 
        if (ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_REGISTERED) {
-               pr_err(MPT3SAS_FMT
-                       "%s: already has a registered buffer for buffer_type(0x%02x)\n",
-                       ioc->name, __func__,
-                   buffer_type);
+               ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -EINVAL;
        }
 
        if (diag_register->requested_buffer_size % 4)  {
-               pr_err(MPT3SAS_FMT
-                       "%s: the requested_buffer_size is not 4 byte aligned\n",
-                       ioc->name, __func__);
+               ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
+                       __func__);
                return -EINVAL;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1580,9 +1549,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
        if (request_data) {
                request_data_dma = ioc->diag_buffer_dma[buffer_type];
                if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
-                       pci_free_consistent(ioc->pdev,
-                           ioc->diag_buffer_sz[buffer_type],
-                           request_data, request_data_dma);
+                       dma_free_coherent(&ioc->pdev->dev,
+                                       ioc->diag_buffer_sz[buffer_type],
+                                       request_data, request_data_dma);
                        request_data = NULL;
                }
        }
@@ -1590,12 +1559,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
        if (request_data == NULL) {
                ioc->diag_buffer_sz[buffer_type] = 0;
                ioc->diag_buffer_dma[buffer_type] = 0;
-               request_data = pci_alloc_consistent(
-                       ioc->pdev, request_data_sz, &request_data_dma);
+               request_data = dma_alloc_coherent(&ioc->pdev->dev,
+                               request_data_sz, &request_data_dma, GFP_KERNEL);
                if (request_data == NULL) {
-                       pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
-                           " for diag buffers, requested size(%d)\n",
-                           ioc->name, __func__, request_data_sz);
+                       ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
+                               __func__, request_data_sz);
                        mpt3sas_base_free_smid(ioc, smid);
                        return -ENOMEM;
                }
@@ -1612,11 +1580,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
        mpi_request->VF_ID = 0; /* TODO */
        mpi_request->VP_ID = 0;
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
-               ioc->name, __func__, request_data,
-           (unsigned long long)request_data_dma,
-           le32_to_cpu(mpi_request->BufferLength)));
+       dctlprintk(ioc,
+                  ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+                           __func__, request_data,
+                           (unsigned long long)request_data_dma,
+                           le32_to_cpu(mpi_request->BufferLength)));
 
        for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
                mpi_request->ProductSpecific[i] =
@@ -1637,8 +1605,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 
        /* process the completed Reply Message Frame */
        if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-               pr_err(MPT3SAS_FMT "%s: no reply message\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: no reply message\n", __func__);
                rc = -EFAULT;
                goto out;
        }
@@ -1649,13 +1616,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
                ioc->diag_buffer_status[buffer_type] |=
                        MPT3_DIAG_BUFFER_IS_REGISTERED;
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-                   ioc->name, __func__));
+               dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
        } else {
-               pr_info(MPT3SAS_FMT
-                       "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-                       ioc->name, __func__,
-                   ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+               ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+                        __func__,
+                        ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
                rc = -EFAULT;
        }
 
@@ -1666,7 +1631,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
  out:
 
        if (rc && request_data)
-               pci_free_consistent(ioc->pdev, request_data_sz,
+               dma_free_coherent(&ioc->pdev->dev, request_data_sz,
                    request_data, request_data_dma);
 
        ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
@@ -1689,8 +1654,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
        memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
 
        if (bits_to_register & 1) {
-               pr_info(MPT3SAS_FMT "registering trace buffer support\n",
-                   ioc->name);
+               ioc_info(ioc, "registering trace buffer support\n");
                ioc->diag_trigger_master.MasterData =
                    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
@@ -1701,8 +1665,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
        }
 
        if (bits_to_register & 2) {
-               pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
-                   ioc->name);
+               ioc_info(ioc, "registering snapshot buffer support\n");
                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
                /* register for 2MB buffers  */
                diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1711,8 +1674,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
        }
 
        if (bits_to_register & 4) {
-               pr_info(MPT3SAS_FMT "registering extended buffer support\n",
-                   ioc->name);
+               ioc_info(ioc, "registering extended buffer support\n");
                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
                /* register for 2MB buffers  */
                diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1768,51 +1730,46 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        buffer_type = karg.unique_id & 0x000000ff;
        if (!_ctl_diag_capability(ioc, buffer_type)) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have capability for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -EPERM;
        }
 
        if ((ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-               pr_err(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) is not registered\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+                       __func__, buffer_type);
                return -EINVAL;
        }
        if ((ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-               pr_err(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) has not been released\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
+                       __func__, buffer_type);
                return -EINVAL;
        }
 
        if (karg.unique_id != ioc->unique_id[buffer_type]) {
-               pr_err(MPT3SAS_FMT
-                       "%s: unique_id(0x%08x) is not registered\n",
-                       ioc->name, __func__, karg.unique_id);
+               ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+                       __func__, karg.unique_id);
                return -EINVAL;
        }
 
        request_data = ioc->diag_buffer[buffer_type];
        if (!request_data) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -ENOMEM;
        }
 
        request_data_sz = ioc->diag_buffer_sz[buffer_type];
        request_data_dma = ioc->diag_buffer_dma[buffer_type];
-       pci_free_consistent(ioc->pdev, request_data_sz,
-           request_data, request_data_dma);
+       dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+                       request_data, request_data_dma);
        ioc->diag_buffer[buffer_type] = NULL;
        ioc->diag_buffer_status[buffer_type] = 0;
        return 0;
@@ -1841,41 +1798,37 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        karg.application_flags = 0;
        buffer_type = karg.buffer_type;
 
        if (!_ctl_diag_capability(ioc, buffer_type)) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have capability for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -EPERM;
        }
 
        if ((ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-               pr_err(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) is not registered\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+                       __func__, buffer_type);
                return -EINVAL;
        }
 
        if (karg.unique_id & 0xffffff00) {
                if (karg.unique_id != ioc->unique_id[buffer_type]) {
-                       pr_err(MPT3SAS_FMT
-                               "%s: unique_id(0x%08x) is not registered\n",
-                               ioc->name, __func__, karg.unique_id);
+                       ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+                               __func__, karg.unique_id);
                        return -EINVAL;
                }
        }
 
        request_data = ioc->diag_buffer[buffer_type];
        if (!request_data) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have buffer for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -ENOMEM;
        }
 
@@ -1897,9 +1850,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
        karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
 
        if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
-               pr_err(MPT3SAS_FMT
-                       "%s: unable to write mpt3_diag_query data @ %p\n",
-                       ioc->name, __func__, arg);
+               ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
+                       __func__, arg);
                return -EFAULT;
        }
        return 0;
@@ -1923,8 +1875,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
        u32 ioc_state;
        int rc;
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        rc = 0;
        *issue_reset = 0;
@@ -1935,24 +1887,22 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
                    MPT3_DIAG_BUFFER_IS_REGISTERED)
                        ioc->diag_buffer_status[buffer_type] |=
                            MPT3_DIAG_BUFFER_IS_RELEASED;
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: skipping due to FAULT state\n", ioc->name,
-                   __func__));
+               dctlprintk(ioc,
+                          ioc_info(ioc, "%s: skipping due to FAULT state\n",
+                                   __func__));
                rc = -EAGAIN;
                goto out;
        }
 
        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1982,8 +1932,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 
        /* process the completed Reply Message Frame */
        if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-               pr_err(MPT3SAS_FMT "%s: no reply message\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: no reply message\n", __func__);
                rc = -EFAULT;
                goto out;
        }
@@ -1994,13 +1943,11 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
                ioc->diag_buffer_status[buffer_type] |=
                    MPT3_DIAG_BUFFER_IS_RELEASED;
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-                   ioc->name, __func__));
+               dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
        } else {
-               pr_info(MPT3SAS_FMT
-                       "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-                       ioc->name, __func__,
-                   ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+               ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+                        __func__,
+                        ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
                rc = -EFAULT;
        }
 
@@ -2033,47 +1980,41 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        buffer_type = karg.unique_id & 0x000000ff;
        if (!_ctl_diag_capability(ioc, buffer_type)) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have capability for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -EPERM;
        }
 
        if ((ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-               pr_err(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) is not registered\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+                       __func__, buffer_type);
                return -EINVAL;
        }
 
        if (karg.unique_id != ioc->unique_id[buffer_type]) {
-               pr_err(MPT3SAS_FMT
-                       "%s: unique_id(0x%08x) is not registered\n",
-                       ioc->name, __func__, karg.unique_id);
+               ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+                       __func__, karg.unique_id);
                return -EINVAL;
        }
 
        if (ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_RELEASED) {
-               pr_err(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) is already released\n",
-                       ioc->name, __func__,
-                   buffer_type);
+               ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+                       __func__, buffer_type);
                return 0;
        }
 
        request_data = ioc->diag_buffer[buffer_type];
 
        if (!request_data) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -ENOMEM;
        }
 
@@ -2084,9 +2025,8 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                    MPT3_DIAG_BUFFER_IS_RELEASED;
                ioc->diag_buffer_status[buffer_type] &=
                    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
-               pr_err(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) was released due to host reset\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+                       __func__, buffer_type);
                return 0;
        }
 
@@ -2124,38 +2064,34 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EFAULT;
        }
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
-           __func__));
+       dctlprintk(ioc, ioc_info(ioc, "%s\n",
+                                __func__));
 
        buffer_type = karg.unique_id & 0x000000ff;
        if (!_ctl_diag_capability(ioc, buffer_type)) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have capability for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -EPERM;
        }
 
        if (karg.unique_id != ioc->unique_id[buffer_type]) {
-               pr_err(MPT3SAS_FMT
-                       "%s: unique_id(0x%08x) is not registered\n",
-                       ioc->name, __func__, karg.unique_id);
+               ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+                       __func__, karg.unique_id);
                return -EINVAL;
        }
 
        request_data = ioc->diag_buffer[buffer_type];
        if (!request_data) {
-               pr_err(MPT3SAS_FMT
-                       "%s: doesn't have buffer for buffer_type(0x%02x)\n",
-                       ioc->name, __func__, buffer_type);
+               ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+                       __func__, buffer_type);
                return -ENOMEM;
        }
 
        request_size = ioc->diag_buffer_sz[buffer_type];
 
        if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
-               pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
-                   "or bytes_to_read are not 4 byte aligned\n", ioc->name,
-                   __func__);
+               ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
+                       __func__);
                return -EINVAL;
        }
 
@@ -2163,10 +2099,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
                return -EINVAL;
 
        diag_data = (void *)(request_data + karg.starting_offset);
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
-               ioc->name, __func__,
-           diag_data, karg.starting_offset, karg.bytes_to_read));
+       dctlprintk(ioc,
+                  ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+                           __func__, diag_data, karg.starting_offset,
+                           karg.bytes_to_read));
 
        /* Truncate data on requests that are too large */
        if ((diag_data + karg.bytes_to_read < diag_data) ||
@@ -2177,39 +2113,36 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
        if (copy_to_user((void __user *)uarg->diagnostic_data,
            diag_data, copy_size)) {
-               pr_err(MPT3SAS_FMT
-                       "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
-                       ioc->name, __func__, diag_data);
+               ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+                       __func__, diag_data);
                return -EFAULT;
        }
 
        if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
                return 0;
 
-       dctlprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: Reregister buffer_type(0x%02x)\n",
-               ioc->name, __func__, buffer_type));
+       dctlprintk(ioc,
+                  ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
+                           __func__, buffer_type));
        if ((ioc->diag_buffer_status[buffer_type] &
            MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: buffer_type(0x%02x) is still registered\n",
-                       ioc->name, __func__, buffer_type));
+               dctlprintk(ioc,
+                          ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
+                                   __func__, buffer_type));
                return 0;
        }
        /* Get a free request frame and save the message context.
        */
 
        if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
 
        smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -2247,8 +2180,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
        /* process the completed Reply Message Frame */
        if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
-               pr_err(MPT3SAS_FMT "%s: no reply message\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: no reply message\n", __func__);
                rc = -EFAULT;
                goto out;
        }
@@ -2259,13 +2191,11 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
        if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
                ioc->diag_buffer_status[buffer_type] |=
                    MPT3_DIAG_BUFFER_IS_REGISTERED;
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
-                   ioc->name, __func__));
+               dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
        } else {
-               pr_info(MPT3SAS_FMT
-                       "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
-                       ioc->name, __func__,
-                   ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+               ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+                        __func__, ioc_status,
+                        le32_to_cpu(mpi_reply->IOCLogInfo));
                rc = -EFAULT;
        }
 
@@ -2450,8 +2380,9 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
                        ret = _ctl_diag_read_buffer(ioc, arg);
                break;
        default:
-               dctlprintk(ioc, pr_info(MPT3SAS_FMT
-                   "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+               dctlprintk(ioc,
+                          ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
+                                   cmd));
                break;
        }
 
@@ -2840,8 +2771,8 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
                return -EINVAL;
 
        ioc->logging_level = val;
-       pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
-           ioc->logging_level);
+       ioc_info(ioc, "logging_level=%08xh\n",
+                ioc->logging_level);
        return strlen(buf);
 }
 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
@@ -2877,8 +2808,8 @@ _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
                return -EINVAL;
 
        ioc->fwfault_debug = val;
-       pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
-           ioc->fwfault_debug);
+       ioc_info(ioc, "fwfault_debug=%d\n",
+                ioc->fwfault_debug);
        return strlen(buf);
 }
 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
@@ -2958,8 +2889,8 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
        ssize_t rc = 0;
 
        if (!ioc->is_warpdrive) {
-               pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
-                   " warpdrive\n", ioc->name, __func__);
+               ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+                       __func__);
                goto out;
        }
        /* pci_access_mutex lock acquired by sysfs show path */
@@ -2973,30 +2904,28 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
        sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
        io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
        if (!io_unit_pg3) {
-               pr_err(MPT3SAS_FMT "%s: failed allocating memory "
-                   "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+               ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
+                       __func__, sz);
                goto out;
        }
 
        if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
            0) {
-               pr_err(MPT3SAS_FMT
-                   "%s: failed reading iounit_pg3\n", ioc->name,
-                   __func__);
+               ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+                       __func__);
                goto out;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
-                   "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+               ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+                       __func__, ioc_status);
                goto out;
        }
 
        if (io_unit_pg3->GPIOCount < 25) {
-               pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
-                    "25 entries, detected (%d) entries\n", ioc->name, __func__,
-                   io_unit_pg3->GPIOCount);
+               ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
+                       __func__, io_unit_pg3->GPIOCount);
                goto out;
        }
 
@@ -3039,17 +2968,15 @@ _ctl_host_trace_buffer_size_show(struct device *cdev,
        struct DIAG_BUFFER_START *request_data;
 
        if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
-               pr_err(MPT3SAS_FMT
-                       "%s: host_trace_buffer is not registered\n",
-                       ioc->name, __func__);
+               ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+                       __func__);
                return 0;
        }
 
        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-               pr_err(MPT3SAS_FMT
-                       "%s: host_trace_buffer is not registered\n",
-                       ioc->name, __func__);
+               ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+                       __func__);
                return 0;
        }
 
@@ -3089,17 +3016,15 @@ _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
        u32 size;
 
        if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
-               pr_err(MPT3SAS_FMT
-                       "%s: host_trace_buffer is not registered\n",
-                       ioc->name, __func__);
+               ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+                       __func__);
                return 0;
        }
 
        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
-               pr_err(MPT3SAS_FMT
-                       "%s: host_trace_buffer is not registered\n",
-                       ioc->name, __func__);
+               ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+                       __func__);
                return 0;
        }
 
@@ -3188,8 +3113,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
                    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
                        goto out;
                memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
-               pr_info(MPT3SAS_FMT "posting host trace buffers\n",
-                   ioc->name);
+               ioc_info(ioc, "posting host trace buffers\n");
                diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
                diag_register.requested_buffer_size = (1024 * 1024);
                diag_register.unique_id = 0x7075900;
@@ -3205,8 +3129,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
                if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
                    MPT3_DIAG_BUFFER_IS_RELEASED))
                        goto out;
-               pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
-                   ioc->name);
+               ioc_info(ioc, "releasing host trace buffer\n");
                mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
                    &issue_reset);
        }
@@ -3658,8 +3581,10 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
                        if ((ioc->diag_buffer_status[i] &
                            MPT3_DIAG_BUFFER_IS_RELEASED))
                                continue;
-                       pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
-                       ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+                       dma_free_coherent(&ioc->pdev->dev,
+                                         ioc->diag_buffer_sz[i],
+                                         ioc->diag_buffer[i],
+                                         ioc->diag_buffer_dma[i]);
                        ioc->diag_buffer[i] = NULL;
                        ioc->diag_buffer_status[i] = 0;
                }
index 86eaa89..03c5284 100644 (file)
@@ -418,8 +418,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-               __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -ENXIO;
        }
 
@@ -442,10 +442,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                return -ENXIO;
 
        /* else error case */
-       pr_err(MPT3SAS_FMT
-               "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
-               ioc->name, handle, ioc_status,
-            __FILE__, __LINE__, __func__);
+       ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+               handle, ioc_status, __FILE__, __LINE__, __func__);
        return -EIO;
 }
 
@@ -508,10 +506,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
                    (ioc->bios_pg2.ReqBootDeviceForm &
                    MPI2_BIOSPAGE2_FORM_MASK),
                    &ioc->bios_pg2.RequestedBootDevice)) {
-                       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-                          "%s: req_boot_device(0x%016llx)\n",
-                           ioc->name, __func__,
-                           (unsigned long long)sas_address));
+                       dinitprintk(ioc,
+                                   ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
+                                            __func__, (u64)sas_address));
                        ioc->req_boot_device.device = device;
                        ioc->req_boot_device.channel = channel;
                }
@@ -523,10 +520,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
                    (ioc->bios_pg2.ReqAltBootDeviceForm &
                    MPI2_BIOSPAGE2_FORM_MASK),
                    &ioc->bios_pg2.RequestedAltBootDevice)) {
-                       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-                          "%s: req_alt_boot_device(0x%016llx)\n",
-                           ioc->name, __func__,
-                           (unsigned long long)sas_address));
+                       dinitprintk(ioc,
+                                   ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
+                                            __func__, (u64)sas_address));
                        ioc->req_alt_boot_device.device = device;
                        ioc->req_alt_boot_device.channel = channel;
                }
@@ -538,10 +534,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
                    (ioc->bios_pg2.CurrentBootDeviceForm &
                    MPI2_BIOSPAGE2_FORM_MASK),
                    &ioc->bios_pg2.CurrentBootDevice)) {
-                       dinitprintk(ioc, pr_info(MPT3SAS_FMT
-                          "%s: current_boot_device(0x%016llx)\n",
-                           ioc->name, __func__,
-                           (unsigned long long)sas_address));
+                       dinitprintk(ioc,
+                                   ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
+                                            __func__, (u64)sas_address));
                        ioc->current_boot_device.device = device;
                        ioc->current_boot_device.channel = channel;
                }
@@ -752,19 +747,16 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
                            sas_device->chassis_slot);
        } else {
                if (sas_device->enclosure_handle != 0)
-                       pr_info(MPT3SAS_FMT
-                           "enclosure logical id(0x%016llx), slot(%d) \n",
-                           ioc->name, (unsigned long long)
-                           sas_device->enclosure_logical_id,
-                           sas_device->slot);
+                       ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+                                (u64)sas_device->enclosure_logical_id,
+                                sas_device->slot);
                if (sas_device->connector_name[0] != '\0')
-                       pr_info(MPT3SAS_FMT
-                           "enclosure level(0x%04x), connector name( %s)\n",
-                           ioc->name, sas_device->enclosure_level,
-                           sas_device->connector_name);
+                       ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+                                sas_device->enclosure_level,
+                                sas_device->connector_name);
                if (sas_device->is_chassis_slot_valid)
-                       pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
-                           ioc->name, sas_device->chassis_slot);
+                       ioc_info(ioc, "chassis slot(0x%04x)\n",
+                                sas_device->chassis_slot);
        }
 }
 
@@ -784,10 +776,8 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
 
        if (!sas_device)
                return;
-       pr_info(MPT3SAS_FMT
-           "removing handle(0x%04x), sas_addr(0x%016llx)\n",
-           ioc->name, sas_device->handle,
-           (unsigned long long) sas_device->sas_address);
+       ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+                sas_device->handle, (u64)sas_device->sas_address);
 
        _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
 
@@ -872,10 +862,10 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
 {
        unsigned long flags;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
-               ioc->name, __func__, sas_device->handle,
-               (unsigned long long)sas_device->sas_address));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+                           __func__, sas_device->handle,
+                           (u64)sas_device->sas_address));
 
        dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
            NULL, NULL));
@@ -923,10 +913,10 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
 {
        unsigned long flags;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-               __func__, sas_device->handle,
-               (unsigned long long)sas_device->sas_address));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+                           __func__, sas_device->handle,
+                           (u64)sas_device->sas_address));
 
        dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
            NULL, NULL));
@@ -1073,21 +1063,16 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
 
        if (!pcie_device)
                return;
-       pr_info(MPT3SAS_FMT
-               "removing handle(0x%04x), wwid(0x%016llx)\n",
-               ioc->name, pcie_device->handle,
-               (unsigned long long) pcie_device->wwid);
+       ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+                pcie_device->handle, (u64)pcie_device->wwid);
        if (pcie_device->enclosure_handle != 0)
-               pr_info(MPT3SAS_FMT
-                       "removing enclosure logical id(0x%016llx), slot(%d)\n",
-                       ioc->name,
-                       (unsigned long long)pcie_device->enclosure_logical_id,
-               pcie_device->slot);
+               ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
+                        (u64)pcie_device->enclosure_logical_id,
+                        pcie_device->slot);
        if (pcie_device->connector_name[0] != '\0')
-               pr_info(MPT3SAS_FMT
-                   "removing enclosure level(0x%04x), connector name( %s)\n",
-                       ioc->name, pcie_device->enclosure_level,
-                       pcie_device->connector_name);
+               ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
+                        pcie_device->enclosure_level,
+                        pcie_device->connector_name);
 
        spin_lock_irqsave(&ioc->pcie_device_lock, flags);
        if (!list_empty(&pcie_device->list)) {
@@ -1146,20 +1131,21 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
 {
        unsigned long flags;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-               pcie_device->handle, (unsigned long long)pcie_device->wwid));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+                           __func__,
+                           pcie_device->handle, (u64)pcie_device->wwid));
        if (pcie_device->enclosure_handle != 0)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: enclosure logical id(0x%016llx), slot( %d)\n",
-                       ioc->name, __func__,
-                       (unsigned long long)pcie_device->enclosure_logical_id,
-                       pcie_device->slot));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+                                   __func__,
+                                   (u64)pcie_device->enclosure_logical_id,
+                                   pcie_device->slot));
        if (pcie_device->connector_name[0] != '\0')
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: enclosure level(0x%04x), connector name( %s)\n",
-                       ioc->name, __func__, pcie_device->enclosure_level,
-                       pcie_device->connector_name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+                                   __func__, pcie_device->enclosure_level,
+                                   pcie_device->connector_name));
 
        spin_lock_irqsave(&ioc->pcie_device_lock, flags);
        pcie_device_get(pcie_device);
@@ -1191,20 +1177,21 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
 {
        unsigned long flags;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-               pcie_device->handle, (unsigned long long)pcie_device->wwid));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+                           __func__,
+                           pcie_device->handle, (u64)pcie_device->wwid));
        if (pcie_device->enclosure_handle != 0)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: enclosure logical id(0x%016llx), slot( %d)\n",
-                       ioc->name, __func__,
-                       (unsigned long long)pcie_device->enclosure_logical_id,
-                       pcie_device->slot));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+                                   __func__,
+                                   (u64)pcie_device->enclosure_logical_id,
+                                   pcie_device->slot));
        if (pcie_device->connector_name[0] != '\0')
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: enclosure level(0x%04x), connector name( %s)\n",
-                       ioc->name, __func__, pcie_device->enclosure_level,
-                       pcie_device->connector_name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+                                   __func__, pcie_device->enclosure_level,
+                                   pcie_device->connector_name));
 
        spin_lock_irqsave(&ioc->pcie_device_lock, flags);
        pcie_device_get(pcie_device);
@@ -1304,9 +1291,10 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
 {
        unsigned long flags;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-           raid_device->handle, (unsigned long long)raid_device->wwid));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
+                           __func__,
+                           raid_device->handle, (u64)raid_device->wwid));
 
        spin_lock_irqsave(&ioc->raid_device_lock, flags);
        list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -1857,16 +1845,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
 
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
@@ -1952,8 +1940,8 @@ scsih_get_resync(struct device *dev)
        if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
             MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
             sizeof(Mpi2RaidVolPage0_t))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                percent_complete = 0;
                goto out;
        }
@@ -2006,8 +1994,8 @@ scsih_get_state(struct device *dev)
        if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
             MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
             sizeof(Mpi2RaidVolPage0_t))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
 
@@ -2103,9 +2091,9 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
 
        if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
            &num_pds)) || !num_pds) {
-               dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-                   __func__));
+               dfailprintk(ioc,
+                           ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                    __FILE__, __LINE__, __func__));
                return 1;
        }
 
@@ -2114,17 +2102,17 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
            sizeof(Mpi2RaidVol0PhysDisk_t));
        vol_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!vol_pg0) {
-               dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-                   __func__));
+               dfailprintk(ioc,
+                           ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                    __FILE__, __LINE__, __func__));
                return 1;
        }
 
        if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
             MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-               dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-                   __func__));
+               dfailprintk(ioc,
+                           ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                    __FILE__, __LINE__, __func__));
                kfree(vol_pg0);
                return 1;
        }
@@ -2215,16 +2203,16 @@ scsih_slave_configure(struct scsi_device *sdev)
                raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
                spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
                if (!raid_device) {
-                       dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                           "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
-                           __LINE__, __func__));
+                       dfailprintk(ioc,
+                                   ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                            __FILE__, __LINE__, __func__));
                        return 1;
                }
 
                if (_scsih_get_volume_capabilities(ioc, raid_device)) {
-                       dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                           "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
-                           __LINE__, __func__));
+                       dfailprintk(ioc,
+                                   ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                            __FILE__, __LINE__, __func__));
                        return 1;
                }
 
@@ -2308,16 +2296,16 @@ scsih_slave_configure(struct scsi_device *sdev)
        if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
                if (mpt3sas_config_get_volume_handle(ioc, handle,
                    &volume_handle)) {
-                       dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                           "failure at %s:%d/%s()!\n", ioc->name,
-                           __FILE__, __LINE__, __func__));
+                       dfailprintk(ioc,
+                                   ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                            __FILE__, __LINE__, __func__));
                        return 1;
                }
                if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
                    volume_handle, &volume_wwid)) {
-                       dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                           "failure at %s:%d/%s()!\n", ioc->name,
-                           __FILE__, __LINE__, __func__));
+                       dfailprintk(ioc,
+                                   ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                            __FILE__, __LINE__, __func__));
                        return 1;
                }
        }
@@ -2329,9 +2317,9 @@ scsih_slave_configure(struct scsi_device *sdev)
                                sas_device_priv_data->sas_target->sas_address);
                if (!pcie_device) {
                        spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
-                       dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                               "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
-                               __LINE__, __func__));
+                       dfailprintk(ioc,
+                                   ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                            __FILE__, __LINE__, __func__));
                        return 1;
                }
 
@@ -2377,9 +2365,9 @@ scsih_slave_configure(struct scsi_device *sdev)
           sas_device_priv_data->sas_target->sas_address);
        if (!sas_device) {
                spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
-               dfailprintk(ioc, pr_warn(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
-                   __func__));
+               dfailprintk(ioc,
+                           ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+                                    __FILE__, __LINE__, __func__));
                return 1;
        }
 
@@ -2515,8 +2503,7 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
                desc = "unknown";
                break;
        }
-       pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
-               ioc->name, response_code, desc);
+       ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
 }
 
 /**
@@ -2640,22 +2627,19 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
        lockdep_assert_held(&ioc->tm_cmds.mutex);
 
        if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
                return FAILED;
        }
 
        if (ioc->shost_recovery || ioc->remove_host ||
            ioc->pci_error_recovery) {
-               pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: host reset in progress!\n", __func__);
                return FAILED;
        }
 
        ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
        if (ioc_state & MPI2_DOORBELL_USED) {
-               dhsprintk(ioc, pr_info(MPT3SAS_FMT
-                       "unexpected doorbell active!\n", ioc->name));
+               dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
                rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
                return (!rc) ? SUCCESS : FAILED;
        }
@@ -2669,14 +2653,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 
        smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                return FAILED;
        }
 
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-               "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
-               ioc->name, handle, type, smid_task, timeout, tr_method));
+       dtmprintk(ioc,
+                 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+                          handle, type, smid_task, timeout, tr_method));
        ioc->tm_cmds.status = MPT3_CMD_PENDING;
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        ioc->tm_cmds.smid = smid;
@@ -2709,11 +2692,11 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
        if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
                mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
                mpi_reply = ioc->tm_cmds.reply;
-               dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
-                   "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
-                   ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-                   le32_to_cpu(mpi_reply->IOCLogInfo),
-                   le32_to_cpu(mpi_reply->TerminationCount)));
+               dtmprintk(ioc,
+                         ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+                                  le16_to_cpu(mpi_reply->IOCStatus),
+                                  le32_to_cpu(mpi_reply->IOCLogInfo),
+                                  le32_to_cpu(mpi_reply->TerminationCount)));
                if (ioc->logging_level & MPT_DEBUG_TM) {
                        _scsih_response_code(ioc, mpi_reply->ResponseCode);
                        if (mpi_reply->IOCStatus)
@@ -3060,13 +3043,11 @@ scsih_host_reset(struct scsi_cmnd *scmd)
        struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
        int r, retval;
 
-       pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
-           ioc->name, scmd);
+       ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
        scsi_print_command(scmd);
 
        if (ioc->is_driver_loading || ioc->remove_host) {
-               pr_info(MPT3SAS_FMT "Blocking the host reset\n",
-                   ioc->name);
+               ioc_info(ioc, "Blocking the host reset\n");
                r = FAILED;
                goto out;
        }
@@ -3074,8 +3055,8 @@ scsih_host_reset(struct scsi_cmnd *scmd)
        retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
        r = (retval < 0) ? FAILED : SUCCESS;
 out:
-       pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
-           ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+       ioc_info(ioc, "host reset: %s scmd(%p)\n",
+                r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
 
        return r;
 }
@@ -3567,18 +3548,16 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        u8 tr_method = 0;
 
        if (ioc->pci_error_recovery) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: host in pci error recovery: handle(0x%04x)\n",
-                       __func__, ioc->name,
-                   handle));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+                                   __func__, handle));
                return;
        }
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: host is not operational: handle(0x%04x)\n",
-                       __func__, ioc->name,
-                  handle));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+                                   __func__, handle));
                return;
        }
 
@@ -3614,39 +3593,31 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                        tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
        }
        if (sas_target_priv_data) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
-                       ioc->name, handle,
-                   (unsigned long long)sas_address));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+                                   handle, (u64)sas_address));
                if (sas_device) {
                        if (sas_device->enclosure_handle != 0)
-                               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                                   "setting delete flag:enclosure logical "
-                                   "id(0x%016llx), slot(%d)\n", ioc->name,
-                                   (unsigned long long)
-                                   sas_device->enclosure_logical_id,
-                                   sas_device->slot));
+                               dewtprintk(ioc,
+                                          ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
+                                                   (u64)sas_device->enclosure_logical_id,
+                                                   sas_device->slot));
                        if (sas_device->connector_name[0] != '\0')
-                               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                                   "setting delete flag: enclosure "
-                                   "level(0x%04x), connector name( %s)\n",
-                                   ioc->name, sas_device->enclosure_level,
-                                   sas_device->connector_name));
+                               dewtprintk(ioc,
+                                          ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+                                                   sas_device->enclosure_level,
+                                                   sas_device->connector_name));
                } else if (pcie_device) {
                        if (pcie_device->enclosure_handle != 0)
-                               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                                   "setting delete flag: logical "
-                                   "id(0x%016llx), slot(%d)\n", ioc->name,
-                                   (unsigned long long)
-                                   pcie_device->enclosure_logical_id,
-                                   pcie_device->slot));
+                               dewtprintk(ioc,
+                                          ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+                                                   (u64)pcie_device->enclosure_logical_id,
+                                                   pcie_device->slot));
                        if (pcie_device->connector_name[0] != '\0')
-                               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                                   "setting delete flag:, enclosure "
-                                   "level(0x%04x), "
-                                   "connector name( %s)\n", ioc->name,
-                                   pcie_device->enclosure_level,
-                                   pcie_device->connector_name));
+                               dewtprintk(ioc,
+                                          ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
+                                                   pcie_device->enclosure_level,
+                                                   pcie_device->connector_name));
                }
                _scsih_ublock_io_device(ioc, sas_address);
                sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
@@ -3660,16 +3631,15 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                INIT_LIST_HEAD(&delayed_tr->list);
                delayed_tr->handle = handle;
                list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "DELAYED:tr:handle(0x%04x), (open)\n",
-                   ioc->name, handle));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+                                   handle));
                goto out;
        }
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-               ioc->name, handle, smid,
-           ioc->tm_tr_cb_idx));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+                           handle, smid, ioc->tm_tr_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3717,39 +3687,39 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
        struct _sc_list *delayed_sc;
 
        if (ioc->pci_error_recovery) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: host in pci error recovery\n", __func__,
-                       ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host in pci error recovery\n",
+                                   __func__));
                return 1;
        }
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: host is not operational\n", __func__, ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host is not operational\n",
+                                   __func__));
                return 1;
        }
        if (unlikely(!mpi_reply)) {
-               pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 1;
        }
        mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
        handle = le16_to_cpu(mpi_request_tm->DevHandle);
        if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
-               dewtprintk(ioc, pr_err(MPT3SAS_FMT
-                       "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
-                       ioc->name, handle,
-                   le16_to_cpu(mpi_reply->DevHandle), smid));
+               dewtprintk(ioc,
+                          ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+                                  handle,
+                                  le16_to_cpu(mpi_reply->DevHandle), smid));
                return 0;
        }
 
        mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-           "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
-           "loginfo(0x%08x), completed(%d)\n", ioc->name,
-           handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
-           le32_to_cpu(mpi_reply->IOCLogInfo),
-           le32_to_cpu(mpi_reply->TerminationCount)));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+                           handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+                           le32_to_cpu(mpi_reply->IOCLogInfo),
+                           le32_to_cpu(mpi_reply->TerminationCount)));
 
        smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
        if (!smid_sas_ctrl) {
@@ -3759,16 +3729,15 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
                INIT_LIST_HEAD(&delayed_sc->list);
                delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
                list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "DELAYED:sc:handle(0x%04x), (open)\n",
-                   ioc->name, handle));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
+                                   handle));
                return _scsih_check_for_pending_tm(ioc, smid);
        }
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-               ioc->name, handle, smid_sas_ctrl,
-           ioc->tm_sas_control_cb_idx));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+                           handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
        memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -3803,20 +3772,19 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
            mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
        if (likely(mpi_reply)) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "sc_complete:handle(0x%04x), (open) "
-               "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
-               ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
-               le16_to_cpu(mpi_reply->IOCStatus),
-               le32_to_cpu(mpi_reply->IOCLogInfo)));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                   le16_to_cpu(mpi_reply->DevHandle), smid,
+                                   le16_to_cpu(mpi_reply->IOCStatus),
+                                   le32_to_cpu(mpi_reply->IOCLogInfo)));
                if (le16_to_cpu(mpi_reply->IOCStatus) ==
                     MPI2_IOCSTATUS_SUCCESS) {
                        clear_bit(le16_to_cpu(mpi_reply->DevHandle),
                            ioc->device_remove_in_progress);
                }
        } else {
-               pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
        }
        return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
 }
@@ -3839,9 +3807,9 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        struct _tr_list *delayed_tr;
 
        if (ioc->pci_error_recovery) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: host reset in progress!\n",
-                       __func__, ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host reset in progress!\n",
+                                   __func__));
                return;
        }
 
@@ -3853,16 +3821,15 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                INIT_LIST_HEAD(&delayed_tr->list);
                delayed_tr->handle = handle;
                list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "DELAYED:tr:handle(0x%04x), (open)\n",
-                   ioc->name, handle));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+                                   handle));
                return;
        }
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-               ioc->name, handle, smid,
-           ioc->tm_tr_volume_cb_idx));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+                           handle, smid, ioc->tm_tr_volume_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3892,33 +3859,32 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
            mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: host reset in progress!\n",
-                       __func__, ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host reset in progress!\n",
+                                   __func__));
                return 1;
        }
        if (unlikely(!mpi_reply)) {
-               pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 1;
        }
 
        mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
        handle = le16_to_cpu(mpi_request_tm->DevHandle);
        if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
-               dewtprintk(ioc, pr_err(MPT3SAS_FMT
-                       "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
-                       ioc->name, handle,
-                   le16_to_cpu(mpi_reply->DevHandle), smid));
+               dewtprintk(ioc,
+                          ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+                                  handle, le16_to_cpu(mpi_reply->DevHandle),
+                                  smid));
                return 0;
        }
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-           "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
-           "loginfo(0x%08x), completed(%d)\n", ioc->name,
-           handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
-           le32_to_cpu(mpi_reply->IOCLogInfo),
-           le32_to_cpu(mpi_reply->TerminationCount)));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+                           handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+                           le32_to_cpu(mpi_reply->IOCLogInfo),
+                           le32_to_cpu(mpi_reply->TerminationCount)));
 
        return _scsih_check_for_pending_tm(ioc, smid);
 }
@@ -3948,10 +3914,9 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
        ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
-               ioc->name, le16_to_cpu(event), smid,
-               ioc->base_cb_idx));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+                           le16_to_cpu(event), smid, ioc->base_cb_idx));
        ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
        ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -3981,21 +3946,21 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
        unsigned long flags;
 
        if (ioc->remove_host) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                           "%s: host has been removed\n",
-                            __func__, ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host has been removed\n",
+                                   __func__));
                return;
        } else if (ioc->pci_error_recovery) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                           "%s: host in pci error recovery\n",
-                           __func__, ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host in pci error recovery\n",
+                                   __func__));
                return;
        }
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s: host is not operational\n",
-                   __func__, ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: host is not operational\n",
+                                   __func__));
                return;
        }
 
@@ -4007,10 +3972,9 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
        ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-           "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
-           ioc->name, handle, smid,
-           ioc->tm_sas_control_cb_idx));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+                           handle, smid, ioc->tm_sas_control_cb_idx));
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
        memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
        mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -4171,8 +4135,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
                    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
                        if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
                            expander_handle) {
-                               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                                   "setting ignoring flag\n", ioc->name));
+                               dewtprintk(ioc,
+                                          ioc_info(ioc, "setting ignoring flag\n"));
                                fw_event->ignore = 1;
                        }
                }
@@ -4243,9 +4207,8 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
                    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
                        if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
                                switch_handle) {
-                               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                                       "setting ignoring flag for switch event\n",
-                                       ioc->name));
+                               dewtprintk(ioc,
+                                          ioc_info(ioc, "setting ignoring flag for switch event\n"));
                                fw_event->ignore = 1;
                        }
                }
@@ -4274,10 +4237,9 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                sas_target_priv_data =
                    raid_device->starget->hostdata;
                sas_target_priv_data->deleted = 1;
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "setting delete flag: handle(0x%04x), "
-                   "wwid(0x%016llx)\n", ioc->name, handle,
-                   (unsigned long long) raid_device->wwid));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
+                                   handle, (u64)raid_device->wwid));
        }
        spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
 }
@@ -4379,9 +4341,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
                        INIT_LIST_HEAD(&delayed_tr->list);
                        delayed_tr->handle = handle;
                        list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                           "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
-                           handle));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+                                           handle));
                } else
                        _scsih_tm_tr_send(ioc, handle);
        }
@@ -4424,15 +4386,14 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
        Mpi2EventDataTemperature_t *event_data)
 {
        if (ioc->temp_sensors_count >= event_data->SensorNum) {
-               pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
-                 " exceeded for Sensor: %d !!!\n", ioc->name,
-                 ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
-                 ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
-                 ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
-                 ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
-                 event_data->SensorNum);
-               pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
-                       ioc->name, event_data->CurrentTemperature);
+               ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
+                       le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
+                       le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
+                       le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
+                       le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
+                       event_data->SensorNum);
+               ioc_err(ioc, "Current Temp In Celsius: %d\n",
+                       event_data->CurrentTemperature);
        }
 }
 
@@ -4480,8 +4441,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
                        scmd->result = DID_RESET << 16;
                scmd->scsi_done(scmd);
        }
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
-           ioc->name, count));
+       dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
 }
 
 /**
@@ -4680,8 +4640,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 
        smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                _scsih_set_satl_pending(scmd, false);
                goto out;
        }
@@ -4919,37 +4878,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
        scsi_print_command(scmd);
 
        if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
-               pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
-                   device_str, (unsigned long long)priv_target->sas_address);
+               ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
+                        device_str, (u64)priv_target->sas_address);
        } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
                pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
                if (pcie_device) {
-                       pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
-                           ioc->name,
-                           (unsigned long long)pcie_device->wwid,
-                           pcie_device->port_num);
+                       ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
+                                (u64)pcie_device->wwid, pcie_device->port_num);
                        if (pcie_device->enclosure_handle != 0)
-                               pr_info(MPT3SAS_FMT
-                                   "\tenclosure logical id(0x%016llx), "
-                                   "slot(%d)\n", ioc->name,
-                                   (unsigned long long)
-                                   pcie_device->enclosure_logical_id,
-                                   pcie_device->slot);
+                               ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
+                                        (u64)pcie_device->enclosure_logical_id,
+                                        pcie_device->slot);
                        if (pcie_device->connector_name[0])
-                               pr_info(MPT3SAS_FMT
-                                   "\tenclosure level(0x%04x),"
-                                   "connector name( %s)\n",
-                                   ioc->name, pcie_device->enclosure_level,
-                                   pcie_device->connector_name);
+                               ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
+                                        pcie_device->enclosure_level,
+                                        pcie_device->connector_name);
                        pcie_device_put(pcie_device);
                }
        } else {
                sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
                if (sas_device) {
-                       pr_warn(MPT3SAS_FMT
-                               "\tsas_address(0x%016llx), phy(%d)\n",
-                               ioc->name, (unsigned long long)
-                           sas_device->sas_address, sas_device->phy);
+                       ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+                                (u64)sas_device->sas_address, sas_device->phy);
 
                        _scsih_display_enclosure_chassis_info(ioc, sas_device,
                            NULL, NULL);
@@ -4958,30 +4908,23 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
                }
        }
 
-       pr_warn(MPT3SAS_FMT
-               "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
-               ioc->name, le16_to_cpu(mpi_reply->DevHandle),
-           desc_ioc_state, ioc_status, smid);
-       pr_warn(MPT3SAS_FMT
-               "\trequest_len(%d), underflow(%d), resid(%d)\n",
-               ioc->name, scsi_bufflen(scmd), scmd->underflow,
-           scsi_get_resid(scmd));
-       pr_warn(MPT3SAS_FMT
-               "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
-               ioc->name, le16_to_cpu(mpi_reply->TaskTag),
-           le32_to_cpu(mpi_reply->TransferCount), scmd->result);
-       pr_warn(MPT3SAS_FMT
-               "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
-               ioc->name, desc_scsi_status,
-           scsi_status, desc_scsi_state, scsi_state);
+       ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+                le16_to_cpu(mpi_reply->DevHandle),
+                desc_ioc_state, ioc_status, smid);
+       ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
+                scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
+       ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+                le16_to_cpu(mpi_reply->TaskTag),
+                le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+       ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+                desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
 
        if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
                struct sense_info data;
                _scsih_normalize_sense(scmd->sense_buffer, &data);
-               pr_warn(MPT3SAS_FMT
-                 "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
-                 ioc->name, data.skey,
-                 data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+               ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+                        data.skey, data.asc, data.ascq,
+                        le32_to_cpu(mpi_reply->SenseCount));
        }
        if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
                response_info = le32_to_cpu(mpi_reply->ResponseInfo);
@@ -5016,17 +4959,17 @@ _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
        if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
            &mpi_request)) != 0) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-               __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        sas_device->pfa_led_on = 1;
 
        if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
-                       ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
-                   le32_to_cpu(mpi_reply.IOCLogInfo)));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+                                   le16_to_cpu(mpi_reply.IOCStatus),
+                                   le32_to_cpu(mpi_reply.IOCLogInfo)));
                goto out;
        }
 out:
@@ -5056,16 +4999,16 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
        mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
        if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
                &mpi_request)) != 0) {
-               printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
-               __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
        if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
-               dewtprintk(ioc, printk(MPT3SAS_FMT
-                "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
-                ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
-                le32_to_cpu(mpi_reply.IOCLogInfo)));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+                                   le16_to_cpu(mpi_reply.IOCStatus),
+                                   le32_to_cpu(mpi_reply.IOCLogInfo)));
                return;
        }
 }
@@ -5133,8 +5076,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
        event_reply = kzalloc(sz, GFP_KERNEL);
        if (!event_reply) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
 
@@ -5424,16 +5367,16 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
        u16 attached_handle;
        u8 link_rate;
 
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-           "updating handles for sas_host(0x%016llx)\n",
-           ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+       dtmprintk(ioc,
+                 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+                          (u64)ioc->sas_hba.sas_address));
 
        sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
            * sizeof(Mpi2SasIOUnit0PhyData_t));
        sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg0) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
@@ -5483,15 +5426,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
 
        mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
        if (!num_phys) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
        ioc->sas_hba.phy = kcalloc(num_phys,
            sizeof(struct _sas_phy), GFP_KERNEL);
        if (!ioc->sas_hba.phy) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        ioc->sas_hba.num_phys = num_phys;
@@ -5501,21 +5444,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
            sizeof(Mpi2SasIOUnit0PhyData_t));
        sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg0) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
        if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
            sas_iounit_pg0, sz))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
 
@@ -5524,21 +5467,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
            sizeof(Mpi2SasIOUnit1PhyData_t));
        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg1) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
            sas_iounit_pg1, sz))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
 
@@ -5557,15 +5500,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
        for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
                if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
                    i))) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        goto out;
                }
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        goto out;
                }
 
@@ -5579,18 +5522,17 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
        }
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out;
        }
        ioc->sas_hba.enclosure_handle =
            le16_to_cpu(sas_device_pg0.EnclosureHandle);
        ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
-       pr_info(MPT3SAS_FMT
-               "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
-               ioc->name, ioc->sas_hba.handle,
-           (unsigned long long) ioc->sas_hba.sas_address,
-           ioc->sas_hba.num_phys) ;
+       ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+                ioc->sas_hba.handle,
+                (u64)ioc->sas_hba.sas_address,
+                ioc->sas_hba.num_phys);
 
        if (ioc->sas_hba.enclosure_handle) {
                if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -5639,16 +5581,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 
        if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
            MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
 
@@ -5656,8 +5598,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
        if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
            != 0) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
        if (sas_address_parent != ioc->sas_hba.sas_address) {
@@ -5684,8 +5626,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        sas_expander = kzalloc(sizeof(struct _sas_node),
            GFP_KERNEL);
        if (!sas_expander) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
 
@@ -5694,18 +5636,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        sas_expander->sas_address_parent = sas_address_parent;
        sas_expander->sas_address = sas_address;
 
-       pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
-           " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
-           handle, parent_handle, (unsigned long long)
-           sas_expander->sas_address, sas_expander->num_phys);
+       ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+                handle, parent_handle,
+                (u64)sas_expander->sas_address, sas_expander->num_phys);
 
        if (!sas_expander->num_phys)
                goto out_fail;
        sas_expander->phy = kcalloc(sas_expander->num_phys,
            sizeof(struct _sas_phy), GFP_KERNEL);
        if (!sas_expander->phy) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -1;
                goto out_fail;
        }
@@ -5714,8 +5655,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
            sas_address_parent);
        if (!mpt3sas_port) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -1;
                goto out_fail;
        }
@@ -5724,8 +5665,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        for (i = 0 ; i < sas_expander->num_phys ; i++) {
                if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
                    &expander_pg1, i, handle))) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        rc = -1;
                        goto out_fail;
                }
@@ -5735,8 +5676,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                if ((mpt3sas_transport_add_expander_phy(ioc,
                    &sas_expander->phy[i], expander_pg1,
                    sas_expander->parent_dev))) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        rc = -1;
                        goto out_fail;
                }
@@ -5883,9 +5824,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
        if (!rc)
                return 0;
 
-       pr_err(MPT3SAS_FMT
-               "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
-               ioc->name, desc, (unsigned long long)sas_address, handle);
+       ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+               desc, (u64)sas_address, handle);
        return rc;
 }
 
@@ -5979,9 +5919,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
        /* check if device is present */
        if (!(le16_to_cpu(sas_device_pg0.Flags) &
            MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
-               pr_err(MPT3SAS_FMT
-                       "device is not present handle(0x%04x), flags!!!\n",
-                       ioc->name, handle);
+               ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+                       handle);
                goto out_unlock;
        }
 
@@ -6028,16 +5967,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
 
@@ -6051,8 +5990,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
        /* check if device is present */
        if (!(le16_to_cpu(sas_device_pg0.Flags) &
            MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
-               pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
-                       ioc->name, handle);
+               ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+                       handle);
                return -1;
        }
 
@@ -6074,16 +6013,15 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
                        mpt3sas_scsih_enclosure_find_by_handle(ioc,
                            le16_to_cpu(sas_device_pg0.EnclosureHandle));
                if (enclosure_dev == NULL)
-                       pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
-                           "doesn't match with enclosure device!\n",
-                           ioc->name, sas_device_pg0.EnclosureHandle);
+                       ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+                                sas_device_pg0.EnclosureHandle);
        }
 
        sas_device = kzalloc(sizeof(struct _sas_device),
            GFP_KERNEL);
        if (!sas_device) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 0;
        }
 
@@ -6092,8 +6030,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
        if (_scsih_get_sas_address(ioc,
            le16_to_cpu(sas_device_pg0.ParentDevHandle),
            &sas_device->sas_address_parent) != 0)
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
        sas_device->enclosure_handle =
            le16_to_cpu(sas_device_pg0.EnclosureHandle);
        if (sas_device->enclosure_handle != 0)
@@ -6158,11 +6096,10 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
                sas_device->pfa_led_on = 0;
        }
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
-               ioc->name, __func__,
-           sas_device->handle, (unsigned long long)
-           sas_device->sas_address));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+                           __func__,
+                           sas_device->handle, (u64)sas_device->sas_address));
 
        dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
            NULL, NULL));
@@ -6180,18 +6117,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
                    sas_device->sas_address,
                    sas_device->sas_address_parent);
 
-       pr_info(MPT3SAS_FMT
-               "removing handle(0x%04x), sas_addr(0x%016llx)\n",
-               ioc->name, sas_device->handle,
-           (unsigned long long) sas_device->sas_address);
+       ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+                sas_device->handle, (u64)sas_device->sas_address);
 
        _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
-               ioc->name, __func__,
-               sas_device->handle, (unsigned long long)
-               sas_device->sas_address));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+                           __func__,
+                           sas_device->handle, (u64)sas_device->sas_address));
        dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
            NULL, NULL));
 }
@@ -6231,8 +6165,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
                status_str = "unknown status";
                break;
        }
-       pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
-           ioc->name, status_str);
+       ioc_info(ioc, "sas topology change: (%s)\n", status_str);
        pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
            "start_phy(%02d), count(%d)\n",
            le16_to_cpu(event_data->ExpanderDevHandle),
@@ -6309,8 +6242,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
                _scsih_sas_host_refresh(ioc);
 
        if (fw_event->ignore) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "ignoring expander event\n", ioc->name));
+               dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
                return 0;
        }
 
@@ -6339,8 +6271,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
        /* handle siblings events */
        for (i = 0; i < event_data->NumEntries; i++) {
                if (fw_event->ignore) {
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                               "ignoring expander event\n", ioc->name));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "ignoring expander event\n"));
                        return 0;
                }
                if (ioc->remove_host || ioc->pci_error_recovery)
@@ -6464,15 +6396,14 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
                reason_str = "unknown reason";
                break;
        }
-       pr_info(MPT3SAS_FMT "device status change: (%s)\n"
-           "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
-           ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
-           (unsigned long long)le64_to_cpu(event_data->SASAddress),
-           le16_to_cpu(event_data->TaskTag));
+       ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+                reason_str, le16_to_cpu(event_data->DevHandle),
+                (u64)le64_to_cpu(event_data->SASAddress),
+                le16_to_cpu(event_data->TaskTag));
        if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
-               pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
-                   event_data->ASC, event_data->ASCQ);
-       pr_info("\n");
+               pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+                       event_data->ASC, event_data->ASCQ);
+       pr_cont("\n");
 }
 
 /**
@@ -6605,20 +6536,16 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
                desc = "nvme failure status";
                break;
        default:
-               pr_err(MPT3SAS_FMT
-                   " NVMe discovery error(0x%02x): wwid(0x%016llx),"
-                       "handle(0x%04x)\n", ioc->name, access_status,
-                       (unsigned long long)wwid, handle);
+               ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+                       access_status, (u64)wwid, handle);
                return rc;
        }
 
        if (!rc)
                return rc;
 
-       pr_info(MPT3SAS_FMT
-               "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
-                       ioc->name, desc,
-                       (unsigned long long)wwid, handle);
+       ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+                desc, (u64)wwid, handle);
        return rc;
 }
 
@@ -6634,22 +6561,22 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
 {
        struct MPT3SAS_TARGET *sas_target_priv_data;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-           "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-           pcie_device->handle, (unsigned long long)
-           pcie_device->wwid));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+                           __func__,
+                           pcie_device->handle, (u64)pcie_device->wwid));
        if (pcie_device->enclosure_handle != 0)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
-                   ioc->name, __func__,
-                   (unsigned long long)pcie_device->enclosure_logical_id,
-                   pcie_device->slot));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+                                   __func__,
+                                   (u64)pcie_device->enclosure_logical_id,
+                                   pcie_device->slot));
        if (pcie_device->connector_name[0] != '\0')
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
-                   ioc->name, __func__,
-                   pcie_device->enclosure_level,
-                   pcie_device->connector_name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+                                   __func__,
+                                   pcie_device->enclosure_level,
+                                   pcie_device->connector_name));
 
        if (pcie_device->starget && pcie_device->starget->hostdata) {
                sas_target_priv_data = pcie_device->starget->hostdata;
@@ -6658,39 +6585,35 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
                sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
        }
 
-       pr_info(MPT3SAS_FMT
-               "removing handle(0x%04x), wwid (0x%016llx)\n",
-               ioc->name, pcie_device->handle,
-               (unsigned long long) pcie_device->wwid);
+       ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+                pcie_device->handle, (u64)pcie_device->wwid);
        if (pcie_device->enclosure_handle != 0)
-               pr_info(MPT3SAS_FMT
-                   "removing : enclosure logical id(0x%016llx), slot(%d)\n",
-                   ioc->name,
-                   (unsigned long long)pcie_device->enclosure_logical_id,
-                   pcie_device->slot);
+               ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+                        (u64)pcie_device->enclosure_logical_id,
+                        pcie_device->slot);
        if (pcie_device->connector_name[0] != '\0')
-               pr_info(MPT3SAS_FMT
-                   "removing: enclosure level(0x%04x), connector name( %s)\n",
-                   ioc->name, pcie_device->enclosure_level,
-                   pcie_device->connector_name);
+               ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+                        pcie_device->enclosure_level,
+                        pcie_device->connector_name);
 
        if (pcie_device->starget)
                scsi_remove_target(&pcie_device->starget->dev);
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-           "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
-           pcie_device->handle, (unsigned long long)
-           pcie_device->wwid));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+                           __func__,
+                           pcie_device->handle, (u64)pcie_device->wwid));
        if (pcie_device->enclosure_handle != 0)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
-                       ioc->name, __func__,
-                       (unsigned long long)pcie_device->enclosure_logical_id,
-                       pcie_device->slot));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+                                   __func__,
+                                   (u64)pcie_device->enclosure_logical_id,
+                                   pcie_device->slot));
        if (pcie_device->connector_name[0] != '\0')
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
-                   ioc->name, __func__, pcie_device->enclosure_level,
-                   pcie_device->connector_name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+                                   __func__,
+                                   pcie_device->enclosure_level,
+                                   pcie_device->connector_name));
 
        kfree(pcie_device->serial_number);
 }
@@ -6760,9 +6683,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        /* check if device is present */
        if (!(le32_to_cpu(pcie_device_pg0.Flags) &
            MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
-               pr_info(MPT3SAS_FMT
-                   "device is not present handle(0x%04x), flags!!!\n",
-                   ioc->name, handle);
+               ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+                        handle);
                spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
                pcie_device_put(pcie_device);
                return;
@@ -6806,16 +6728,15 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 
        if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
            &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 0;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 0;
        }
 
@@ -6825,9 +6746,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        /* check if device is present */
        if (!(le32_to_cpu(pcie_device_pg0.Flags) &
                MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
-               pr_err(MPT3SAS_FMT
-                   "device is not present handle(0x04%x)!!!\n",
-                   ioc->name, handle);
+               ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+                       handle);
                return 0;
        }
 
@@ -6848,8 +6768,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 
        pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
        if (!pcie_device) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                       ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 0;
        }
 
@@ -6890,16 +6810,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
        /* TODO -- Add device name once FW supports it */
        if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
                &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                               ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                kfree(pcie_device);
                return 0;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                       ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                kfree(pcie_device);
                return 0;
        }
@@ -6956,8 +6876,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
                status_str = "unknown status";
                break;
        }
-       pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
-               ioc->name, status_str);
+       ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
        pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
                "start_port(%02d), count(%d)\n",
                le16_to_cpu(event_data->SwitchDevHandle),
@@ -7030,16 +6949,15 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
                return;
 
        if (fw_event->ignore) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
-                       ioc->name));
+               dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
                return;
        }
 
        /* handle siblings events */
        for (i = 0; i < event_data->NumEntries; i++) {
                if (fw_event->ignore) {
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                               "ignoring switch event\n", ioc->name));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "ignoring switch event\n"));
                        return;
                }
                if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7084,9 +7002,9 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
                        if (!test_bit(handle, ioc->pend_os_device_add))
                                break;
 
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                               "handle(0x%04x) device not found: convert "
-                               "event to a device add\n", ioc->name, handle));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+                                           handle));
                        event_data->PortEntry[i].PortStatus &= 0xF0;
                        event_data->PortEntry[i].PortStatus |=
                                MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
@@ -7169,15 +7087,15 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
                break;
        }
 
-       pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
-               "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
-               ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
-               (unsigned long long)le64_to_cpu(event_data->WWID),
-               le16_to_cpu(event_data->TaskTag));
+       ioc_info(ioc, "PCIE device status change: (%s)\n"
+                "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+                reason_str, le16_to_cpu(event_data->DevHandle),
+                (u64)le64_to_cpu(event_data->WWID),
+                le16_to_cpu(event_data->TaskTag));
        if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
-               pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+               pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
                        event_data->ASC, event_data->ASCQ);
-       pr_info("\n");
+       pr_cont("\n");
 }
 
 /**
@@ -7255,12 +7173,12 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
                break;
        }
 
-       pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
-           "\thandle(0x%04x), enclosure logical id(0x%016llx)"
-           " number slots(%d)\n", ioc->name, reason_str,
-           le16_to_cpu(event_data->EnclosureHandle),
-           (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
-           le16_to_cpu(event_data->StartSlot));
+       ioc_info(ioc, "enclosure status change: (%s)\n"
+                "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+                reason_str,
+                le16_to_cpu(event_data->EnclosureHandle),
+                (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+                le16_to_cpu(event_data->StartSlot));
 }
 
 /**
@@ -7298,9 +7216,8 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
                                kzalloc(sizeof(struct _enclosure_node),
                                        GFP_KERNEL);
                        if (!enclosure_dev) {
-                               pr_info(MPT3SAS_FMT
-                                       "failure at %s:%d/%s()!\n", ioc->name,
-                                       __FILE__, __LINE__, __func__);
+                               ioc_info(ioc, "failure at %s:%d/%s()!\n",
+                                        __FILE__, __LINE__, __func__);
                                return;
                        }
                        rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -7358,10 +7275,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
        u8 task_abort_retries;
 
        mutex_lock(&ioc->tm_cmds.mutex);
-       pr_info(MPT3SAS_FMT
-               "%s: enter: phy number(%d), width(%d)\n",
-               ioc->name, __func__, event_data->PhyNum,
-            event_data->PortWidth);
+       ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+                __func__, event_data->PhyNum, event_data->PortWidth);
 
        _scsih_block_io_all_device(ioc);
 
@@ -7371,12 +7286,12 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
        /* sanity checks for retrying this loop */
        if (max_retries++ == 5) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
-                   ioc->name, __func__));
+               dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
                goto out;
        } else if (max_retries > 1)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
-                   ioc->name, __func__, max_retries - 1));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: %d retry\n",
+                                   __func__, max_retries - 1));
 
        termination_count = 0;
        query_count = 0;
@@ -7443,9 +7358,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
                task_abort_retries = 0;
  tm_retry:
                if (task_abort_retries++ == 60) {
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                           "%s: ABORT_TASK: giving up\n", ioc->name,
-                           __func__));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+                                           __func__));
                        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
                        goto broadcast_aen_retry;
                }
@@ -7474,9 +7389,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
        }
 
        if (ioc->broadcast_aen_pending) {
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: loop back due to pending AEN\n",
-                       ioc->name, __func__));
+               dewtprintk(ioc,
+                          ioc_info(ioc,
+                                   "%s: loop back due to pending AEN\n",
+                                   __func__));
                 ioc->broadcast_aen_pending = 0;
                 goto broadcast_aen_retry;
        }
@@ -7485,9 +7401,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  out_no_lock:
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-           "%s - exit, query_count = %d termination_count = %d\n",
-           ioc->name, __func__, query_count, termination_count));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+                           __func__, query_count, termination_count));
 
        ioc->broadcast_aen_busy = 0;
        if (!ioc->shost_recovery)
@@ -7509,13 +7425,13 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
                (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
 
        if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
-               pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
-                   (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
-                   "start" : "stop");
+               ioc_info(ioc, "discovery event: (%s)",
+                        event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+                        "start" : "stop");
                if (event_data->DiscoveryStatus)
-                       pr_info("discovery_status(0x%08x)",
-                           le32_to_cpu(event_data->DiscoveryStatus));
-               pr_info("\n");
+                       pr_cont("discovery_status(0x%08x)",
+                               le32_to_cpu(event_data->DiscoveryStatus));
+               pr_cont("\n");
        }
 
        if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
@@ -7545,20 +7461,16 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
 
        switch (event_data->ReasonCode) {
        case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
-               pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
-                       "(handle:0x%04x, sas_address:0x%016llx,"
-                       "physical_port:0x%02x) has failed",
-                       ioc->name, le16_to_cpu(event_data->DevHandle),
-                       (unsigned long long)le64_to_cpu(event_data->SASAddress),
-                       event_data->PhysicalPort);
+               ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+                        le16_to_cpu(event_data->DevHandle),
+                        (u64)le64_to_cpu(event_data->SASAddress),
+                        event_data->PhysicalPort);
                break;
        case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
-               pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
-                       "(handle:0x%04x, sas_address:0x%016llx,"
-                       "physical_port:0x%02x) has timed out",
-                       ioc->name, le16_to_cpu(event_data->DevHandle),
-                       (unsigned long long)le64_to_cpu(event_data->SASAddress),
-                       event_data->PhysicalPort);
+               ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+                        le16_to_cpu(event_data->DevHandle),
+                        (u64)le64_to_cpu(event_data->SASAddress),
+                        event_data->PhysicalPort);
                break;
        default:
                break;
@@ -7581,11 +7493,10 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
        if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
                return;
 
-       pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
-               ioc->name,
-               (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
-                       "started" : "completed",
-               event_data->Flags);
+       ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+                (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+                "started" : "completed",
+                event_data->Flags);
        if (event_data->EnumerationStatus)
                pr_cont("enumeration_status(0x%08x)",
                        le32_to_cpu(event_data->EnumerationStatus));
@@ -7617,8 +7528,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
        mutex_lock(&ioc->scsih_cmds.mutex);
 
        if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -7626,8 +7536,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 
        smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
                rc = -EAGAIN;
                goto out;
@@ -7641,9 +7550,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
        mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
        mpi_request->PhysDiskNum = phys_disk_num;
 
-       dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
-           "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
-           handle, phys_disk_num));
+       dewtprintk(ioc,
+                  ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+                           handle, phys_disk_num));
 
        init_completion(&ioc->scsih_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
@@ -7668,15 +7577,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
                        log_info = 0;
                ioc_status &= MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                           "IR RAID_ACTION: failed: ioc_status(0x%04x), "
-                           "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
-                           log_info));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+                                           ioc_status, log_info));
                        rc = -EFAULT;
                } else
-                       dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                           "IR RAID_ACTION: completed successfully\n",
-                           ioc->name));
+                       dewtprintk(ioc,
+                                  ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
        }
 
  out:
@@ -7721,9 +7628,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
 
        mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
        if (!wwid) {
-               pr_err(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n", ioc->name,
-                   __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
@@ -7736,9 +7642,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
 
        raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
        if (!raid_device) {
-               pr_err(MPT3SAS_FMT
-                   "failure at %s:%d/%s()!\n", ioc->name,
-                   __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
@@ -7781,9 +7686,8 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
                        sas_target_priv_data = starget->hostdata;
                        sas_target_priv_data->deleted = 1;
                }
-               pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
-                       ioc->name,  raid_device->handle,
-                   (unsigned long long) raid_device->wwid);
+               ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+                        raid_device->handle, (u64)raid_device->wwid);
                list_del(&raid_device->list);
                kfree(raid_device);
        }
@@ -7925,16 +7829,16 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
 
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return;
        }
 
@@ -7964,10 +7868,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
 
        element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
 
-       pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
-           ioc->name, (le32_to_cpu(event_data->Flags) &
-           MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
-           "foreign" : "native", event_data->NumElements);
+       ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+                le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+                "foreign" : "native",
+                event_data->NumElements);
        for (i = 0; i < event_data->NumElements; i++, element++) {
                switch (element->ReasonCode) {
                case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -8123,10 +8027,11 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
        handle = le16_to_cpu(event_data->VolDevHandle);
        state = le32_to_cpu(event_data->NewValue);
        if (!ioc->hide_ir_msg)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
-                   ioc->name, __func__,  handle,
-                   le32_to_cpu(event_data->PreviousValue), state));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+                                   __func__, handle,
+                                   le32_to_cpu(event_data->PreviousValue),
+                                   state));
        switch (state) {
        case MPI2_RAID_VOL_STATE_MISSING:
        case MPI2_RAID_VOL_STATE_FAILED:
@@ -8146,17 +8051,15 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
 
                mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
                if (!wwid) {
-                       pr_err(MPT3SAS_FMT
-                           "failure at %s:%d/%s()!\n", ioc->name,
-                           __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        break;
                }
 
                raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
                if (!raid_device) {
-                       pr_err(MPT3SAS_FMT
-                           "failure at %s:%d/%s()!\n", ioc->name,
-                           __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        break;
                }
 
@@ -8207,10 +8110,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
        state = le32_to_cpu(event_data->NewValue);
 
        if (!ioc->hide_ir_msg)
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
-                   ioc->name, __func__,  handle,
-                   le32_to_cpu(event_data->PreviousValue), state));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+                                   __func__, handle,
+                                   le32_to_cpu(event_data->PreviousValue),
+                                   state));
 
        switch (state) {
        case MPI2_RAID_PD_STATE_ONLINE:
@@ -8231,16 +8135,16 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
                if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
                    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
                    handle))) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        return;
                }
 
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        return;
                }
 
@@ -8294,11 +8198,10 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
        if (!reason_str)
                return;
 
-       pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
-           "\thandle(0x%04x), percent complete(%d)\n",
-           ioc->name, reason_str,
-           le16_to_cpu(event_data->VolDevHandle),
-           event_data->PercentComplete);
+       ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+                reason_str,
+                le16_to_cpu(event_data->VolDevHandle),
+                event_data->PercentComplete);
 }
 
 /**
@@ -8379,9 +8282,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
                        mpt3sas_scsih_enclosure_find_by_handle(ioc,
                                le16_to_cpu(sas_device_pg0->EnclosureHandle));
                if (enclosure_dev == NULL)
-                       pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
-                           "doesn't match with enclosure device!\n",
-                           ioc->name, sas_device_pg0->EnclosureHandle);
+                       ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+                                sas_device_pg0->EnclosureHandle);
        }
        spin_lock_irqsave(&ioc->sas_device_lock, flags);
        list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
@@ -8475,8 +8377,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
                enclosure_dev =
                        kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
                if (!enclosure_dev) {
-                       pr_err(MPT3SAS_FMT
-                               "failure at %s:%d/%s()!\n", ioc->name,
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
                                __FILE__, __LINE__, __func__);
                        return;
                }
@@ -8513,7 +8414,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
        u16 handle;
        u32 device_info;
 
-       pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+       ioc_info(ioc, "search for end-devices: start\n");
 
        if (list_empty(&ioc->sas_device_list))
                goto out;
@@ -8534,8 +8435,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
        }
 
  out:
-       pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
-           ioc->name);
+       ioc_info(ioc, "search for end-devices: complete\n");
 }
 
 /**
@@ -8628,7 +8528,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
        u16 handle;
        u32 device_info;
 
-       pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+       ioc_info(ioc, "search for end-devices: start\n");
 
        if (list_empty(&ioc->pcie_device_list))
                goto out;
@@ -8640,10 +8540,9 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from %s: "
-                           "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
-                           __func__, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                __func__, ioc_status,
+                                le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -8653,8 +8552,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
                _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
        }
 out:
-       pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
-           ioc->name);
+       ioc_info(ioc, "search for PCIe end-devices: complete\n");
 }
 
 /**
@@ -8735,8 +8633,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
        if (!ioc->ir_firmware)
                return;
 
-       pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
-           ioc->name);
+       ioc_info(ioc, "search for raid volumes: start\n");
 
        if (list_empty(&ioc->raid_device_list))
                goto out;
@@ -8779,8 +8676,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
                }
        }
  out:
-       pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
-               ioc->name);
+       ioc_info(ioc, "search for responding raid volumes: complete\n");
 }
 
 /**
@@ -8852,7 +8748,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
        u64 sas_address;
        u16 handle;
 
-       pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+       ioc_info(ioc, "search for expanders: start\n");
 
        if (list_empty(&ioc->sas_expander_list))
                goto out;
@@ -8875,7 +8771,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
        }
 
  out:
-       pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+       ioc_info(ioc, "search for expanders: complete\n");
 }
 
 /**
@@ -8893,12 +8789,10 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
        unsigned long flags;
        LIST_HEAD(head);
 
-       pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
-           ioc->name);
+       ioc_info(ioc, "removing unresponding devices: start\n");
 
        /* removing unresponding end devices */
-       pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
-           ioc->name);
+       ioc_info(ioc, "removing unresponding devices: end-devices\n");
        /*
         * Iterate, pulling off devices marked as non-responding. We become the
         * owner for the reference the list had on any object we prune.
@@ -8922,9 +8816,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
                sas_device_put(sas_device);
        }
 
-       pr_info(MPT3SAS_FMT
-               " Removing unresponding devices: pcie end-devices\n"
-               , ioc->name);
+       ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
        INIT_LIST_HEAD(&head);
        spin_lock_irqsave(&ioc->pcie_device_lock, flags);
        list_for_each_entry_safe(pcie_device, pcie_device_next,
@@ -8944,8 +8836,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
 
        /* removing unresponding volumes */
        if (ioc->ir_firmware) {
-               pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
-                       ioc->name);
+               ioc_info(ioc, "removing unresponding devices: volumes\n");
                list_for_each_entry_safe(raid_device, raid_device_next,
                    &ioc->raid_device_list, list) {
                        if (!raid_device->responding)
@@ -8957,8 +8848,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
        }
 
        /* removing unresponding expanders */
-       pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
-           ioc->name);
+       ioc_info(ioc, "removing unresponding devices: expanders\n");
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        INIT_LIST_HEAD(&tmp_list);
        list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -8974,8 +8864,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
                _scsih_expander_node_remove(ioc, sas_expander);
        }
 
-       pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
-           ioc->name);
+       ioc_info(ioc, "removing unresponding devices: complete\n");
 
        /* unblock devices */
        _scsih_ublock_io_all_device(ioc);
@@ -8992,8 +8881,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
        for (i = 0 ; i < sas_expander->num_phys ; i++) {
                if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
                    &expander_pg1, i, handle))) {
-                       pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                           ioc->name, __FILE__, __LINE__, __func__);
+                       ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                               __FILE__, __LINE__, __func__);
                        return;
                }
 
@@ -9029,11 +8918,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
        u8 retry_count;
        unsigned long flags;
 
-       pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+       ioc_info(ioc, "scan devices: start\n");
 
        _scsih_sas_host_refresh(ioc);
 
-       pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+       ioc_info(ioc, "\tscan devices: expanders start\n");
 
        /* expanders */
        handle = 0xFFFF;
@@ -9042,10 +8931,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
-                           "ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -9057,25 +8944,22 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                        _scsih_refresh_expander_links(ioc, expander_device,
                            handle);
                else {
-                       pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
-                           "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-                           handle, (unsigned long long)
-                           le64_to_cpu(expander_pg0.SASAddress));
+                       ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+                                handle,
+                                (u64)le64_to_cpu(expander_pg0.SASAddress));
                        _scsih_expander_add(ioc, handle);
-                       pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
-                           "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-                           handle, (unsigned long long)
-                           le64_to_cpu(expander_pg0.SASAddress));
+                       ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+                                handle,
+                                (u64)le64_to_cpu(expander_pg0.SASAddress));
                }
        }
 
-       pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
-           ioc->name);
+       ioc_info(ioc, "\tscan devices: expanders complete\n");
 
        if (!ioc->ir_firmware)
                goto skip_to_sas;
 
-       pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+       ioc_info(ioc, "\tscan devices: phys disk start\n");
 
        /* phys disk */
        phys_disk_num = 0xFF;
@@ -9085,10 +8969,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
-                           "ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                phys_disk_num = pd_pg0.PhysDiskNum;
@@ -9105,19 +8987,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
-                           "ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
                if (!_scsih_get_sas_address(ioc, parent_handle,
                    &sas_address)) {
-                       pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
-                           " handle (0x%04x), sas_addr(0x%016llx)\n",
-                           ioc->name, handle, (unsigned long long)
-                           le64_to_cpu(sas_device_pg0.SASAddress));
+                       ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+                                handle,
+                                (u64)le64_to_cpu(sas_device_pg0.SASAddress));
                        mpt3sas_transport_update_links(ioc, sas_address,
                            handle, sas_device_pg0.PhyNum,
                            MPI2_SAS_NEG_LINK_RATE_1_5);
@@ -9131,17 +9010,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                            1)) {
                                ssleep(1);
                        }
-                       pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
-                           " handle (0x%04x), sas_addr(0x%016llx)\n",
-                           ioc->name, handle, (unsigned long long)
-                           le64_to_cpu(sas_device_pg0.SASAddress));
+                       ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+                                handle,
+                                (u64)le64_to_cpu(sas_device_pg0.SASAddress));
                }
        }
 
-       pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
-           ioc->name);
+       ioc_info(ioc, "\tscan devices: phys disk complete\n");
 
-       pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+       ioc_info(ioc, "\tscan devices: volumes start\n");
 
        /* volumes */
        handle = 0xFFFF;
@@ -9150,10 +9027,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
-                           "ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -9170,10 +9045,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
-                           "ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -9182,23 +9055,19 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                        memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
                        element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
                        element.VolDevHandle = volume_pg1.DevHandle;
-                       pr_info(MPT3SAS_FMT
-                               "\tBEFORE adding volume: handle (0x%04x)\n",
-                               ioc->name, volume_pg1.DevHandle);
+                       ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
+                                volume_pg1.DevHandle);
                        _scsih_sas_volume_add(ioc, &element);
-                       pr_info(MPT3SAS_FMT
-                               "\tAFTER adding volume: handle (0x%04x)\n",
-                               ioc->name, volume_pg1.DevHandle);
+                       ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
+                                volume_pg1.DevHandle);
                }
        }
 
-       pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
-           ioc->name);
+       ioc_info(ioc, "\tscan devices: volumes complete\n");
 
  skip_to_sas:
 
-       pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
-           ioc->name);
+       ioc_info(ioc, "\tscan devices: end devices start\n");
 
        /* sas devices */
        handle = 0xFFFF;
@@ -9208,10 +9077,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
                    MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
-                           " ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, ioc_status,
-                           le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                handle = le16_to_cpu(sas_device_pg0.DevHandle);
@@ -9226,10 +9093,9 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                }
                parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
                if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
-                       pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
-                           "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-                           handle, (unsigned long long)
-                           le64_to_cpu(sas_device_pg0.SASAddress));
+                       ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+                                handle,
+                                (u64)le64_to_cpu(sas_device_pg0.SASAddress));
                        mpt3sas_transport_update_links(ioc, sas_address, handle,
                            sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
                        retry_count = 0;
@@ -9241,16 +9107,13 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                            0)) {
                                ssleep(1);
                        }
-                       pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
-                           "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
-                           handle, (unsigned long long)
-                           le64_to_cpu(sas_device_pg0.SASAddress));
+                       ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+                                handle,
+                                (u64)le64_to_cpu(sas_device_pg0.SASAddress));
                }
        }
-       pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
-           ioc->name);
-       pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
-           ioc->name);
+       ioc_info(ioc, "\tscan devices: end devices complete\n");
+       ioc_info(ioc, "\tscan devices: pcie end devices start\n");
 
        /* pcie devices */
        handle = 0xFFFF;
@@ -9260,10 +9123,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
                                & MPI2_IOCSTATUS_MASK;
                if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-                       pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
-                               " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
-                               ioc->name, ioc_status,
-                               le32_to_cpu(mpi_reply.IOCLogInfo));
+                       ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
                        break;
                }
                handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -9280,14 +9141,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
                parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
                _scsih_pcie_add_device(ioc, handle);
 
-               pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
-                       "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
-                       handle,
-                       (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+               ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
+                        handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
        }
-       pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
-               ioc->name);
-       pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+       ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
+       ioc_info(ioc, "scan devices: complete\n");
 }
 
 /**
@@ -9298,8 +9156,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
  */
 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
 }
 
 /**
@@ -9311,8 +9168,7 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
        if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
                ioc->scsih_cmds.status |= MPT3_CMD_RESET;
                mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9340,8 +9196,7 @@ mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
 void
 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 {
-       dtmprintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+       dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
        if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
                                           !ioc->sas_hba.num_phys)) {
                _scsih_prep_device_scan(ioc);
@@ -9396,9 +9251,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
                if (missing_delay[0] != -1 && missing_delay[1] != -1)
                        mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
                            missing_delay[1]);
-               dewtprintk(ioc, pr_info(MPT3SAS_FMT
-                       "port enable: complete from worker thread\n",
-                       ioc->name));
+               dewtprintk(ioc,
+                          ioc_info(ioc, "port enable: complete from worker thread\n"));
                break;
        case MPT3SAS_TURN_ON_PFA_LED:
                _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
@@ -9496,8 +9350,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
        if (unlikely(!mpi_reply)) {
-               pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 1;
        }
 
@@ -9564,30 +9418,16 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
 
                switch (le32_to_cpu(*log_code)) {
                case MPT2_WARPDRIVE_LC_SSDT:
-                       pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
-                           "IO Throttling has occurred in the WarpDrive "
-                           "subsystem. Check WarpDrive documentation for "
-                           "additional details.\n", ioc->name);
+                       ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
                        break;
                case MPT2_WARPDRIVE_LC_SSDLW:
-                       pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
-                           "Program/Erase Cycles for the WarpDrive subsystem "
-                           "in degraded range. Check WarpDrive documentation "
-                           "for additional details.\n", ioc->name);
+                       ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
                        break;
                case MPT2_WARPDRIVE_LC_SSDLF:
-                       pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
-                           "There are no Program/Erase Cycles for the "
-                           "WarpDrive subsystem. The storage device will be "
-                           "in read-only mode. Check WarpDrive documentation "
-                           "for additional details.\n", ioc->name);
+                       ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
                        break;
                case MPT2_WARPDRIVE_LC_BRMF:
-                       pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
-                           "The Backup Rail Monitor has failed on the "
-                           "WarpDrive subsystem. Check WarpDrive "
-                           "documentation for additional details.\n",
-                           ioc->name);
+                       ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
                        break;
                }
 
@@ -9613,9 +9453,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
                    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
                switch (ActiveCableEventData->ReasonCode) {
                case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
-                       pr_notice(MPT3SAS_FMT
-                           "Currently an active cable with ReceptacleID %d\n",
-                           ioc->name, ActiveCableEventData->ReceptacleID);
+                       ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
+                                  ActiveCableEventData->ReceptacleID);
                        pr_notice("cannot be powered and devices connected\n");
                        pr_notice("to this active cable will not be seen\n");
                        pr_notice("This active cable requires %d mW of power\n",
@@ -9623,9 +9462,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
                        break;
 
                case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
-                       pr_notice(MPT3SAS_FMT
-                           "Currently a cable with ReceptacleID %d\n",
-                           ioc->name, ActiveCableEventData->ReceptacleID);
+                       ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
+                                  ActiveCableEventData->ReceptacleID);
                        pr_notice(
                            "is not running at optimal speed(12 Gb/s rate)\n");
                        break;
@@ -9640,8 +9478,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
        sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
        fw_event = alloc_fw_event_work(sz);
        if (!fw_event) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return 1;
        }
 
@@ -9690,11 +9528,9 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
        mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
            sas_expander->sas_address_parent);
 
-       pr_info(MPT3SAS_FMT
-               "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
-               ioc->name,
-           sas_expander->handle, (unsigned long long)
-           sas_expander->sas_address);
+       ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+                sas_expander->handle, (unsigned long long)
+                sas_expander->sas_address);
 
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        list_del(&sas_expander->list);
@@ -9729,16 +9565,14 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
        mutex_lock(&ioc->scsih_cmds.mutex);
 
        if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
                goto out;
        }
        ioc->scsih_cmds.status = MPT3_CMD_PENDING;
 
        smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
                goto out;
        }
@@ -9751,24 +9585,22 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
        mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
 
        if (!ioc->hide_ir_msg)
-               pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+               ioc_info(ioc, "IR shutdown (sending)\n");
        init_completion(&ioc->scsih_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 
        if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                goto out;
        }
 
        if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
                mpi_reply = ioc->scsih_cmds.reply;
                if (!ioc->hide_ir_msg)
-                       pr_info(MPT3SAS_FMT "IR shutdown "
-                          "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
-                           ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
-                           le32_to_cpu(mpi_reply->IOCLogInfo));
+                       ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+                                le16_to_cpu(mpi_reply->IOCStatus),
+                                le32_to_cpu(mpi_reply->IOCLogInfo));
        }
 
  out:
@@ -9817,9 +9649,8 @@ static void scsih_remove(struct pci_dev *pdev)
                        sas_target_priv_data->deleted = 1;
                        scsi_remove_target(&raid_device->starget->dev);
                }
-               pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
-                       ioc->name,  raid_device->handle,
-                   (unsigned long long) raid_device->wwid);
+               ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+                        raid_device->handle, (u64)raid_device->wwid);
                _scsih_raid_device_remove(ioc, raid_device);
        }
        list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
@@ -10230,7 +10061,7 @@ scsih_scan_start(struct Scsi_Host *shost)
        rc = mpt3sas_port_enable(ioc);
 
        if (rc != 0)
-               pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+               ioc_info(ioc, "port enable: FAILED\n");
 }
 
 /**
@@ -10255,9 +10086,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
 
        if (time >= (300 * HZ)) {
                ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
-               pr_info(MPT3SAS_FMT
-                       "port enable: FAILED with timeout (timeout=300s)\n",
-                       ioc->name);
+               ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
                ioc->is_driver_loading = 0;
                return 1;
        }
@@ -10266,16 +10095,15 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
                return 0;
 
        if (ioc->start_scan_failed) {
-               pr_info(MPT3SAS_FMT
-                       "port enable: FAILED with (ioc_status=0x%08x)\n",
-                       ioc->name, ioc->start_scan_failed);
+               ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
+                        ioc->start_scan_failed);
                ioc->is_driver_loading = 0;
                ioc->wait_for_discovery_to_complete = 0;
                ioc->remove_host = 1;
                return 1;
        }
 
-       pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+       ioc_info(ioc, "port enable: SUCCESS\n");
        ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
 
        if (ioc->wait_for_discovery_to_complete) {
@@ -10586,28 +10414,22 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ioc->is_mcpu_endpoint) {
                /* mCPU MPI support 64K max IO */
                shost->max_sectors = 128;
-               pr_info(MPT3SAS_FMT
-                               "The max_sectors value is set to %d\n",
-                               ioc->name, shost->max_sectors);
+               ioc_info(ioc, "The max_sectors value is set to %d\n",
+                        shost->max_sectors);
        } else {
                if (max_sectors != 0xFFFF) {
                        if (max_sectors < 64) {
                                shost->max_sectors = 64;
-                               pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
-                                   "for max_sectors, range is 64 to 32767. " \
-                                   "Assigning value of 64.\n", \
-                                   ioc->name, max_sectors);
+                               ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
+                                        max_sectors);
                        } else if (max_sectors > 32767) {
                                shost->max_sectors = 32767;
-                               pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
-                                   "for max_sectors, range is 64 to 32767." \
-                                   "Assigning default value of 32767.\n", \
-                                   ioc->name, max_sectors);
+                               ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
+                                        max_sectors);
                        } else {
                                shost->max_sectors = max_sectors & 0xFFFE;
-                               pr_info(MPT3SAS_FMT
-                                       "The max_sectors value is set to %d\n",
-                                       ioc->name, shost->max_sectors);
+                               ioc_info(ioc, "The max_sectors value is set to %d\n",
+                                        shost->max_sectors);
                        }
                }
        }
@@ -10627,16 +10449,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        ioc->firmware_event_thread = alloc_ordered_workqueue(
            ioc->firmware_event_name, 0);
        if (!ioc->firmware_event_thread) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rv = -ENODEV;
                goto out_thread_fail;
        }
 
        ioc->is_driver_loading = 1;
        if ((mpt3sas_base_attach(ioc))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rv = -ENODEV;
                goto out_attach_fail;
        }
@@ -10657,8 +10479,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        rv = scsi_add_host(shost, &pdev->dev);
        if (rv) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out_add_shost_fail;
        }
 
@@ -10695,9 +10517,8 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
        flush_scheduled_work();
        scsi_block_requests(shost);
        device_state = pci_choose_state(pdev, state);
-       pr_info(MPT3SAS_FMT
-               "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
-               ioc->name, pdev, pci_name(pdev), device_state);
+       ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+                pdev, pci_name(pdev), device_state);
 
        pci_save_state(pdev);
        mpt3sas_base_free_resources(ioc);
@@ -10719,9 +10540,8 @@ scsih_resume(struct pci_dev *pdev)
        pci_power_t device_state = pdev->current_state;
        int r;
 
-       pr_info(MPT3SAS_FMT
-               "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
-               ioc->name, pdev, pci_name(pdev), device_state);
+       ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+                pdev, pci_name(pdev), device_state);
 
        pci_set_power_state(pdev, PCI_D0);
        pci_enable_wake(pdev, PCI_D0, 0);
@@ -10753,8 +10573,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 
-       pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
-           ioc->name, state);
+       ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
 
        switch (state) {
        case pci_channel_io_normal:
@@ -10791,8 +10610,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
        int rc;
 
-       pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
-            ioc->name);
+       ioc_info(ioc, "PCI error: slot reset callback!!\n");
 
        ioc->pci_error_recovery = 0;
        ioc->pdev = pdev;
@@ -10803,8 +10621,8 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
 
        rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
-       pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
-           (rc == 0) ? "success" : "failed");
+       ioc_warn(ioc, "hard reset: %s\n",
+                (rc == 0) ? "success" : "failed");
 
        if (!rc)
                return PCI_ERS_RESULT_RECOVERED;
@@ -10826,7 +10644,7 @@ scsih_pci_resume(struct pci_dev *pdev)
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 
-       pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+       ioc_info(ioc, "PCI error: resume callback!!\n");
 
        mpt3sas_base_start_watchdog(ioc);
        scsi_unblock_requests(ioc->shost);
@@ -10842,8 +10660,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 
-       pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
-           ioc->name);
+       ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
 
        /* TODO - dump whatever for debugging purposes */
 
index f8cc267..6a8a3c0 100644 (file)
@@ -146,25 +146,22 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        u32 ioc_status;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
-               pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: host reset in progress!\n", __func__);
                return -EFAULT;
        }
 
        if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
            MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -ENXIO;
        }
 
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT
-                       "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
-                       ioc->name, handle, ioc_status,
-                    __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+                       handle, ioc_status, __FILE__, __LINE__, __func__);
                return -EIO;
        }
 
@@ -310,16 +307,14 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
        u16 wait_state_count;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
-               pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: host reset in progress!\n", __func__);
                return -EFAULT;
        }
 
        mutex_lock(&ioc->transport_cmds.mutex);
 
        if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -329,26 +324,22 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        rc = -EFAULT;
                        goto out;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name, __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
        if (wait_state_count)
-               pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-                   ioc->name, __func__);
+               ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
        smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -359,9 +350,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
 
        data_out_sz = sizeof(struct rep_manu_request);
        data_in_sz = sizeof(struct rep_manu_reply);
-       data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
-           &data_out_dma);
-
+       data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
+                       &data_out_dma, GFP_KERNEL);
        if (!data_out) {
                pr_err("failure at %s:%d/%s()!\n", __FILE__,
                    __LINE__, __func__);
@@ -388,16 +378,15 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
        ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
            data_in_sz);
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "report_manufacture - send to sas_addr(0x%016llx)\n",
-               ioc->name, (unsigned long long)sas_address));
+       dtransportprintk(ioc,
+                        ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
+                                 (u64)sas_address));
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SmpPassthroughRequest_t)/4);
                if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -405,17 +394,16 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
                goto issue_host_reset;
        }
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "report_manufacture - complete\n", ioc->name));
+       dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n"));
 
        if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
                u8 *tmp;
 
                mpi_reply = ioc->transport_cmds.reply;
 
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "report_manufacture - reply data transfer size(%d)\n",
-                   ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n",
+                                         le16_to_cpu(mpi_reply->ResponseDataLength)));
 
                if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
                    sizeof(struct rep_manu_reply))
@@ -439,8 +427,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
                            manufacture_reply->component_revision_id;
                }
        } else
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "report_manufacture - no reply\n", ioc->name));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "report_manufacture - no reply\n"));
 
  issue_host_reset:
        if (issue_reset)
@@ -448,7 +436,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
  out:
        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
        if (data_out)
-               pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+               dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
                    data_out, data_out_dma);
 
        mutex_unlock(&ioc->transport_cmds.mutex);
@@ -643,8 +631,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        mpt3sas_port = kzalloc(sizeof(struct _sas_port),
            GFP_KERNEL);
        if (!mpt3sas_port) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return NULL;
        }
 
@@ -655,22 +643,21 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
 
        if (!sas_node) {
-               pr_err(MPT3SAS_FMT
-                       "%s: Could not find parent sas_address(0x%016llx)!\n",
-                       ioc->name, __func__, (unsigned long long)sas_address);
+               ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n",
+                       __func__, (u64)sas_address);
                goto out_fail;
        }
 
        if ((_transport_set_identify(ioc, handle,
            &mpt3sas_port->remote_identify))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out_fail;
        }
 
        if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out_fail;
        }
 
@@ -687,20 +674,20 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        }
 
        if (!mpt3sas_port->num_phys) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out_fail;
        }
 
        if (!sas_node->parent_dev) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out_fail;
        }
        port = sas_port_alloc_num(sas_node->parent_dev);
        if ((sas_port_add(port))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                goto out_fail;
        }
 
@@ -729,17 +716,17 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
                sas_device = mpt3sas_get_sdev_by_addr(ioc,
                                    mpt3sas_port->remote_identify.sas_address);
                if (!sas_device) {
-                       dfailprintk(ioc, printk(MPT3SAS_FMT
-                               "failure at %s:%d/%s()!\n",
-                               ioc->name, __FILE__, __LINE__, __func__));
+                       dfailprintk(ioc,
+                                   ioc_info(ioc, "failure at %s:%d/%s()!\n",
+                                            __FILE__, __LINE__, __func__));
                        goto out_fail;
                }
                sas_device->pend_sas_rphy_add = 1;
        }
 
        if ((sas_rphy_add(rphy))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
        }
 
        if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -861,14 +848,14 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
        INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
        phy = sas_phy_alloc(parent_dev, phy_index);
        if (!phy) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
        if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
            &mpt3sas_phy->identify))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                sas_phy_free(phy);
                return -1;
        }
@@ -890,8 +877,8 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
            phy_pg0.ProgrammedLinkRate >> 4);
 
        if ((sas_phy_add(phy))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                sas_phy_free(phy);
                return -1;
        }
@@ -929,14 +916,14 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
        INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
        phy = sas_phy_alloc(parent_dev, phy_index);
        if (!phy) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -1;
        }
        if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
            &mpt3sas_phy->identify))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                sas_phy_free(phy);
                return -1;
        }
@@ -960,8 +947,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
            expander_pg1.ProgrammedLinkRate >> 4);
 
        if ((sas_phy_add(phy))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                sas_phy_free(phy);
                return -1;
        }
@@ -1098,16 +1085,14 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
        u16 wait_state_count;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
-               pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: host reset in progress!\n", __func__);
                return -EFAULT;
        }
 
        mutex_lock(&ioc->transport_cmds.mutex);
 
        if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1117,26 +1102,22 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        rc = -EFAULT;
                        goto out;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name, __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
        if (wait_state_count)
-               pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-                   ioc->name, __func__);
+               ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
        smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1146,7 +1127,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 
        sz = sizeof(struct phy_error_log_request) +
            sizeof(struct phy_error_log_reply);
-       data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+       data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+                       GFP_KERNEL);
        if (!data_out) {
                pr_err("failure at %s:%d/%s()!\n", __FILE__,
                    __LINE__, __func__);
@@ -1179,17 +1161,16 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
            data_out_dma + sizeof(struct phy_error_log_request),
            sizeof(struct phy_error_log_reply));
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
-               ioc->name, (unsigned long long)phy->identify.sas_address,
-               phy->number));
+       dtransportprintk(ioc,
+                        ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+                                 (u64)phy->identify.sas_address,
+                                 phy->number));
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SmpPassthroughRequest_t)/4);
                if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1197,16 +1178,15 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
                goto issue_host_reset;
        }
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "phy_error_log - complete\n", ioc->name));
+       dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n"));
 
        if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 
                mpi_reply = ioc->transport_cmds.reply;
 
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "phy_error_log - reply data transfer size(%d)\n",
-                   ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n",
+                                         le16_to_cpu(mpi_reply->ResponseDataLength)));
 
                if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
                    sizeof(struct phy_error_log_reply))
@@ -1215,9 +1195,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
                phy_error_log_reply = data_out +
                    sizeof(struct phy_error_log_request);
 
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "phy_error_log - function_result(%d)\n",
-                   ioc->name, phy_error_log_reply->function_result));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "phy_error_log - function_result(%d)\n",
+                                         phy_error_log_reply->function_result));
 
                phy->invalid_dword_count =
                    be32_to_cpu(phy_error_log_reply->invalid_dword);
@@ -1229,8 +1209,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
                    be32_to_cpu(phy_error_log_reply->phy_reset_problem);
                rc = 0;
        } else
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "phy_error_log - no reply\n", ioc->name));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "phy_error_log - no reply\n"));
 
  issue_host_reset:
        if (issue_reset)
@@ -1238,7 +1218,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
  out:
        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
        if (data_out)
-               pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+               dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
 
        mutex_unlock(&ioc->transport_cmds.mutex);
        return rc;
@@ -1273,17 +1253,16 @@ _transport_get_linkerrors(struct sas_phy *phy)
        /* get hba phy error logs */
        if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
                    phy->number))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -ENXIO;
        }
 
        if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
-               pr_info(MPT3SAS_FMT
-                       "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
-                       ioc->name, phy->number,
-                       le16_to_cpu(mpi_reply.IOCStatus),
-                   le32_to_cpu(mpi_reply.IOCLogInfo));
+               ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+                        phy->number,
+                        le16_to_cpu(mpi_reply.IOCStatus),
+                        le32_to_cpu(mpi_reply.IOCLogInfo));
 
        phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
        phy->running_disparity_error_count =
@@ -1411,16 +1390,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
        u16 wait_state_count;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
-               pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: host reset in progress!\n", __func__);
                return -EFAULT;
        }
 
        mutex_lock(&ioc->transport_cmds.mutex);
 
        if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1430,26 +1407,22 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        rc = -EFAULT;
                        goto out;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name, __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
        if (wait_state_count)
-               pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-                   ioc->name, __func__);
+               ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
        smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1459,7 +1432,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 
        sz = sizeof(struct phy_control_request) +
            sizeof(struct phy_control_reply);
-       data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+       data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+                       GFP_KERNEL);
        if (!data_out) {
                pr_err("failure at %s:%d/%s()!\n", __FILE__,
                    __LINE__, __func__);
@@ -1497,17 +1471,16 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
            data_out_dma + sizeof(struct phy_control_request),
            sizeof(struct phy_control_reply));
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
-               ioc->name, (unsigned long long)phy->identify.sas_address,
-               phy->number, phy_operation));
+       dtransportprintk(ioc,
+                        ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+                                 (u64)phy->identify.sas_address,
+                                 phy->number, phy_operation));
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s: timeout\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SmpPassthroughRequest_t)/4);
                if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1515,16 +1488,15 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
                goto issue_host_reset;
        }
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "phy_control - complete\n", ioc->name));
+       dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
 
        if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
 
                mpi_reply = ioc->transport_cmds.reply;
 
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "phy_control - reply data transfer size(%d)\n",
-                   ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+                                         le16_to_cpu(mpi_reply->ResponseDataLength)));
 
                if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
                    sizeof(struct phy_control_reply))
@@ -1533,14 +1505,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
                phy_control_reply = data_out +
                    sizeof(struct phy_control_request);
 
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "phy_control - function_result(%d)\n",
-                   ioc->name, phy_control_reply->function_result));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "phy_control - function_result(%d)\n",
+                                         phy_control_reply->function_result));
 
                rc = 0;
        } else
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "phy_control - no reply\n", ioc->name));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "phy_control - no reply\n"));
 
  issue_host_reset:
        if (issue_reset)
@@ -1548,7 +1520,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
  out:
        ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
        if (data_out)
-               pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+               dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+                               data_out_dma);
 
        mutex_unlock(&ioc->transport_cmds.mutex);
        return rc;
@@ -1591,16 +1564,15 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
        mpi_request.PhyNum = phy->number;
 
        if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                return -ENXIO;
        }
 
        if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
-               pr_info(MPT3SAS_FMT
-               "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
-               ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
-                   le32_to_cpu(mpi_reply.IOCLogInfo));
+               ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+                        phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+                        le32_to_cpu(mpi_reply.IOCLogInfo));
 
        return 0;
 }
@@ -1647,23 +1619,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
            sizeof(Mpi2SasIOUnit0PhyData_t));
        sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg0) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENOMEM;
                goto out;
        }
        if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
            sas_iounit_pg0, sz))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENXIO;
                goto out;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -EIO;
                goto out;
        }
@@ -1672,10 +1644,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
        for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
                if (sas_iounit_pg0->PhyData[i].PortFlags &
                    MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
-                       pr_err(MPT3SAS_FMT "discovery is active on " \
-                           "port = %d, phy = %d: unable to enable/disable "
-                           "phys, try again later!\n", ioc->name,
-                           sas_iounit_pg0->PhyData[i].Port, i);
+                       ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+                               sas_iounit_pg0->PhyData[i].Port, i);
                        discovery_active = 1;
                }
        }
@@ -1690,23 +1660,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
            sizeof(Mpi2SasIOUnit1PhyData_t));
        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg1) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENOMEM;
                goto out;
        }
        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
            sas_iounit_pg1, sz))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENXIO;
                goto out;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -EIO;
                goto out;
        }
@@ -1798,23 +1768,23 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
            sizeof(Mpi2SasIOUnit1PhyData_t));
        sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
        if (!sas_iounit_pg1) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENOMEM;
                goto out;
        }
        if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
            sas_iounit_pg1, sz))) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENXIO;
                goto out;
        }
        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
            MPI2_IOCSTATUS_MASK;
        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -EIO;
                goto out;
        }
@@ -1833,8 +1803,8 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
 
        if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
            sz)) {
-               pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
-                   ioc->name, __FILE__, __LINE__, __func__);
+               ioc_err(ioc, "failure at %s:%d/%s()!\n",
+                       __FILE__, __LINE__, __func__);
                rc = -ENXIO;
                goto out;
        }
@@ -1922,8 +1892,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
        unsigned int reslen = 0;
 
        if (ioc->shost_recovery || ioc->pci_error_recovery) {
-               pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
-                   __func__, ioc->name);
+               ioc_info(ioc, "%s: host reset in progress!\n", __func__);
                rc = -EFAULT;
                goto job_done;
        }
@@ -1933,8 +1902,8 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
                goto job_done;
 
        if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
-               pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
-                   __func__);
+               ioc_err(ioc, "%s: transport_cmds in use\n",
+                       __func__);
                rc = -EAGAIN;
                goto out;
        }
@@ -1959,26 +1928,22 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
        while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
                if (wait_state_count++ == 10) {
-                       pr_err(MPT3SAS_FMT
-                           "%s: failed due to ioc not operational\n",
-                           ioc->name, __func__);
+                       ioc_err(ioc, "%s: failed due to ioc not operational\n",
+                               __func__);
                        rc = -EFAULT;
                        goto unmap_in;
                }
                ssleep(1);
                ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
-               pr_info(MPT3SAS_FMT
-                       "%s: waiting for operational state(count=%d)\n",
-                       ioc->name, __func__, wait_state_count);
+               ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+                        __func__, wait_state_count);
        }
        if (wait_state_count)
-               pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
-                   ioc->name, __func__);
+               ioc_info(ioc, "%s: ioc is operational\n", __func__);
 
        smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
        if (!smid) {
-               pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
-                   ioc->name, __func__);
+               ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
                rc = -EAGAIN;
                goto unmap_in;
        }
@@ -1999,16 +1964,15 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
        ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
                        dma_len_in - 4);
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s - sending smp request\n", ioc->name, __func__));
+       dtransportprintk(ioc,
+                        ioc_info(ioc, "%s: sending smp request\n", __func__));
 
        init_completion(&ioc->transport_cmds.done);
        mpt3sas_base_put_smid_default(ioc, smid);
        wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
-               pr_err(MPT3SAS_FMT "%s : timeout\n",
-                   __func__, ioc->name);
+               ioc_err(ioc, "%s: timeout\n", __func__);
                _debug_dump_mf(mpi_request,
                    sizeof(Mpi2SmpPassthroughRequest_t)/4);
                if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
@@ -2018,12 +1982,11 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
                }
        }
 
-       dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-               "%s - complete\n", ioc->name, __func__));
+       dtransportprintk(ioc, ioc_info(ioc, "%s - complete\n", __func__));
 
        if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
-               dtransportprintk(ioc, pr_info(MPT3SAS_FMT
-                   "%s - no reply\n", ioc->name, __func__));
+               dtransportprintk(ioc,
+                                ioc_info(ioc, "%s: no reply\n", __func__));
                rc = -ENXIO;
                goto unmap_in;
        }
@@ -2031,9 +1994,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
        mpi_reply = ioc->transport_cmds.reply;
 
        dtransportprintk(ioc,
-               pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
-                       ioc->name, __func__,
-                       le16_to_cpu(mpi_reply->ResponseDataLength)));
+                        ioc_info(ioc, "%s: reply data transfer size(%d)\n",
+                                 __func__,
+                                 le16_to_cpu(mpi_reply->ResponseDataLength)));
 
        memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
        job->reply_len = sizeof(*mpi_reply);
index cae7c1e..6ac453f 100644 (file)
@@ -72,8 +72,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
        u16 sz, event_data_sz;
        unsigned long flags;
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
-           ioc->name, __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
        sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
            sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
@@ -85,23 +84,23 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
        mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
        memcpy(&mpi_reply->EventData, event_data,
            sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: add to driver event log\n",
-               ioc->name, __func__));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: add to driver event log\n",
+                                   __func__));
        mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
        kfree(mpi_reply);
  out:
 
        /* clearing the diag_trigger_active flag */
        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: clearing diag_trigger_active flag\n",
-               ioc->name, __func__));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+                                   __func__));
        ioc->diag_trigger_active = 0;
        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+                                        __func__));
 }
 
 /**
@@ -115,22 +114,22 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
 {
        u8 issue_reset = 0;
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
-           ioc->name, __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
 
        /* release the diag buffer trace */
        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
            MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
-               dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: release trace diag buffer\n", ioc->name, __func__));
+               dTriggerDiagPrintk(ioc,
+                                  ioc_info(ioc, "%s: release trace diag buffer\n",
+                                           __func__));
                mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
                    &issue_reset);
        }
 
        _mpt3sas_raise_sigio(ioc, event_data);
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+                                        __func__));
 }
 
 /**
@@ -168,9 +167,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
 
  by_pass_checks:
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: enter - trigger_bitmask = 0x%08x\n",
-               ioc->name, __func__, trigger_bitmask));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n",
+                                   __func__, trigger_bitmask));
 
        /* don't send trigger if an trigger is currently active */
        if (ioc->diag_trigger_active) {
@@ -182,9 +181,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
        if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
                found_match = 1;
                ioc->diag_trigger_active = 1;
-               dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: setting diag_trigger_active flag\n",
-               ioc->name, __func__));
+               dTriggerDiagPrintk(ioc,
+                                  ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+                                           __func__));
        }
        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
@@ -202,8 +201,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
                mpt3sas_send_trigger_data_event(ioc, &event_data);
 
  out:
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+                                        __func__));
 }
 
 /**
@@ -239,9 +238,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
                return;
        }
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
-               ioc->name, __func__, event, log_entry_qualifier));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+                                   __func__, event, log_entry_qualifier));
 
        /* don't send trigger if an trigger is currently active */
        if (ioc->diag_trigger_active) {
@@ -263,26 +262,26 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
                }
                found_match = 1;
                ioc->diag_trigger_active = 1;
-               dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-                       "%s: setting diag_trigger_active flag\n",
-                       ioc->name, __func__));
+               dTriggerDiagPrintk(ioc,
+                                  ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+                                           __func__));
        }
        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
 
        if (!found_match)
                goto out;
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: setting diag_trigger_active flag\n",
-               ioc->name, __func__));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+                                   __func__));
        memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
        event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
        event_data.u.event.EventValue = event;
        event_data.u.event.LogEntryQualifier = log_entry_qualifier;
        mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+                                        __func__));
 }
 
 /**
@@ -319,9 +318,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
                return;
        }
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
-               ioc->name, __func__, sense_key, asc, ascq));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+                                   __func__, sense_key, asc, ascq));
 
        /* don't send trigger if an trigger is currently active */
        if (ioc->diag_trigger_active) {
@@ -347,9 +346,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
        if (!found_match)
                goto out;
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: setting diag_trigger_active flag\n",
-               ioc->name, __func__));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+                                   __func__));
        memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
        event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
        event_data.u.scsi.SenseKey = sense_key;
@@ -357,8 +356,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
        event_data.u.scsi.ASCQ = ascq;
        mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+                                        __func__));
 }
 
 /**
@@ -393,9 +392,9 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
                return;
        }
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
-               ioc->name, __func__, ioc_status, loginfo));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+                                   __func__, ioc_status, loginfo));
 
        /* don't send trigger if an trigger is currently active */
        if (ioc->diag_trigger_active) {
@@ -420,15 +419,15 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
        if (!found_match)
                goto out;
 
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
-               "%s: setting diag_trigger_active flag\n",
-               ioc->name, __func__));
+       dTriggerDiagPrintk(ioc,
+                          ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+                                   __func__));
        memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
        event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
        event_data.u.mpi.IOCStatus = ioc_status;
        event_data.u.mpi.IocLogInfo = loginfo;
        mpt3sas_send_trigger_data_event(ioc, &event_data);
  out:
-       dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
-           __func__));
+       dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+                                        __func__));
 }
index b4927f2..cc07ba4 100644 (file)
@@ -127,20 +127,17 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
                return;
 
        if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "globally as drives are exposed\n", ioc->name);
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n");
                return;
        }
        if (mpt3sas_get_num_volumes(ioc) > 1) {
                _warpdrive_disable_ddio(ioc);
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "globally as number of drives > 1\n", ioc->name);
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n");
                return;
        }
        if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
            &num_pds)) || !num_pds) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "Failure in computing number of drives\n", ioc->name);
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n");
                return;
        }
 
@@ -148,15 +145,13 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
            sizeof(Mpi2RaidVol0PhysDisk_t));
        vol_pg0 = kzalloc(sz, GFP_KERNEL);
        if (!vol_pg0) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "Memory allocation failure for RVPG0\n", ioc->name);
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n");
                return;
        }
 
        if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
             MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "Failure in retrieving RVPG0\n", ioc->name);
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n");
                kfree(vol_pg0);
                return;
        }
@@ -166,10 +161,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
         * assumed for WARPDRIVE, disable direct I/O
         */
        if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
-               pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "for the drive with handle(0x%04x): num_mem=%d, "
-                   "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
-                   num_pds, MPT_MAX_WARPDRIVE_PDS);
+               ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n",
+                        raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS);
                kfree(vol_pg0);
                return;
        }
@@ -179,22 +172,18 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
                    vol_pg0->PhysDisk[count].PhysDiskNum) ||
                    le16_to_cpu(pd_pg0.DevHandle) ==
                    MPT3SAS_INVALID_DEVICE_HANDLE) {
-                       pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
-                           "disabled for the drive with handle(0x%04x) member"
-                           "handle retrieval failed for member number=%d\n",
-                           ioc->name, raid_device->handle,
-                           vol_pg0->PhysDisk[count].PhysDiskNum);
+                       ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n",
+                                raid_device->handle,
+                                vol_pg0->PhysDisk[count].PhysDiskNum);
                        goto out_error;
                }
                /* Disable direct I/O if member drive lba exceeds 4 bytes */
                dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
                if (dev_max_lba >> 32) {
-                       pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
-                           "disabled for the drive with handle(0x%04x) member"
-                           " handle (0x%04x) unsupported max lba 0x%016llx\n",
-                           ioc->name, raid_device->handle,
-                           le16_to_cpu(pd_pg0.DevHandle),
-                           (unsigned long long)dev_max_lba);
+                       ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n",
+                                raid_device->handle,
+                                le16_to_cpu(pd_pg0.DevHandle),
+                                (u64)dev_max_lba);
                        goto out_error;
                }
 
@@ -206,41 +195,36 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
         * not RAID0
         */
        if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "for the drive with handle(0x%04x): type=%d, "
-                   "s_sz=%uK, blk_size=%u\n", ioc->name,
-                   raid_device->handle, raid_device->volume_type,
-                   (le32_to_cpu(vol_pg0->StripeSize) *
-                   le16_to_cpu(vol_pg0->BlockSize)) / 1024,
-                   le16_to_cpu(vol_pg0->BlockSize));
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n",
+                        raid_device->handle, raid_device->volume_type,
+                        (le32_to_cpu(vol_pg0->StripeSize) *
+                         le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+                        le16_to_cpu(vol_pg0->BlockSize));
                goto out_error;
        }
 
        stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
        stripe_exp = find_first_bit(&stripe_sz, 32);
        if (stripe_exp == 32) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-               "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
-                   ioc->name, raid_device->handle,
-                   (le32_to_cpu(vol_pg0->StripeSize) *
-                   le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+                        raid_device->handle,
+                        (le32_to_cpu(vol_pg0->StripeSize) *
+                         le16_to_cpu(vol_pg0->BlockSize)) / 1024);
                goto out_error;
        }
        raid_device->stripe_exponent = stripe_exp;
        block_sz = le16_to_cpu(vol_pg0->BlockSize);
        block_exp = find_first_bit(&block_sz, 16);
        if (block_exp == 16) {
-               pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
-                   "for the drive with handle(0x%04x) invalid block sz %u\n",
-                   ioc->name, raid_device->handle,
-                   le16_to_cpu(vol_pg0->BlockSize));
+               ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n",
+                        raid_device->handle, le16_to_cpu(vol_pg0->BlockSize));
                goto out_error;
        }
        raid_device->block_exponent = block_exp;
        raid_device->direct_io_enabled = 1;
 
-       pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
-           " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+       ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n",
+                raid_device->handle);
        /*
         * WARPDRIVE: Though the following fields are not used for direct IO,
         * stored for future purpose:
index 8c91637..3ac3437 100644 (file)
@@ -403,29 +403,14 @@ static int pci_go_64(struct pci_dev *pdev)
 {
        int rc;
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (rc) {
-                       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-                       if (rc) {
-                               dev_printk(KERN_ERR, &pdev->dev,
-                                          "64-bit DMA enable failed\n");
-                               return rc;
-                       }
-               }
-       } else {
-               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rc) {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_printk(KERN_ERR, &pdev->dev,
-                                  "32-bit consistent DMA enable failed\n");
-                       return rc;
-               }
        }
 
        return rc;
index cff43bd..3df1428 100644 (file)
@@ -336,13 +336,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
         * DMA-map SMP request, response buffers
         */
        sg_req = &task->smp_task.smp_req;
-       elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
        if (!elem)
                return -ENOMEM;
        req_len = sg_dma_len(sg_req);
 
        sg_resp = &task->smp_task.smp_resp;
-       elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
        if (!elem) {
                rc = -ENOMEM;
                goto err_out;
@@ -416,10 +416,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
 
 err_out_2:
        dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
-                    PCI_DMA_FROMDEVICE);
+                    DMA_FROM_DEVICE);
 err_out:
        dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
-                    PCI_DMA_TODEVICE);
+                    DMA_TO_DEVICE);
        return rc;
 }
 
@@ -904,9 +904,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
        switch (task->task_proto) {
        case SAS_PROTOCOL_SMP:
                dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
-                            PCI_DMA_FROMDEVICE);
+                            DMA_FROM_DEVICE);
                dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
-                            PCI_DMA_TODEVICE);
+                            DMA_TO_DEVICE);
                break;
 
        case SAS_PROTOCOL_SATA:
index b3cd9a6..2458974 100644 (file)
@@ -143,8 +143,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 
        case RESOURCE_UNCACHED_MEMORY:
                size = round_up(size, 8);
-               res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
-                                                      &res->bus_addr);
+               res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
+                               &res->bus_addr, GFP_KERNEL);
                if (!res->virt_addr) {
                        dev_err(&mhba->pdev->dev,
                                        "unable to allocate consistent mem,"
@@ -175,7 +175,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
        list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
                switch (res->type) {
                case RESOURCE_UNCACHED_MEMORY:
-                       pci_free_consistent(mhba->pdev, res->size,
+                       dma_free_coherent(&mhba->pdev->dev, res->size,
                                                res->virt_addr, res->bus_addr);
                        break;
                case RESOURCE_CACHED_MEMORY:
@@ -211,14 +211,14 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
        dma_addr_t busaddr;
 
        sg = scsi_sglist(scmd);
-       *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
-                              (int) scmd->sc_data_direction);
+       *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+                              scmd->sc_data_direction);
        if (*sg_count > mhba->max_sge) {
                dev_err(&mhba->pdev->dev,
                        "sg count[0x%x] is bigger than max sg[0x%x].\n",
                        *sg_count, mhba->max_sge);
-               pci_unmap_sg(mhba->pdev, sg, sgnum,
-                            (int) scmd->sc_data_direction);
+               dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+                            scmd->sc_data_direction);
                return -1;
        }
        for (i = 0; i < *sg_count; i++) {
@@ -246,7 +246,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
        if (size == 0)
                return 0;
 
-       virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+       virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+                       GFP_KERNEL);
        if (!virt_addr)
                return -1;
 
@@ -274,8 +275,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
        }
        INIT_LIST_HEAD(&cmd->queue_pointer);
 
-       cmd->frame = pci_alloc_consistent(mhba->pdev,
-                               mhba->ib_max_size, &cmd->frame_phys);
+       cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+                       &cmd->frame_phys, GFP_KERNEL);
        if (!cmd->frame) {
                dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
                        " frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +288,7 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
                if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
                        dev_err(&mhba->pdev->dev, "failed to allocate memory"
                                                " for internal frame\n");
-                       pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+                       dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                                        cmd->frame, cmd->frame_phys);
                        kfree(cmd);
                        return NULL;
@@ -313,10 +314,10 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
                        phy_addr = (dma_addr_t) m_sg->baseaddr_l |
                                (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 
-                       pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+                       dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
                                                                phy_addr);
                }
-               pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+               dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
                                cmd->frame, cmd->frame_phys);
                kfree(cmd);
        }
@@ -663,16 +664,17 @@ static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
        }
 }
 
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
 {
-       unsigned int ret = 0;
+       int ret = 0;
+
        pci_set_master(pdev);
 
        if (IS_DMA64) {
-               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-                       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+                       ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        } else
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
        return ret;
 }
@@ -771,7 +773,7 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
        mvumi_free_cmds(mhba);
        mvumi_release_mem_resource(mhba);
        mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
-       pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+       dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                mhba->handshake_page, mhba->handshake_page_phys);
        kfree(mhba->regs);
        pci_release_regions(mhba->pdev);
@@ -1339,9 +1341,9 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
        }
 
        if (scsi_bufflen(scmd))
-               pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+               dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                             scsi_sg_count(scmd),
-                            (int) scmd->sc_data_direction);
+                            scmd->sc_data_direction);
        cmd->scmd->scsi_done(scmd);
        mvumi_return_cmd(mhba, cmd);
 }
@@ -2148,9 +2150,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
        scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
        scmd->SCp.ptr = NULL;
        if (scsi_bufflen(scmd)) {
-               pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+               dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                             scsi_sg_count(scmd),
-                            (int)scmd->sc_data_direction);
+                            scmd->sc_data_direction);
        }
        mvumi_return_cmd(mhba, cmd);
        spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2362,8 +2364,8 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
                ret = -ENOMEM;
                goto fail_alloc_mem;
        }
-       mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
-                                               &mhba->handshake_page_phys);
+       mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+                       HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
        if (!mhba->handshake_page) {
                dev_err(&mhba->pdev->dev,
                        "failed to allocate memory for handshake\n");
@@ -2383,7 +2385,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
 
 fail_ready_state:
        mvumi_release_mem_resource(mhba);
-       pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+       dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
                mhba->handshake_page, mhba->handshake_page_phys);
 fail_alloc_page:
        kfree(mhba->regs);
@@ -2480,20 +2482,9 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                return ret;
 
-       pci_set_master(pdev);
-
-       if (IS_DMA64) {
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (ret) {
-                       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                       if (ret)
-                               goto fail_set_dma_mask;
-               }
-       } else {
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (ret)
-                       goto fail_set_dma_mask;
-       }
+       ret = mvumi_pci_set_master(pdev);
+       if (ret)
+               goto fail_set_dma_mask;
 
        host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
        if (!host) {
@@ -2627,19 +2618,10 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
                dev_err(&pdev->dev, "enable device failed\n");
                return ret;
        }
-       pci_set_master(pdev);
-       if (IS_DMA64) {
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (ret) {
-                       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                       if (ret)
-                               goto fail;
-               }
-       } else {
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (ret)
-                       goto fail;
-       }
+
+       ret = mvumi_pci_set_master(pdev);
+       if (ret)
+               goto fail;
        ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
        if (ret)
                goto fail;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
new file mode 100644 (file)
index 0000000..aeb282f
--- /dev/null
@@ -0,0 +1,3656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrb.h"
+
+static struct raid_template *myrb_raid_template;
+
+static void myrb_monitor(struct work_struct *work);
+static inline void myrb_translate_devstate(void *DeviceState);
+
+static inline int myrb_logical_channel(struct Scsi_Host *shost)
+{
+       return shost->max_channel - 1;
+}
+
+static struct myrb_devstate_name_entry {
+       enum myrb_devstate state;
+       const char *name;
+} myrb_devstate_name_list[] = {
+       { MYRB_DEVICE_DEAD, "Dead" },
+       { MYRB_DEVICE_WO, "WriteOnly" },
+       { MYRB_DEVICE_ONLINE, "Online" },
+       { MYRB_DEVICE_CRITICAL, "Critical" },
+       { MYRB_DEVICE_STANDBY, "Standby" },
+       { MYRB_DEVICE_OFFLINE, "Offline" },
+};
+
+static const char *myrb_devstate_name(enum myrb_devstate state)
+{
+       struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
+               if (entry[i].state == state)
+                       return entry[i].name;
+       }
+       return "Unknown";
+}
+
+static struct myrb_raidlevel_name_entry {
+       enum myrb_raidlevel level;
+       const char *name;
+} myrb_raidlevel_name_list[] = {
+       { MYRB_RAID_LEVEL0, "RAID0" },
+       { MYRB_RAID_LEVEL1, "RAID1" },
+       { MYRB_RAID_LEVEL3, "RAID3" },
+       { MYRB_RAID_LEVEL5, "RAID5" },
+       { MYRB_RAID_LEVEL6, "RAID6" },
+       { MYRB_RAID_JBOD, "JBOD" },
+};
+
+static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
+{
+       struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
+               if (entry[i].level == level)
+                       return entry[i].name;
+       }
+       return NULL;
+}
+
+/**
+ * myrb_create_mempools - allocates auxiliary data structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
+{
+       size_t elem_size, elem_align;
+
+       elem_align = sizeof(struct myrb_sge);
+       elem_size = cb->host->sg_tablesize * elem_align;
+       cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
+                                     elem_size, elem_align, 0);
+       if (cb->sg_pool == NULL) {
+               shost_printk(KERN_ERR, cb->host,
+                            "Failed to allocate SG pool\n");
+               return false;
+       }
+
+       cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
+                                      sizeof(struct myrb_dcdb),
+                                      sizeof(unsigned int), 0);
+       if (!cb->dcdb_pool) {
+               dma_pool_destroy(cb->sg_pool);
+               cb->sg_pool = NULL;
+               shost_printk(KERN_ERR, cb->host,
+                            "Failed to allocate DCDB pool\n");
+               return false;
+       }
+
+       snprintf(cb->work_q_name, sizeof(cb->work_q_name),
+                "myrb_wq_%d", cb->host->host_no);
+       cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+       if (!cb->work_q) {
+               dma_pool_destroy(cb->dcdb_pool);
+               cb->dcdb_pool = NULL;
+               dma_pool_destroy(cb->sg_pool);
+               cb->sg_pool = NULL;
+               shost_printk(KERN_ERR, cb->host,
+                            "Failed to create workqueue\n");
+               return false;
+       }
+
+       /*
+        * Initialize the Monitoring Timer.
+        */
+       INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
+       queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
+
+       return true;
+}
+
+/**
+ * myrb_destroy_mempools - tears down the memory pools for the controller
+ */
+static void myrb_destroy_mempools(struct myrb_hba *cb)
+{
+       cancel_delayed_work_sync(&cb->monitor_work);
+       destroy_workqueue(cb->work_q);
+
+       dma_pool_destroy(cb->sg_pool);
+       dma_pool_destroy(cb->dcdb_pool);
+}
+
+/**
+ * myrb_reset_cmd - reset command block
+ */
+static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
+{
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+       memset(mbox, 0, sizeof(union myrb_cmd_mbox));
+       cmd_blk->status = 0;
+}
+
+/**
+ * myrb_qcmd - queues command block for execution
+ */
+static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+       void __iomem *base = cb->io_base;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
+
+       cb->write_cmd_mbox(next_mbox, mbox);
+       if (cb->prev_cmd_mbox1->words[0] == 0 ||
+           cb->prev_cmd_mbox2->words[0] == 0)
+               cb->get_cmd_mbox(base);
+       cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
+       cb->prev_cmd_mbox1 = next_mbox;
+       if (++next_mbox > cb->last_cmd_mbox)
+               next_mbox = cb->first_cmd_mbox;
+       cb->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrb_exec_cmd - executes command block and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
+               struct myrb_cmdblk *cmd_blk)
+{
+       DECLARE_COMPLETION_ONSTACK(cmpl);
+       unsigned long flags;
+
+       cmd_blk->completion = &cmpl;
+
+       spin_lock_irqsave(&cb->queue_lock, flags);
+       cb->qcmd(cb, cmd_blk);
+       spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+       WARN_ON(in_interrupt());
+       wait_for_completion(&cmpl);
+       return cmd_blk->status;
+}
+
+/**
+ * myrb_exec_type3 - executes a type 3 command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3(struct myrb_hba *cb,
+               enum myrb_cmd_opcode op, dma_addr_t addr)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       unsigned short status;
+
+       mutex_lock(&cb->dcmd_mutex);
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3.id = MYRB_DCMD_TAG;
+       mbox->type3.opcode = op;
+       mbox->type3.addr = addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       mutex_unlock(&cb->dcmd_mutex);
+       return status;
+}
+
+/**
+ * myrb_exec_type3D - executes a type 3D command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
+               enum myrb_cmd_opcode op, struct scsi_device *sdev,
+               struct myrb_pdev_state *pdev_info)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       unsigned short status;
+       dma_addr_t pdev_info_addr;
+
+       pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
+                                       sizeof(struct myrb_pdev_state),
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
+               return MYRB_STATUS_SUBSYS_FAILED;
+
+       mutex_lock(&cb->dcmd_mutex);
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3D.id = MYRB_DCMD_TAG;
+       mbox->type3D.opcode = op;
+       mbox->type3D.channel = sdev->channel;
+       mbox->type3D.target = sdev->id;
+       mbox->type3D.addr = pdev_info_addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       mutex_unlock(&cb->dcmd_mutex);
+       dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
+                        sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
+       if (status == MYRB_STATUS_SUCCESS &&
+           mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
+               myrb_translate_devstate(pdev_info);
+
+       return status;
+}
+
+static char *myrb_event_msg[] = {
+       "killed because write recovery failed",
+       "killed because of SCSI bus reset failure",
+       "killed because of double check condition",
+       "killed because it was removed",
+       "killed because of gross error on SCSI chip",
+       "killed because of bad tag returned from drive",
+       "killed because of timeout on SCSI command",
+       "killed because of reset SCSI command issued from system",
+       "killed because busy or parity error count exceeded limit",
+       "killed because of 'kill drive' command from system",
+       "killed because of selection timeout",
+       "killed due to SCSI phase sequence error",
+       "killed due to unknown status",
+};
+
+/**
+ * myrb_get_event - get event log from HBA
+ * @cb: pointer to the hba structure
+ * @event: number of the event
+ *
+ * Execute a type 3E command and logs the event message
+ */
+static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct myrb_log_entry *ev_buf;
+       dma_addr_t ev_addr;
+       unsigned short status;
+
+       ev_buf = dma_alloc_coherent(&cb->pdev->dev,
+                                   sizeof(struct myrb_log_entry),
+                                   &ev_addr, GFP_KERNEL);
+       if (!ev_buf)
+               return;
+
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3E.id = MYRB_MCMD_TAG;
+       mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
+       mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
+       mbox->type3E.opqual = 1;
+       mbox->type3E.ev_seq = event;
+       mbox->type3E.addr = ev_addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       if (status != MYRB_STATUS_SUCCESS)
+               shost_printk(KERN_INFO, cb->host,
+                            "Failed to get event log %d, status %04x\n",
+                            event, status);
+
+       else if (ev_buf->seq_num == event) {
+               struct scsi_sense_hdr sshdr;
+
+               memset(&sshdr, 0, sizeof(sshdr));
+               scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
+
+               if (sshdr.sense_key == VENDOR_SPECIFIC &&
+                   sshdr.asc == 0x80 &&
+                   sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
+                       shost_printk(KERN_CRIT, cb->host,
+                                    "Physical drive %d:%d: %s\n",
+                                    ev_buf->channel, ev_buf->target,
+                                    myrb_event_msg[sshdr.ascq]);
+               else
+                       shost_printk(KERN_CRIT, cb->host,
+                                    "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
+                                    ev_buf->channel, ev_buf->target,
+                                    sshdr.sense_key, sshdr.asc, sshdr.ascq);
+       }
+
+       dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
+                         ev_buf, ev_addr);
+}
+
+/**
+ * myrb_get_errtable - retrieves the error table from the controller
+ *
+ * Executes a type 3 command and logs the error table from the controller.
+ */
+static void myrb_get_errtable(struct myrb_hba *cb)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       unsigned short status;
+       struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
+
+       memcpy(&old_table, cb->err_table, sizeof(old_table));
+
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3.id = MYRB_MCMD_TAG;
+       mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
+       mbox->type3.addr = cb->err_table_addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       if (status == MYRB_STATUS_SUCCESS) {
+               struct myrb_error_entry *table = cb->err_table;
+               struct myrb_error_entry *new, *old;
+               size_t err_table_offset;
+               struct scsi_device *sdev;
+
+               shost_for_each_device(sdev, cb->host) {
+                       if (sdev->channel >= myrb_logical_channel(cb->host))
+                               continue;
+                       err_table_offset = sdev->channel * MYRB_MAX_TARGETS
+                               + sdev->id;
+                       new = table + err_table_offset;
+                       old = &old_table[err_table_offset];
+                       if (new->parity_err == old->parity_err &&
+                           new->soft_err == old->soft_err &&
+                           new->hard_err == old->hard_err &&
+                           new->misc_err == old->misc_err)
+                               continue;
+                       sdev_printk(KERN_CRIT, sdev,
+                                   "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
+                                   new->parity_err, new->soft_err,
+                                   new->hard_err, new->misc_err);
+               }
+       }
+}
+
+/**
+ * myrb_get_ldev_info - retrieves the logical device table from the controller
+ *
+ * Executes a type 3 command and updates the logical device table.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
+{
+       unsigned short status;
+       int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
+       struct Scsi_Host *shost = cb->host;
+
+       status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
+                                cb->ldev_info_addr);
+       if (status != MYRB_STATUS_SUCCESS)
+               return status;
+
+       for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
+               struct myrb_ldev_info *old = NULL;
+               struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
+               struct scsi_device *sdev;
+
+               sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
+                                         ldev_num, 0);
+               if (!sdev) {
+                       if (new->state == MYRB_DEVICE_OFFLINE)
+                               continue;
+                       shost_printk(KERN_INFO, shost,
+                                    "Adding Logical Drive %d in state %s\n",
+                                    ldev_num, myrb_devstate_name(new->state));
+                       scsi_add_device(shost, myrb_logical_channel(shost),
+                                       ldev_num, 0);
+                       continue;
+               }
+               old = sdev->hostdata;
+               if (new->state != old->state)
+                       shost_printk(KERN_INFO, shost,
+                                    "Logical Drive %d is now %s\n",
+                                    ldev_num, myrb_devstate_name(new->state));
+               if (new->wb_enabled != old->wb_enabled)
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Logical Drive is now WRITE %s\n",
+                                   (new->wb_enabled ? "BACK" : "THRU"));
+               memcpy(old, new, sizeof(*new));
+               scsi_device_put(sdev);
+       }
+       return status;
+}
+
+/**
+ * myrb_get_rbld_progress - get rebuild progress information
+ *
+ * Executes a type 3 command and returns the rebuild progress
+ * information.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
+               struct myrb_rbld_progress *rbld)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct myrb_rbld_progress *rbld_buf;
+       dma_addr_t rbld_addr;
+       unsigned short status;
+
+       rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+                                     sizeof(struct myrb_rbld_progress),
+                                     &rbld_addr, GFP_KERNEL);
+       if (!rbld_buf)
+               return MYRB_STATUS_RBLD_NOT_CHECKED;
+
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3.id = MYRB_MCMD_TAG;
+       mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
+       mbox->type3.addr = rbld_addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       if (rbld)
+               memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
+       dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+                         rbld_buf, rbld_addr);
+       return status;
+}
+
+/**
+ * myrb_update_rbld_progress - updates the rebuild status
+ *
+ * Updates the rebuild status for the attached logical devices.
+ *
+ */
+static void myrb_update_rbld_progress(struct myrb_hba *cb)
+{
+       struct myrb_rbld_progress rbld_buf;
+       unsigned short status;
+
+       status = myrb_get_rbld_progress(cb, &rbld_buf);
+       if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
+           cb->last_rbld_status == MYRB_STATUS_SUCCESS)
+               status = MYRB_STATUS_RBLD_SUCCESS;
+       if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
+               unsigned int blocks_done =
+                       rbld_buf.ldev_size - rbld_buf.blocks_left;
+               struct scsi_device *sdev;
+
+               sdev = scsi_device_lookup(cb->host,
+                                         myrb_logical_channel(cb->host),
+                                         rbld_buf.ldev_num, 0);
+               if (!sdev)
+                       return;
+
+               switch (status) {
+               case MYRB_STATUS_SUCCESS:
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild in Progress, %d%% completed\n",
+                                   (100 * (blocks_done >> 7))
+                                   / (rbld_buf.ldev_size >> 7));
+                       break;
+               case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild Failed due to Logical Drive Failure\n");
+                       break;
+               case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild Failed due to Bad Blocks on Other Drives\n");
+                       break;
+               case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
+                       break;
+               case MYRB_STATUS_RBLD_SUCCESS:
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild Completed Successfully\n");
+                       break;
+               case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
+                       sdev_printk(KERN_INFO, sdev,
+                                    "Rebuild Successfully Terminated\n");
+                       break;
+               default:
+                       break;
+               }
+               scsi_device_put(sdev);
+       }
+       cb->last_rbld_status = status;
+}
+
+/**
+ * myrb_get_cc_progress - retrieve the rebuild status
+ *
+ * Execute a type 3 Command and fetch the rebuild / consistency check
+ * status.
+ */
+static void myrb_get_cc_progress(struct myrb_hba *cb)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct myrb_rbld_progress *rbld_buf;
+       dma_addr_t rbld_addr;
+       unsigned short status;
+
+       rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+                                     sizeof(struct myrb_rbld_progress),
+                                     &rbld_addr, GFP_KERNEL);
+       if (!rbld_buf) {
+               cb->need_cc_status = true;
+               return;
+       }
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3.id = MYRB_MCMD_TAG;
+       mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
+       mbox->type3.addr = rbld_addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       if (status == MYRB_STATUS_SUCCESS) {
+               unsigned int ldev_num = rbld_buf->ldev_num;
+               unsigned int ldev_size = rbld_buf->ldev_size;
+               unsigned int blocks_done =
+                       ldev_size - rbld_buf->blocks_left;
+               struct scsi_device *sdev;
+
+               sdev = scsi_device_lookup(cb->host,
+                                         myrb_logical_channel(cb->host),
+                                         ldev_num, 0);
+               if (sdev) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Consistency Check in Progress: %d%% completed\n",
+                                   (100 * (blocks_done >> 7))
+                                   / (ldev_size >> 7));
+                       scsi_device_put(sdev);
+               }
+       }
+       dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+                         rbld_buf, rbld_addr);
+}
+
+/**
+ * myrb_bgi_control - updates background initialisation status
+ * @cb: myrb_hba instance to update
+ *
+ * Executes a type 3B command and updates the background initialisation status
+ * cached in cb->bgi_status. State transitions are logged against the
+ * scsi_device of the affected logical drive when one is registered.
+ */
+static void myrb_bgi_control(struct myrb_hba *cb)
+{
+       struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct myrb_bgi_status *bgi, *last_bgi;
+       dma_addr_t bgi_addr;
+       struct scsi_device *sdev = NULL;
+       unsigned short status;
+
+       /* DMA-coherent buffer the controller fills with the BGI status */
+       bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+                                &bgi_addr, GFP_KERNEL);
+       if (!bgi) {
+               shost_printk(KERN_ERR, cb->host,
+                            "Failed to allocate bgi memory\n");
+               return;
+       }
+       myrb_reset_cmd(cmd_blk);
+       mbox->type3B.id = MYRB_DCMD_TAG;
+       mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
+       mbox->type3B.optype = 0x20;
+       mbox->type3B.addr = bgi_addr;
+       status = myrb_exec_cmd(cb, cmd_blk);
+       last_bgi = &cb->bgi_status;
+       /*
+        * NOTE(review): bgi->ldev_num is read before the command status is
+        * checked — presumably harmless as the lookup may return NULL, but
+        * confirm the controller leaves the buffer zeroed on failure.
+        */
+       sdev = scsi_device_lookup(cb->host,
+                                 myrb_logical_channel(cb->host),
+                                 bgi->ldev_num, 0);
+       switch (status) {
+       case MYRB_STATUS_SUCCESS:
+               switch (bgi->status) {
+               case MYRB_BGI_INVALID:
+                       break;
+               case MYRB_BGI_STARTED:
+                       if (!sdev)
+                               break;
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Background Initialization Started\n");
+                       break;
+               case MYRB_BGI_INPROGRESS:
+                       if (!sdev)
+                               break;
+                       /* Only log when progress differs from the last poll */
+                       if (bgi->blocks_done == last_bgi->blocks_done &&
+                           bgi->ldev_num == last_bgi->ldev_num)
+                               break;
+                       sdev_printk(KERN_INFO, sdev,
+                                "Background Initialization in Progress: %d%% completed\n",
+                                (100 * (bgi->blocks_done >> 7))
+                                / (bgi->ldev_size >> 7));
+                       break;
+               case MYRB_BGI_SUSPENDED:
+                       if (!sdev)
+                               break;
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Background Initialization Suspended\n");
+                       break;
+               case MYRB_BGI_CANCELLED:
+                       if (!sdev)
+                               break;
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Background Initialization Cancelled\n");
+                       break;
+               }
+               /* Cache the new status for the next progress comparison */
+               memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
+               break;
+       case MYRB_STATUS_BGI_SUCCESS:
+               if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Background Initialization Completed Successfully\n");
+               cb->bgi_status.status = MYRB_BGI_INVALID;
+               break;
+       case MYRB_STATUS_BGI_ABORTED:
+               if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Background Initialization Aborted\n");
+               /* Fallthrough */
+       case MYRB_STATUS_NO_BGI_INPROGRESS:
+               cb->bgi_status.status = MYRB_BGI_INVALID;
+               break;
+       }
+       if (sdev)
+               scsi_device_put(sdev);
+       dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+                         bgi, bgi_addr);
+}
+
+/**
+ * myrb_hba_enquiry - updates the controller status
+ * @cb: myrb_hba instance to update
+ *
+ * Executes a DAC_V1_Enquiry command and updates the controller status.
+ * The freshly fetched enquiry data (written by the controller into
+ * cb->enquiry) is compared with the previous snapshot; differences in
+ * logical drive count, deferred-write flag, event-log sequence and
+ * rebuild / consistency-check state are logged and flagged for the
+ * monitor via the cb->need_* fields.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
+{
+       struct myrb_enquiry old, *new;
+       unsigned short status;
+
+       /* Snapshot the old enquiry before the controller overwrites it */
+       memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
+
+       status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
+       if (status != MYRB_STATUS_SUCCESS)
+               return status;
+
+       new = cb->enquiry;
+       if (new->ldev_count > old.ldev_count) {
+               int ldev_num = old.ldev_count - 1;
+
+               while (++ldev_num < new->ldev_count)
+                       shost_printk(KERN_CRIT, cb->host,
+                                    "Logical Drive %d Now Exists\n",
+                                    ldev_num);
+       }
+       if (new->ldev_count < old.ldev_count) {
+               int ldev_num = new->ldev_count - 1;
+
+               while (++ldev_num < old.ldev_count)
+                       shost_printk(KERN_CRIT, cb->host,
+                                    "Logical Drive %d No Longer Exists\n",
+                                    ldev_num);
+       }
+       if (new->status.deferred != old.status.deferred)
+               shost_printk(KERN_CRIT, cb->host,
+                            "Deferred Write Error Flag is now %s\n",
+                            (new->status.deferred ? "TRUE" : "FALSE"));
+       if (new->ev_seq != old.ev_seq) {
+               cb->new_ev_seq = new->ev_seq;
+               cb->need_err_info = true;
+               shost_printk(KERN_INFO, cb->host,
+                            "Event log %d/%d (%d/%d) available\n",
+                            cb->old_ev_seq, cb->new_ev_seq,
+                            old.ev_seq, new->ev_seq);
+       }
+       if ((new->ldev_critical > 0 &&
+            new->ldev_critical != old.ldev_critical) ||
+           (new->ldev_offline > 0 &&
+            new->ldev_offline != old.ldev_offline) ||
+           (new->ldev_count != old.ldev_count)) {
+               shost_printk(KERN_INFO, cb->host,
+                            "Logical drive count changed (%d/%d/%d)\n",
+                            new->ldev_critical,
+                            new->ldev_offline,
+                            new->ldev_count);
+               cb->need_ldev_info = true;
+       }
+       /* Refresh BGI status on device changes or periodically */
+       if (new->pdev_dead > 0 ||
+           new->pdev_dead != old.pdev_dead ||
+           time_after_eq(jiffies, cb->secondary_monitor_time
+                         + MYRB_SECONDARY_MONITOR_INTERVAL)) {
+               cb->need_bgi_status = cb->bgi_status_supported;
+               cb->secondary_monitor_time = jiffies;
+       }
+       if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+           new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
+           old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+           old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
+               cb->need_rbld = true;
+               cb->rbld_first = (new->ldev_critical < old.ldev_critical);
+       }
+       /* Report how a previously running consistency check ended */
+       if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
+               switch (new->rbld) {
+               case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
+                       shost_printk(KERN_INFO, cb->host,
+                                    "Consistency Check Completed Successfully\n");
+                       break;
+               case MYRB_STDBY_RBLD_IN_PROGRESS:
+               case MYRB_BG_RBLD_IN_PROGRESS:
+                       break;
+               case MYRB_BG_CHECK_IN_PROGRESS:
+                       cb->need_cc_status = true;
+                       break;
+               case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
+                       shost_printk(KERN_INFO, cb->host,
+                                    "Consistency Check Completed with Error\n");
+                       break;
+               case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
+                       shost_printk(KERN_INFO, cb->host,
+                                    "Consistency Check Failed - Physical Device Failed\n");
+                       break;
+               case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
+                       shost_printk(KERN_INFO, cb->host,
+                                    "Consistency Check Failed - Logical Drive Failed\n");
+                       break;
+               case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
+                       shost_printk(KERN_INFO, cb->host,
+                                    "Consistency Check Failed - Other Causes\n");
+                       break;
+               case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
+                       shost_printk(KERN_INFO, cb->host,
+                                    "Consistency Check Successfully Terminated\n");
+                       break;
+               }
+       else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
+               cb->need_cc_status = true;
+
+       return MYRB_STATUS_SUCCESS;
+}
+
+/**
+ * myrb_set_pdev_state - sets the device state for a physical device
+ * @cb: myrb_hba instance
+ * @sdev: physical device whose state should change
+ * @state: new device state
+ *
+ * Issues a type 3D MYRB_CMD_START_DEVICE command under the direct-command
+ * mutex.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
+               struct scsi_device *sdev, enum myrb_devstate state)
+{
+       struct myrb_cmdblk *cmd = &cb->dcmd_blk;
+       union myrb_cmd_mbox *mbox = &cmd->mbox;
+       unsigned short result;
+
+       mutex_lock(&cb->dcmd_mutex);
+       mbox->type3D.id = MYRB_DCMD_TAG;
+       mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
+       mbox->type3D.channel = sdev->channel;
+       mbox->type3D.target = sdev->id;
+       /* Only the low five bits encode the device state */
+       mbox->type3D.state = state & 0x1F;
+       result = myrb_exec_cmd(cb, cmd);
+       mutex_unlock(&cb->dcmd_mutex);
+
+       return result;
+}
+
+/**
+ * myrb_enable_mmio - enables the Memory Mailbox Interface
+ * @cb: myrb_hba instance to initialise
+ * @mmio_init_fn: hardware-specific mailbox initialisation callback, or
+ *                NULL for controllers without a memory mailbox
+ *
+ * PD and P controller types have no memory mailbox, but still need the
+ * other dma mapped memory.  Buffers allocated here before a failure are
+ * left in place for myrb_unmap() to release.
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
+{
+       void __iomem *base = cb->io_base;
+       struct pci_dev *pdev = cb->pdev;
+       size_t err_table_size;
+       size_t ldev_info_size;
+       union myrb_cmd_mbox *cmd_mbox_mem;
+       struct myrb_stat_mbox *stat_mbox_mem;
+       union myrb_cmd_mbox mbox;
+       unsigned short status;
+
+       memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
+
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+               dev_err(&pdev->dev, "DMA mask out of range\n");
+               return false;
+       }
+
+       cb->enquiry = dma_alloc_coherent(&pdev->dev,
+                                        sizeof(struct myrb_enquiry),
+                                        &cb->enquiry_addr, GFP_KERNEL);
+       if (!cb->enquiry)
+               return false;
+
+       err_table_size = sizeof(struct myrb_error_entry) *
+               MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+       cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
+                                          &cb->err_table_addr, GFP_KERNEL);
+       if (!cb->err_table)
+               return false;
+
+       ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
+       cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
+                                              &cb->ldev_info_addr, GFP_KERNEL);
+       if (!cb->ldev_info_buf)
+               return false;
+
+       /*
+        * Skip mailbox initialisation for PD and P Controllers
+        */
+       if (!mmio_init_fn)
+               return true;
+
+       /* These are the base addresses for the command memory mailbox array */
+       cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
+       cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
+                                               cb->cmd_mbox_size,
+                                               &cb->cmd_mbox_addr,
+                                               GFP_KERNEL);
+       if (!cb->first_cmd_mbox)
+               return false;
+
+       cmd_mbox_mem = cb->first_cmd_mbox;
+       cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
+       cb->last_cmd_mbox = cmd_mbox_mem;
+       cb->next_cmd_mbox = cb->first_cmd_mbox;
+       cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
+       cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
+
+       /* These are the base addresses for the status memory mailbox array */
+       cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
+           sizeof(struct myrb_stat_mbox);
+       cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
+                                                cb->stat_mbox_size,
+                                                &cb->stat_mbox_addr,
+                                                GFP_KERNEL);
+       if (!cb->first_stat_mbox)
+               return false;
+
+       stat_mbox_mem = cb->first_stat_mbox;
+       stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
+       cb->last_stat_mbox = stat_mbox_mem;
+       cb->next_stat_mbox = cb->first_stat_mbox;
+
+       /* Enable the Memory Mailbox Interface. */
+       cb->dual_mode_interface = true;
+       mbox.typeX.opcode = 0x2B;
+       mbox.typeX.id = 0;
+       mbox.typeX.opcode2 = 0x14;
+       mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
+       mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
+
+       status = mmio_init_fn(pdev, base, &mbox);
+       if (status != MYRB_STATUS_SUCCESS) {
+               /* Dual mode failed; retry in single mode before giving up */
+               cb->dual_mode_interface = false;
+               mbox.typeX.opcode2 = 0x10;
+               status = mmio_init_fn(pdev, base, &mbox);
+               if (status != MYRB_STATUS_SUCCESS) {
+                       dev_err(&pdev->dev,
+                               "Failed to enable mailbox, status %02X\n",
+                               status);
+                       return false;
+               }
+       }
+       return true;
+}
+
+/**
+ * myrb_get_hba_config - reads the configuration information
+ *
+ * Reads the configuration information from the controller and
+ * initializes the controller structure.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int myrb_get_hba_config(struct myrb_hba *cb)
+{
+       struct myrb_enquiry2 *enquiry2;
+       dma_addr_t enquiry2_addr;
+       struct myrb_config2 *config2;
+       dma_addr_t config2_addr;
+       struct Scsi_Host *shost = cb->host;
+       struct pci_dev *pdev = cb->pdev;
+       int pchan_max = 0, pchan_cur = 0;
+       unsigned short status;
+       int ret = -ENODEV, memsize = 0;
+
+       enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+                                     &enquiry2_addr, GFP_KERNEL);
+       if (!enquiry2) {
+               shost_printk(KERN_ERR, cb->host,
+                            "Failed to allocate V1 enquiry2 memory\n");
+               return -ENOMEM;
+       }
+       config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
+                                    &config2_addr, GFP_KERNEL);
+       if (!config2) {
+               shost_printk(KERN_ERR, cb->host,
+                            "Failed to allocate V1 config2 memory\n");
+               dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+                                 enquiry2, enquiry2_addr);
+               return -ENOMEM;
+       }
+       mutex_lock(&cb->dma_mutex);
+       status = myrb_hba_enquiry(cb);
+       mutex_unlock(&cb->dma_mutex);
+       if (status != MYRB_STATUS_SUCCESS) {
+               shost_printk(KERN_WARNING, cb->host,
+                            "Failed it issue V1 Enquiry\n");
+               goto out_free;
+       }
+
+       status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
+       if (status != MYRB_STATUS_SUCCESS) {
+               shost_printk(KERN_WARNING, cb->host,
+                            "Failed to issue V1 Enquiry2\n");
+               goto out_free;
+       }
+
+       status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
+       if (status != MYRB_STATUS_SUCCESS) {
+               shost_printk(KERN_WARNING, cb->host,
+                            "Failed to issue ReadConfig2\n");
+               goto out_free;
+       }
+
+       status = myrb_get_ldev_info(cb);
+       if (status != MYRB_STATUS_SUCCESS) {
+               shost_printk(KERN_WARNING, cb->host,
+                            "Failed to get logical drive information\n");
+               goto out_free;
+       }
+
+       /*
+        * Initialize the Controller Model Name and Full Model Name fields.
+        */
+       switch (enquiry2->hw.sub_model) {
+       case DAC960_V1_P_PD_PU:
+               if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
+                       strcpy(cb->model_name, "DAC960PU");
+               else
+                       strcpy(cb->model_name, "DAC960PD");
+               break;
+       case DAC960_V1_PL:
+               strcpy(cb->model_name, "DAC960PL");
+               break;
+       case DAC960_V1_PG:
+               strcpy(cb->model_name, "DAC960PG");
+               break;
+       case DAC960_V1_PJ:
+               strcpy(cb->model_name, "DAC960PJ");
+               break;
+       case DAC960_V1_PR:
+               strcpy(cb->model_name, "DAC960PR");
+               break;
+       case DAC960_V1_PT:
+               strcpy(cb->model_name, "DAC960PT");
+               break;
+       case DAC960_V1_PTL0:
+               strcpy(cb->model_name, "DAC960PTL0");
+               break;
+       case DAC960_V1_PRL:
+               strcpy(cb->model_name, "DAC960PRL");
+               break;
+       case DAC960_V1_PTL1:
+               strcpy(cb->model_name, "DAC960PTL1");
+               break;
+       case DAC960_V1_1164P:
+               strcpy(cb->model_name, "eXtremeRAID 1100");
+               break;
+       default:
+               shost_printk(KERN_WARNING, cb->host,
+                            "Unknown Model %X\n",
+                            enquiry2->hw.sub_model);
+               goto out;
+       }
+       /*
+        * Initialize the Controller Firmware Version field and verify that it
+        * is a supported firmware version.
+        * The supported firmware versions are:
+        *
+        * DAC1164P                 5.06 and above
+        * DAC960PTL/PRL/PJ/PG      4.06 and above
+        * DAC960PU/PD/PL           3.51 and above
+        * DAC960PU/PD/PL/P         2.73 and above
+        */
+#if defined(CONFIG_ALPHA)
+       /*
+        * DEC Alpha machines were often equipped with DAC960 cards that were
+        * OEMed from Mylex, and had their own custom firmware. Version 2.70,
+        * the last custom FW revision to be released by DEC for these older
+        * controllers, appears to work quite well with this driver.
+        *
+        * Cards tested successfully were several versions each of the PD and
+        * PU, called by DEC the KZPSC and KZPAC, respectively, and having
+        * the Manufacturer Numbers (from Mylex), usually on a sticker on the
+        * back of the board, of:
+        *
+        * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
+        *         or D040349 (3-channel)
+        * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
+        *         or D040397 (3-channel)
+        */
+# define FIRMWARE_27X  "2.70"
+#else
+# define FIRMWARE_27X  "2.73"
+#endif
+
+       if (enquiry2->fw.major_version == 0) {
+               enquiry2->fw.major_version = cb->enquiry->fw_major_version;
+               enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
+               enquiry2->fw.firmware_type = '0';
+               enquiry2->fw.turn_id = 0;
+       }
+       sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+               enquiry2->fw.major_version,
+               enquiry2->fw.minor_version,
+               enquiry2->fw.firmware_type,
+               enquiry2->fw.turn_id);
+       if (!((enquiry2->fw.major_version == 5 &&
+              enquiry2->fw.minor_version >= 6) ||
+             (enquiry2->fw.major_version == 4 &&
+              enquiry2->fw.minor_version >= 6) ||
+             (enquiry2->fw.major_version == 3 &&
+              enquiry2->fw.minor_version >= 51) ||
+             (enquiry2->fw.major_version == 2 &&
+              strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
+               shost_printk(KERN_WARNING, cb->host,
+                       "Firmware Version '%s' unsupported\n",
+                       cb->fw_version);
+               goto out;
+       }
+       /*
+        * Initialize the Channels, Targets, Memory Size, and SAF-TE
+        * Enclosure Management Enabled fields.
+        */
+       switch (enquiry2->hw.model) {
+       case MYRB_5_CHANNEL_BOARD:
+               pchan_max = 5;
+               break;
+       case MYRB_3_CHANNEL_BOARD:
+       case MYRB_3_CHANNEL_ASIC_DAC:
+               pchan_max = 3;
+               break;
+       case MYRB_2_CHANNEL_BOARD:
+               pchan_max = 2;
+               break;
+       default:
+               pchan_max = enquiry2->cfg_chan;
+               break;
+       }
+       pchan_cur = enquiry2->cur_chan;
+       if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
+               cb->bus_width = 32;
+       else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
+               cb->bus_width = 16;
+       else
+               cb->bus_width = 8;
+       cb->ldev_block_size = enquiry2->ldev_block_size;
+       shost->max_channel = pchan_cur;
+       shost->max_id = enquiry2->max_targets;
+       memsize = enquiry2->mem_size >> 20;
+       cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
+       /*
+        * Initialize the Controller Queue Depth, Driver Queue Depth,
+        * Logical Drive Count, Maximum Blocks per Command, Controller
+        * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+        * The Driver Queue Depth must be at most one less than the
+        * Controller Queue Depth to allow for an automatic drive
+        * rebuild operation.
+        */
+       shost->can_queue = cb->enquiry->max_tcq;
+       if (shost->can_queue < 3)
+               shost->can_queue = enquiry2->max_cmds;
+       if (shost->can_queue < 3)
+               /* Play safe and disable TCQ */
+               shost->can_queue = 1;
+
+       if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
+               shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
+       shost->max_sectors = enquiry2->max_sectors;
+       shost->sg_tablesize = enquiry2->max_sge;
+       if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
+               shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
+       /*
+        * Initialize the Stripe Size, Segment Size, and Geometry Translation.
+        */
+       cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
+               >> (10 - MYRB_BLKSIZE_BITS);
+       cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
+               >> (10 - MYRB_BLKSIZE_BITS);
+       /* Assume 255/63 translation */
+       cb->ldev_geom_heads = 255;
+       cb->ldev_geom_sectors = 63;
+       if (config2->drive_geometry) {
+               cb->ldev_geom_heads = 128;
+               cb->ldev_geom_sectors = 32;
+       }
+
+       /*
+        * Initialize the Background Initialization Status.
+        */
+       if ((cb->fw_version[0] == '4' &&
+            strcmp(cb->fw_version, "4.08") >= 0) ||
+           (cb->fw_version[0] == '5' &&
+            strcmp(cb->fw_version, "5.08") >= 0)) {
+               cb->bgi_status_supported = true;
+               myrb_bgi_control(cb);
+       }
+       cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
+       ret = 0;
+
+out:
+       shost_printk(KERN_INFO, cb->host,
+               "Configuring %s PCI RAID Controller\n", cb->model_name);
+       shost_printk(KERN_INFO, cb->host,
+               "  Firmware Version: %s, Memory Size: %dMB\n",
+               cb->fw_version, memsize);
+       if (cb->io_addr == 0)
+               shost_printk(KERN_INFO, cb->host,
+                       "  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
+                       (unsigned long)cb->pci_addr, cb->irq);
+       else
+               shost_printk(KERN_INFO, cb->host,
+                       "  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
+                       (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
+                       cb->irq);
+       shost_printk(KERN_INFO, cb->host,
+               "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+               cb->host->can_queue, cb->host->max_sectors);
+       shost_printk(KERN_INFO, cb->host,
+                    "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+                    cb->host->can_queue, cb->host->sg_tablesize,
+                    MYRB_SCATTER_GATHER_LIMIT);
+       shost_printk(KERN_INFO, cb->host,
+                    "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
+                    cb->stripe_size, cb->segment_size,
+                    cb->ldev_geom_heads, cb->ldev_geom_sectors,
+                    cb->safte_enabled ?
+                    "  SAF-TE Enclosure Management Enabled" : "");
+       shost_printk(KERN_INFO, cb->host,
+                    "  Physical: %d/%d channels %d/%d/%d devices\n",
+                    pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
+                    cb->host->max_id);
+
+       shost_printk(KERN_INFO, cb->host,
+                    "  Logical: 1/1 channels, %d/%d disks\n",
+                    cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
+
+out_free:
+       dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+                         enquiry2, enquiry2_addr);
+       dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
+                         config2, config2_addr);
+
+       return ret;
+}
+
+/**
+ * myrb_unmap - unmaps controller structures
+ * @cb: myrb_hba instance whose DMA buffers should be released
+ *
+ * Frees every DMA-coherent buffer allocated for the controller and
+ * clears the corresponding pointers, so the function is safe to call
+ * on a partially initialised controller.
+ */
+static void myrb_unmap(struct myrb_hba *cb)
+{
+       struct device *dev = &cb->pdev->dev;
+
+       if (cb->ldev_info_buf) {
+               size_t size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
+
+               dma_free_coherent(dev, size, cb->ldev_info_buf,
+                                 cb->ldev_info_addr);
+               cb->ldev_info_buf = NULL;
+       }
+       if (cb->err_table) {
+               size_t size = sizeof(struct myrb_error_entry) *
+                       MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+
+               dma_free_coherent(dev, size, cb->err_table,
+                                 cb->err_table_addr);
+               cb->err_table = NULL;
+       }
+       if (cb->enquiry) {
+               dma_free_coherent(dev, sizeof(struct myrb_enquiry),
+                                 cb->enquiry, cb->enquiry_addr);
+               cb->enquiry = NULL;
+       }
+       if (cb->first_stat_mbox) {
+               dma_free_coherent(dev, cb->stat_mbox_size,
+                                 cb->first_stat_mbox, cb->stat_mbox_addr);
+               cb->first_stat_mbox = NULL;
+       }
+       if (cb->first_cmd_mbox) {
+               dma_free_coherent(dev, cb->cmd_mbox_size,
+                                 cb->first_cmd_mbox, cb->cmd_mbox_addr);
+               cb->first_cmd_mbox = NULL;
+       }
+}
+
+/**
+ * myrb_cleanup - cleanup controller structures
+ * @cb: myrb_hba instance to tear down
+ *
+ * Releases all resources acquired during probe: DMA buffers, MMIO
+ * mapping, IRQ, I/O region, and finally the SCSI host reference.
+ * Each step is guarded by the corresponding resource being present,
+ * so partial initialisation is handled.
+ */
+static void myrb_cleanup(struct myrb_hba *cb)
+{
+       struct pci_dev *pdev = cb->pdev;
+
+       /* Free the memory mailbox, status, and related structures */
+       myrb_unmap(cb);
+
+       if (cb->mmio_base) {
+               /* Quiesce controller interrupts before unmapping registers */
+               cb->disable_intr(cb->io_base);
+               iounmap(cb->mmio_base);
+       }
+       if (cb->irq)
+               free_irq(cb->irq, cb);
+       if (cb->io_addr)
+               release_region(cb->io_addr, 0x80);
+       pci_set_drvdata(pdev, NULL);
+       pci_disable_device(pdev);
+       /* Drops the last reference; frees the embedded myrb_hba */
+       scsi_host_put(cb->host);
+}
+
+/**
+ * myrb_host_reset - SCSI eh_host_reset_handler for the myrb driver
+ * @scmd: command that triggered the host reset
+ *
+ * Return: SUCCESS
+ */
+static int myrb_host_reset(struct scsi_cmnd *scmd)
+{
+       struct myrb_hba *cb = shost_priv(scmd->device->host);
+
+       cb->reset(cb->io_base);
+       return SUCCESS;
+}
+
+/**
+ * myrb_pthru_queuecommand - queues a SCSI command as a direct CDB (DCDB)
+ * @shost: SCSI host the command was issued to
+ * @scmd: SCSI command to pass through to the physical device
+ *
+ * Builds a MYRB_CMD_DCDB pass-through command for a physical device and
+ * queues it to the controller.  Commands with more than one
+ * scatter/gather segment are completed with DID_ERROR since the DCDB
+ * carries only a single data segment.
+ *
+ * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY if no DCDB could be
+ * allocated.
+ */
+static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
+               struct scsi_cmnd *scmd)
+{
+       struct myrb_hba *cb = shost_priv(shost);
+       struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct myrb_dcdb *dcdb;
+       dma_addr_t dcdb_addr;
+       struct scsi_device *sdev = scmd->device;
+       struct scatterlist *sgl;
+       unsigned long flags;
+       int nsge;
+
+       myrb_reset_cmd(cmd_blk);
+       dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
+       if (!dcdb)
+               return SCSI_MLQUEUE_HOST_BUSY;
+       /*
+        * NOTE(review): a negative return (mapping failure) is not handled
+        * separately here — confirm scsi_dma_map() cannot fail for these
+        * commands or that the nsge > 1 path is meant to cover it.
+        */
+       nsge = scsi_dma_map(scmd);
+       if (nsge > 1) {
+               dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
+               scmd->result = (DID_ERROR << 16);
+               scmd->scsi_done(scmd);
+               return 0;
+       }
+
+       mbox->type3.opcode = MYRB_CMD_DCDB;
+       /*
+        * Block-layer tag offset by 3, presumably to keep clear of the
+        * driver-internal DCMD/MCMD tags — TODO confirm against the tag
+        * definitions.
+        */
+       mbox->type3.id = scmd->request->tag + 3;
+       mbox->type3.addr = dcdb_addr;
+       dcdb->channel = sdev->channel;
+       dcdb->target = sdev->id;
+       switch (scmd->sc_data_direction) {
+       case DMA_NONE:
+               dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
+               break;
+       case DMA_TO_DEVICE:
+               dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
+               break;
+       case DMA_FROM_DEVICE:
+               dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
+               break;
+       default:
+               dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
+               break;
+       }
+       dcdb->early_status = false;
+       /* Map the midlayer timeout (seconds) onto the DCDB timeout classes */
+       if (scmd->request->timeout <= 10)
+               dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
+       else if (scmd->request->timeout <= 60)
+               dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
+       else if (scmd->request->timeout <= 600)
+               dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
+       else
+               dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
+       dcdb->no_autosense = false;
+       dcdb->allow_disconnect = true;
+       sgl = scsi_sglist(scmd);
+       dcdb->dma_addr = sg_dma_address(sgl);
+       /* Transfer length is split into a 16-bit low and 4-bit high part */
+       if (sg_dma_len(sgl) > USHRT_MAX) {
+               dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
+               dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
+       } else {
+               dcdb->xfer_len_lo = sg_dma_len(sgl);
+               dcdb->xfer_len_hi4 = 0;
+       }
+       dcdb->cdb_len = scmd->cmd_len;
+       dcdb->sense_len = sizeof(dcdb->sense);
+       memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
+
+       spin_lock_irqsave(&cb->queue_lock, flags);
+       cb->qcmd(cb, cmd_blk);
+       spin_unlock_irqrestore(&cb->queue_lock, flags);
+       return 0;
+}
+
+/**
+ * myrb_inquiry - emulates an INQUIRY command for a logical drive
+ * @cb: myrb_hba instance
+ * @scmd: SCSI command to complete
+ *
+ * Fills the command's data buffer with a canned 36-byte standard
+ * INQUIRY response carrying the controller model name and firmware
+ * revision digits.
+ */
+static void myrb_inquiry(struct myrb_hba *cb,
+               struct scsi_cmnd *scmd)
+{
+       /* Canned INQUIRY data; bytes 8-15 spell the vendor id "MYLEX   " */
+       unsigned char inq[36] = {
+               0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
+               0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
+               0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+               0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+               0x20, 0x20, 0x20, 0x20,
+       };
+
+       /* Advertise wide-bus support according to the controller bus width */
+       if (cb->bus_width > 16)
+               inq[7] |= 1 << 6;
+       if (cb->bus_width > 8)
+               inq[7] |= 1 << 5;
+       memcpy(&inq[16], cb->model_name, 16);
+       /* Revision bytes picked out of the "M.mm-t-ii" fw_version string */
+       inq[32] = cb->fw_version[0];
+       inq[33] = cb->fw_version[2];
+       inq[34] = cb->fw_version[3];
+       inq[35] = cb->fw_version[7];
+
+       scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
+}
+
+/**
+ * myrb_mode_sense - emulates a MODE SENSE(6) for the caching mode page
+ * @cb: myrb_hba instance
+ * @scmd: SCSI command to complete
+ * @ldev_info: logical drive the command is addressed to
+ *
+ * Builds a mode-sense response containing the caching mode page (0x08),
+ * preceded by a block descriptor unless the DBD bit is set in the CDB.
+ */
+static void
+myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+               struct myrb_ldev_info *ldev_info)
+{
+       unsigned char buf[32], *pg;
+       bool no_block_desc;
+       size_t len;
+
+       /* DBD bit set: the initiator asked us to omit the block descriptor */
+       no_block_desc = (scmd->cmnd[1] & 0x08) == 0x08;
+       if (no_block_desc) {
+               len = 24;
+               pg = &buf[4];
+       } else {
+               len = 32;
+               pg = &buf[12];
+       }
+       memset(buf, 0, sizeof(buf));
+       buf[0] = len - 1;
+       if (!no_block_desc) {
+               unsigned char *desc = &buf[4];
+
+               buf[3] = 8;
+               put_unaligned_be32(ldev_info->size, &desc[0]);
+               /*
+                * The be32 store at desc[5] spills one byte into buf[12];
+                * pg[0] below deliberately overwrites that byte.
+                */
+               put_unaligned_be32(cb->ldev_block_size, &desc[5]);
+       }
+       pg[0] = 0x08;
+       pg[1] = 0x12;
+       if (ldev_info->wb_enabled)
+               pg[2] |= 0x04;
+       if (cb->segment_size) {
+               pg[2] |= 0x08;
+               put_unaligned_be16(cb->segment_size, &pg[14]);
+       }
+
+       scsi_sg_copy_from_buffer(scmd, buf, len);
+}
+
+/*
+ * myrb_request_sense - emulate REQUEST SENSE for a logical drive
+ *
+ * Emulated commands complete immediately and retain no sense data, so
+ * always report NO SENSE.  The sense buffer is also copied into the
+ * data-in buffer, as REQUEST SENSE expects.
+ */
+static void myrb_request_sense(struct myrb_hba *cb,
+               struct scsi_cmnd *scmd)
+{
+       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                               NO_SENSE, 0, 0);
+       scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
+                                SCSI_SENSE_BUFFERSIZE);
+}
+
+/*
+ * myrb_read_capacity - emulate READ CAPACITY(10) for a logical drive
+ *
+ * Fills the 8-byte response with the address of the last logical block
+ * and the block size, both big-endian.
+ */
+static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+               struct myrb_ldev_info *ldev_info)
+{
+       unsigned char resp[8];
+
+       dev_dbg(&scmd->device->sdev_gendev,
+               "Capacity %u, blocksize %u\n",
+               ldev_info->size, cb->ldev_block_size);
+       /* The returned LBA is that of the last block, hence size - 1 */
+       put_unaligned_be32(ldev_info->size - 1, &resp[0]);
+       put_unaligned_be32(cb->ldev_block_size, &resp[4]);
+       scsi_sg_copy_from_buffer(scmd, resp, sizeof(resp));
+}
+
+/*
+ * myrb_ldev_queuecommand - queue a SCSI command to a logical drive
+ *
+ * Commands the firmware cannot handle for logical drives (INQUIRY,
+ * MODE SENSE, READ CAPACITY, ...) are emulated here and completed
+ * immediately; READ/WRITE/VERIFY variants are translated into DAC960
+ * type5 mailbox commands and queued to the controller.
+ *
+ * Return: 0, or SCSI_MLQUEUE_HOST_BUSY when DMA mapping or S/G list
+ * allocation fails so the midlayer retries later.
+ */
+static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
+               struct scsi_cmnd *scmd)
+{
+       struct myrb_hba *cb = shost_priv(shost);
+       struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+       union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct myrb_ldev_info *ldev_info;
+       struct scsi_device *sdev = scmd->device;
+       struct scatterlist *sgl;
+       unsigned long flags;
+       u64 lba;
+       u32 block_cnt;
+       int nsge;
+
+       ldev_info = sdev->hostdata;
+       /*
+        * Check the pointer before dereferencing it; the dev_dbg()
+        * below already anticipated a NULL hostdata but the old state
+        * test dereferenced it unconditionally.
+        */
+       if (!ldev_info ||
+           (ldev_info->state != MYRB_DEVICE_ONLINE &&
+            ldev_info->state != MYRB_DEVICE_WO)) {
+               dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
+                       sdev->id, ldev_info ? ldev_info->state : 0xff);
+               scmd->result = (DID_BAD_TARGET << 16);
+               scmd->scsi_done(scmd);
+               return 0;
+       }
+       switch (scmd->cmnd[0]) {
+       case TEST_UNIT_READY:
+               scmd->result = (DID_OK << 16);
+               scmd->scsi_done(scmd);
+               return 0;
+       case INQUIRY:
+               if (scmd->cmnd[1] & 1) {
+                       /* Illegal request, invalid field in CDB */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               ILLEGAL_REQUEST, 0x24, 0);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+               } else {
+                       myrb_inquiry(cb, scmd);
+                       scmd->result = (DID_OK << 16);
+               }
+               scmd->scsi_done(scmd);
+               return 0;
+       case SYNCHRONIZE_CACHE:
+               scmd->result = (DID_OK << 16);
+               scmd->scsi_done(scmd);
+               return 0;
+       case MODE_SENSE:
+               if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+                   (scmd->cmnd[2] & 0x3F) != 0x08) {
+                       /* Illegal request, invalid field in CDB */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               ILLEGAL_REQUEST, 0x24, 0);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+               } else {
+                       myrb_mode_sense(cb, scmd, ldev_info);
+                       scmd->result = (DID_OK << 16);
+               }
+               scmd->scsi_done(scmd);
+               return 0;
+       case READ_CAPACITY:
+               if ((scmd->cmnd[1] & 1) ||
+                   (scmd->cmnd[8] & 1)) {
+                       /* Illegal request, invalid field in CDB */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               ILLEGAL_REQUEST, 0x24, 0);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+               lba = get_unaligned_be32(&scmd->cmnd[2]);
+               if (lba) {
+                       /* Illegal request, invalid field in CDB */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               ILLEGAL_REQUEST, 0x24, 0);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+               myrb_read_capacity(cb, scmd, ldev_info);
+               scmd->result = (DID_OK << 16);
+               scmd->scsi_done(scmd);
+               return 0;
+       case REQUEST_SENSE:
+               myrb_request_sense(cb, scmd);
+               scmd->result = (DID_OK << 16);
+               /*
+                * Complete the command like every other emulated case;
+                * the old code returned without calling scsi_done(),
+                * leaking the command.
+                */
+               scmd->scsi_done(scmd);
+               return 0;
+       case SEND_DIAGNOSTIC:
+               if (scmd->cmnd[1] != 0x04) {
+                       /* Illegal request, invalid field in CDB */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               ILLEGAL_REQUEST, 0x24, 0);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+               } else {
+                       /* Assume good status */
+                       scmd->result = (DID_OK << 16);
+               }
+               scmd->scsi_done(scmd);
+               return 0;
+       case READ_6:
+               if (ldev_info->state == MYRB_DEVICE_WO) {
+                       /* Data protect, attempt to read invalid data */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               DATA_PROTECT, 0x21, 0x06);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+               /* fall through */
+       case WRITE_6:
+               lba = (((scmd->cmnd[1] & 0x1F) << 16) |
+                      (scmd->cmnd[2] << 8) |
+                      scmd->cmnd[3]);
+               block_cnt = scmd->cmnd[4];
+               break;
+       case READ_10:
+               if (ldev_info->state == MYRB_DEVICE_WO) {
+                       /* Data protect, attempt to read invalid data */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               DATA_PROTECT, 0x21, 0x06);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+               /* fall through */
+       case WRITE_10:
+       case VERIFY:            /* 0x2F */
+       case WRITE_VERIFY:      /* 0x2E */
+               lba = get_unaligned_be32(&scmd->cmnd[2]);
+               block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
+               break;
+       case READ_12:
+               if (ldev_info->state == MYRB_DEVICE_WO) {
+                       /* Data protect, attempt to read invalid data */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               DATA_PROTECT, 0x21, 0x06);
+                       scmd->result = (DRIVER_SENSE << 24) |
+                               SAM_STAT_CHECK_CONDITION;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+               /* fall through */
+       case WRITE_12:
+       case VERIFY_12: /* 0xAF */
+       case WRITE_VERIFY_12:   /* 0xAE */
+               lba = get_unaligned_be32(&scmd->cmnd[2]);
+               block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+               break;
+       default:
+               /* Illegal request, invalid opcode */
+               scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                       ILLEGAL_REQUEST, 0x20, 0);
+               scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+               scmd->scsi_done(scmd);
+               return 0;
+       }
+
+       myrb_reset_cmd(cmd_blk);
+       mbox->type5.id = scmd->request->tag + 3;
+       if (scmd->sc_data_direction == DMA_NONE)
+               goto submit;
+       nsge = scsi_dma_map(scmd);
+       if (nsge < 0)
+               /* DMA mapping failed; have the midlayer retry later */
+               return SCSI_MLQUEUE_HOST_BUSY;
+       if (nsge == 1) {
+               /* Single segment: use the direct (non-S/G) command form */
+               sgl = scsi_sglist(scmd);
+               if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+                       mbox->type5.opcode = MYRB_CMD_READ;
+               else
+                       mbox->type5.opcode = MYRB_CMD_WRITE;
+
+               mbox->type5.ld.xfer_len = block_cnt;
+               mbox->type5.ld.ldev_num = sdev->id;
+               mbox->type5.lba = lba;
+               mbox->type5.addr = (u32)sg_dma_address(sgl);
+       } else {
+               struct myrb_sge *hw_sgl;
+               dma_addr_t hw_sgl_addr;
+               int i;
+
+               hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
+               if (!hw_sgl)
+                       return SCSI_MLQUEUE_HOST_BUSY;
+
+               cmd_blk->sgl = hw_sgl;
+               cmd_blk->sgl_addr = hw_sgl_addr;
+
+               if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+                       mbox->type5.opcode = MYRB_CMD_READ_SG;
+               else
+                       mbox->type5.opcode = MYRB_CMD_WRITE_SG;
+
+               mbox->type5.ld.xfer_len = block_cnt;
+               mbox->type5.ld.ldev_num = sdev->id;
+               mbox->type5.lba = lba;
+               mbox->type5.addr = hw_sgl_addr;
+               mbox->type5.sg_count = nsge;
+
+               /* Build the hardware S/G list from the mapped sglist */
+               scsi_for_each_sg(scmd, sgl, nsge, i) {
+                       hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
+                       hw_sgl->sge_count = (u32)sg_dma_len(sgl);
+                       hw_sgl++;
+               }
+       }
+submit:
+       spin_lock_irqsave(&cb->queue_lock, flags);
+       cb->qcmd(cb, cmd_blk);
+       spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+       return 0;
+}
+
+/*
+ * myrb_queuecommand - top-level queuecommand entry point
+ *
+ * Channels below the logical-drive channel address physical devices
+ * (passthrough); the logical-drive channel itself addresses logical
+ * drives; anything beyond is an invalid target.
+ */
+static int myrb_queuecommand(struct Scsi_Host *shost,
+               struct scsi_cmnd *scmd)
+{
+       struct scsi_device *sdev = scmd->device;
+
+       if (sdev->channel < myrb_logical_channel(shost))
+               return myrb_pthru_queuecommand(shost, scmd);
+       if (sdev->channel == myrb_logical_channel(shost))
+               return myrb_ldev_queuecommand(shost, scmd);
+
+       scmd->result = (DID_BAD_TARGET << 16);
+       scmd->scsi_done(scmd);
+       return 0;
+}
+
+/*
+ * myrb_ldev_slave_alloc - allocate per-device data for a logical drive
+ *
+ * Copies the controller's logical drive information into hostdata and
+ * registers the RAID level with the raid class template.
+ */
+static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+{
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_ldev_info *ldev_info;
+       unsigned short ldev_num = sdev->id;
+       enum raid_level level;
+
+       /*
+        * The old "if (!ldev_info)" test after the pointer arithmetic
+        * could never trigger; check the buffer itself instead.
+        */
+       if (!cb->ldev_info_buf)
+               return -ENXIO;
+       ldev_info = cb->ldev_info_buf + ldev_num;
+
+       /* Private copy so the device keeps its info across updates */
+       sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
+       if (!sdev->hostdata)
+               return -ENOMEM;
+       dev_dbg(&sdev->sdev_gendev,
+               "slave alloc ldev %d state %x\n",
+               ldev_num, ldev_info->state);
+       memcpy(sdev->hostdata, ldev_info,
+              sizeof(*ldev_info));
+       /* Map the firmware raid level onto the generic raid class */
+       switch (ldev_info->raid_level) {
+       case MYRB_RAID_LEVEL0:
+               level = RAID_LEVEL_LINEAR;
+               break;
+       case MYRB_RAID_LEVEL1:
+               level = RAID_LEVEL_1;
+               break;
+       case MYRB_RAID_LEVEL3:
+               level = RAID_LEVEL_3;
+               break;
+       case MYRB_RAID_LEVEL5:
+               level = RAID_LEVEL_5;
+               break;
+       case MYRB_RAID_LEVEL6:
+               level = RAID_LEVEL_6;
+               break;
+       case MYRB_RAID_JBOD:
+               level = RAID_LEVEL_JBOD;
+               break;
+       default:
+               level = RAID_LEVEL_UNKNOWN;
+               break;
+       }
+       raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
+       return 0;
+}
+
+/*
+ * myrb_pdev_slave_alloc - allocate per-device data for a physical drive
+ *
+ * Queries the controller for the device state and stashes it in
+ * hostdata; absent or unreachable devices are rejected with -ENXIO.
+ */
+static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+{
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_pdev_state *pdev_info;
+       unsigned short status;
+
+       /* NOTE(review): boundary check uses '>'; confirm whether target
+        * ids run 0..MYRB_MAX_TARGETS-1, in which case '>=' is meant. */
+       if (sdev->id > MYRB_MAX_TARGETS)
+               return -ENXIO;
+
+       pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+       if (!pdev_info)
+               return -ENOMEM;
+
+       status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+                                 sdev, pdev_info);
+       if (status != MYRB_STATUS_SUCCESS) {
+               dev_dbg(&sdev->sdev_gendev,
+                       "Failed to get device state, status %x\n",
+                       status);
+               goto out_free;
+       }
+       if (!pdev_info->present) {
+               dev_dbg(&sdev->sdev_gendev,
+                       "device not present, skip\n");
+               goto out_free;
+       }
+       dev_dbg(&sdev->sdev_gendev,
+               "slave alloc pdev %d:%d state %x\n",
+               sdev->channel, sdev->id, pdev_info->state);
+       sdev->hostdata = pdev_info;
+
+       return 0;
+
+out_free:
+       kfree(pdev_info);
+       return -ENXIO;
+}
+
+/*
+ * myrb_slave_alloc - dispatch slave_alloc to the right device type
+ *
+ * Only LUN 0 is supported, and only channels up to and including the
+ * logical-drive channel exist.
+ */
+static int myrb_slave_alloc(struct scsi_device *sdev)
+{
+       int logical = myrb_logical_channel(sdev->host);
+
+       if (sdev->channel > logical || sdev->lun > 0)
+               return -ENXIO;
+
+       return sdev->channel == logical ?
+               myrb_ldev_slave_alloc(sdev) :
+               myrb_pdev_slave_alloc(sdev);
+}
+
+/*
+ * myrb_slave_configure - configure a scanned device
+ *
+ * Physical devices are hidden from upper-layer drivers (passthrough
+ * only); logical drives get a state warning when not fully online and
+ * have tagged queueing enabled.
+ */
+static int myrb_slave_configure(struct scsi_device *sdev)
+{
+       struct myrb_ldev_info *ldev_info;
+       int logical = myrb_logical_channel(sdev->host);
+
+       if (sdev->channel > logical)
+               return -ENXIO;
+       if (sdev->channel < logical) {
+               /* Physical device: keep ULDs (sd, st, ...) away */
+               sdev->no_uld_attach = 1;
+               return 0;
+       }
+       if (sdev->lun != 0)
+               return -ENXIO;
+
+       ldev_info = sdev->hostdata;
+       if (!ldev_info)
+               return -ENXIO;
+       if (ldev_info->state != MYRB_DEVICE_ONLINE)
+               sdev_printk(KERN_INFO, sdev,
+                           "Logical drive is %s\n",
+                           myrb_devstate_name(ldev_info->state));
+
+       sdev->tagged_supported = 1;
+       return 0;
+}
+
+/* Free the per-device info allocated in the slave_alloc callbacks */
+static void myrb_slave_destroy(struct scsi_device *sdev)
+{
+       kfree(sdev->hostdata);
+}
+
+/*
+ * myrb_biosparam - report BIOS disk geometry
+ *
+ * Heads and sectors-per-track come from the controller configuration;
+ * cylinders are derived from the capacity.
+ */
+static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+               sector_t capacity, int geom[])
+{
+       struct myrb_hba *cb = shost_priv(sdev->host);
+
+       geom[0] = cb->ldev_geom_heads;
+       geom[1] = cb->ldev_geom_sectors;
+       /*
+        * sector_div() divides 'capacity' in place and returns the
+        * remainder; the cylinder count is the quotient, not the
+        * remainder the old code stored.
+        */
+       sector_div(capacity, geom[0] * geom[1]);
+       geom[2] = (int)capacity;
+
+       return 0;
+}
+
+/*
+ * raid_state_show - sysfs 'raid_state' attribute
+ *
+ * Logical drives report their cached state; physical devices are
+ * re-queried from the firmware first.
+ */
+static ssize_t raid_state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       const char *name;
+
+       if (!sdev->hostdata)
+               return snprintf(buf, 16, "Unknown\n");
+
+       if (sdev->channel == myrb_logical_channel(sdev->host)) {
+               struct myrb_ldev_info *ldev_info = sdev->hostdata;
+
+               name = myrb_devstate_name(ldev_info->state);
+               if (!name)
+                       return snprintf(buf, 32, "Invalid (%02X)\n",
+                                       ldev_info->state);
+               return snprintf(buf, 32, "%s\n", name);
+       } else {
+               struct myrb_pdev_state *pdev_info = sdev->hostdata;
+               unsigned short status;
+
+               /* Refresh the physical device state from the firmware */
+               status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+                                         sdev, pdev_info);
+               if (status != MYRB_STATUS_SUCCESS)
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Failed to get device state, status %x\n",
+                                   status);
+
+               name = pdev_info->present ?
+                       myrb_devstate_name(pdev_info->state) : "Removed";
+               if (!name)
+                       return snprintf(buf, 32, "Invalid (%02X)\n",
+                                       pdev_info->state);
+               return snprintf(buf, 32, "%s\n", name);
+       }
+}
+
+/*
+ * raid_state_store - sysfs 'raid_state' attribute writer
+ *
+ * Accepts "kill"/"offline", "online" or "standby" and asks the
+ * firmware to move the physical device into the requested state.
+ */
+static ssize_t raid_state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_pdev_state *pdev_info;
+       enum myrb_devstate new_state;
+       unsigned short status;
+
+       /* Parse the requested state keyword */
+       if (!strncmp(buf, "kill", 4) ||
+           !strncmp(buf, "offline", 7))
+               new_state = MYRB_DEVICE_DEAD;
+       else if (!strncmp(buf, "online", 6))
+               new_state = MYRB_DEVICE_ONLINE;
+       else if (!strncmp(buf, "standby", 7))
+               new_state = MYRB_DEVICE_STANDBY;
+       else
+               return -EINVAL;
+
+       pdev_info = sdev->hostdata;
+       if (!pdev_info) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed - no physical device information\n");
+               return -ENXIO;
+       }
+       if (!pdev_info->present) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed - device not present\n");
+               return -ENXIO;
+       }
+
+       /* Nothing to do when already in the requested state */
+       if (pdev_info->state == new_state)
+               return count;
+
+       status = myrb_set_pdev_state(cb, sdev, new_state);
+       switch (status) {
+       case MYRB_STATUS_SUCCESS:
+               return count;
+       case MYRB_STATUS_START_DEVICE_FAILED:
+               sdev_printk(KERN_INFO, sdev,
+                            "Failed - Unable to Start Device\n");
+               return -EAGAIN;
+       case MYRB_STATUS_NO_DEVICE:
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed - No Device at Address\n");
+               return -ENODEV;
+       case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
+               sdev_printk(KERN_INFO, sdev,
+                        "Failed - Invalid Channel or Target or Modifier\n");
+               return -EINVAL;
+       case MYRB_STATUS_CHANNEL_BUSY:
+               sdev_printk(KERN_INFO, sdev,
+                        "Failed - Channel Busy\n");
+               return -EBUSY;
+       default:
+               sdev_printk(KERN_INFO, sdev,
+                        "Failed - Unexpected Status %04X\n", status);
+               return -EIO;
+       }
+}
+static DEVICE_ATTR_RW(raid_state);
+
+/*
+ * raid_level_show - sysfs 'raid_level' attribute
+ *
+ * Logical drives report their RAID level; physical devices just say
+ * "Physical Drive".
+ */
+static ssize_t raid_level_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       if (sdev->channel == myrb_logical_channel(sdev->host)) {
+               struct myrb_ldev_info *ldev_info = sdev->hostdata;
+               const char *name;
+
+               if (!ldev_info)
+                       return -ENXIO;
+
+               name = myrb_raidlevel_name(ldev_info->raid_level);
+               if (!name)
+                       /*
+                        * Report the unrecognized raid level itself;
+                        * the old code printed the device state here.
+                        */
+                       return snprintf(buf, 32, "Invalid (%02X)\n",
+                                       ldev_info->raid_level);
+               return snprintf(buf, 32, "%s\n", name);
+       }
+       return snprintf(buf, 32, "Physical Drive\n");
+}
+static DEVICE_ATTR_RO(raid_level);
+
+/*
+ * rebuild_show - sysfs 'rebuild' attribute
+ *
+ * Reports rebuild progress for the logical drive, or "not rebuilding"
+ * when no rebuild is running for this drive.
+ */
+static ssize_t rebuild_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_rbld_progress rbld_buf;
+       unsigned short status;
+
+       if (sdev->channel < myrb_logical_channel(sdev->host))
+               return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+       status = myrb_get_rbld_progress(cb, &rbld_buf);
+
+       /*
+        * 'status' must be unsigned short, not unsigned char as before:
+        * 16-bit failure codes with a zero low byte would otherwise be
+        * truncated to "success".  Also validate the status before
+        * consulting rbld_buf, which is only filled in on success.
+        */
+       if (status != MYRB_STATUS_SUCCESS ||
+           rbld_buf.ldev_num != sdev->id)
+               return snprintf(buf, 32, "not rebuilding\n");
+
+       return snprintf(buf, 32, "rebuilding block %u of %u\n",
+                       rbld_buf.ldev_size - rbld_buf.blocks_left,
+                       rbld_buf.ldev_size);
+}
+
+/*
+ * rebuild_store - sysfs 'rebuild' attribute writer
+ *
+ * Writing a non-zero value starts an asynchronous rebuild of this
+ * physical drive; writing 0 cancels a rebuild in progress.  Firmware
+ * commands are serialized against other direct commands through
+ * cb->dcmd_mutex.
+ */
+static ssize_t rebuild_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_cmdblk *cmd_blk;
+       union myrb_cmd_mbox *mbox;
+       unsigned short status;
+       int rc, start;
+       const char *msg;
+
+       rc = kstrtoint(buf, 0, &start);
+       if (rc)
+               return rc;
+
+       /* Rebuild applies to physical devices only */
+       if (sdev->channel >= myrb_logical_channel(sdev->host))
+               return -ENXIO;
+
+       /* Success here means a rebuild is currently in progress */
+       status = myrb_get_rbld_progress(cb, NULL);
+       if (start) {
+               if (status == MYRB_STATUS_SUCCESS) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild Not Initiated; already in progress\n");
+                       return -EALREADY;
+               }
+               mutex_lock(&cb->dcmd_mutex);
+               cmd_blk = &cb->dcmd_blk;
+               myrb_reset_cmd(cmd_blk);
+               mbox = &cmd_blk->mbox;
+               mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
+               mbox->type3D.id = MYRB_DCMD_TAG;
+               mbox->type3D.channel = sdev->channel;
+               mbox->type3D.target = sdev->id;
+               status = myrb_exec_cmd(cb, cmd_blk);
+               mutex_unlock(&cb->dcmd_mutex);
+       } else {
+               struct pci_dev *pdev = cb->pdev;
+               unsigned char *rate;
+               dma_addr_t rate_addr;
+
+               if (status != MYRB_STATUS_SUCCESS) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Rebuild Not Cancelled; not in progress\n");
+                       return 0;
+               }
+
+               /* The rate byte is read by the controller via DMA */
+               rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+                                         &rate_addr, GFP_KERNEL);
+               if (rate == NULL) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Cancellation of Rebuild Failed - Out of Memory\n");
+                       return -ENOMEM;
+               }
+               mutex_lock(&cb->dcmd_mutex);
+               cmd_blk = &cb->dcmd_blk;
+               myrb_reset_cmd(cmd_blk);
+               mbox = &cmd_blk->mbox;
+               mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+               mbox->type3R.id = MYRB_DCMD_TAG;
+               /* 0xFF: cancel the running rebuild/check */
+               mbox->type3R.rbld_rate = 0xFF;
+               mbox->type3R.addr = rate_addr;
+               status = myrb_exec_cmd(cb, cmd_blk);
+               dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+               mutex_unlock(&cb->dcmd_mutex);
+       }
+       if (status == MYRB_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+                           start ? "Initiated" : "Cancelled");
+               return count;
+       }
+       if (!start) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Rebuild Not Cancelled, status 0x%x\n",
+                           status);
+               return -EIO;
+       }
+
+       /* Map the firmware failure code to a human-readable reason */
+       switch (status) {
+       case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+               msg = "Attempt to Rebuild Online or Unresponsive Drive";
+               break;
+       case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+               msg = "New Disk Failed During Rebuild";
+               break;
+       case MYRB_STATUS_INVALID_ADDRESS:
+               msg = "Invalid Device Address";
+               break;
+       case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+               msg = "Already in Progress";
+               break;
+       default:
+               msg = NULL;
+               break;
+       }
+       if (msg)
+               sdev_printk(KERN_INFO, sdev,
+                           "Rebuild Failed - %s\n", msg);
+       else
+               sdev_printk(KERN_INFO, sdev,
+                           "Rebuild Failed, status 0x%x\n", status);
+
+       return -EIO;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+/*
+ * consistency_check_store - sysfs 'consistency_check' attribute writer
+ *
+ * Writing a non-zero value starts an asynchronous consistency check of
+ * this logical drive; writing 0 cancels a check in progress (via the
+ * same rebuild-control firmware command).
+ */
+static ssize_t consistency_check_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_rbld_progress rbld_buf;
+       struct myrb_cmdblk *cmd_blk;
+       union myrb_cmd_mbox *mbox;
+       unsigned short ldev_num = 0xFFFF;
+       unsigned short status;
+       int rc, start;
+       const char *msg;
+
+       rc = kstrtoint(buf, 0, &start);
+       if (rc)
+               return rc;
+
+       /* Consistency checks apply to logical drives only */
+       if (sdev->channel < myrb_logical_channel(sdev->host))
+               return -ENXIO;
+
+       status = myrb_get_rbld_progress(cb, &rbld_buf);
+       /*
+        * Record which logical drive is being checked.  The old code
+        * never assigned ldev_num from the progress buffer, so the
+        * cancellation path below always saw the 0xFFFF sentinel and
+        * reported "not in progress".
+        */
+       if (status == MYRB_STATUS_SUCCESS)
+               ldev_num = rbld_buf.ldev_num;
+       if (start) {
+               if (status == MYRB_STATUS_SUCCESS) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Check Consistency Not Initiated; already in progress\n");
+                       return -EALREADY;
+               }
+               mutex_lock(&cb->dcmd_mutex);
+               cmd_blk = &cb->dcmd_blk;
+               myrb_reset_cmd(cmd_blk);
+               mbox = &cmd_blk->mbox;
+               mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
+               mbox->type3C.id = MYRB_DCMD_TAG;
+               mbox->type3C.ldev_num = sdev->id;
+               mbox->type3C.auto_restore = true;
+
+               status = myrb_exec_cmd(cb, cmd_blk);
+               mutex_unlock(&cb->dcmd_mutex);
+       } else {
+               struct pci_dev *pdev = cb->pdev;
+               unsigned char *rate;
+               dma_addr_t rate_addr;
+
+               if (ldev_num != sdev->id) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Check Consistency Not Cancelled; not in progress\n");
+                       return 0;
+               }
+               /* The rate byte is read by the controller via DMA */
+               rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+                                         &rate_addr, GFP_KERNEL);
+               if (rate == NULL) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Cancellation of Check Consistency Failed - Out of Memory\n");
+                       return -ENOMEM;
+               }
+               mutex_lock(&cb->dcmd_mutex);
+               cmd_blk = &cb->dcmd_blk;
+               myrb_reset_cmd(cmd_blk);
+               mbox = &cmd_blk->mbox;
+               mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+               mbox->type3R.id = MYRB_DCMD_TAG;
+               /* 0xFF: cancel the running rebuild/check */
+               mbox->type3R.rbld_rate = 0xFF;
+               mbox->type3R.addr = rate_addr;
+               status = myrb_exec_cmd(cb, cmd_blk);
+               dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+               mutex_unlock(&cb->dcmd_mutex);
+       }
+       if (status == MYRB_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
+                           start ? "Initiated" : "Cancelled");
+               return count;
+       }
+       if (!start) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Check Consistency Not Cancelled, status 0x%x\n",
+                           status);
+               return -EIO;
+       }
+
+       /* Map the firmware failure code to a human-readable reason */
+       switch (status) {
+       case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+               msg = "Dependent Physical Device is DEAD";
+               break;
+       case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+               msg = "New Disk Failed During Rebuild";
+               break;
+       case MYRB_STATUS_INVALID_ADDRESS:
+               msg = "Invalid or Nonredundant Logical Drive";
+               break;
+       case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+               msg = "Already in Progress";
+               break;
+       default:
+               msg = NULL;
+               break;
+       }
+       if (msg)
+               sdev_printk(KERN_INFO, sdev,
+                           "Check Consistency Failed - %s\n", msg);
+       else
+               sdev_printk(KERN_INFO, sdev,
+                           "Check Consistency Failed, status 0x%x\n", status);
+
+       return -EIO;
+}
+
+/*
+ * Consistency-check progress comes from the same firmware
+ * rebuild-progress call, so reuse rebuild_show().
+ */
+static ssize_t consistency_check_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return rebuild_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+/* sysfs 'ctlr_num': controller instance number assigned at probe */
+static ssize_t ctlr_num_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrb_hba *cb = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+/* sysfs 'firmware': controller firmware version string */
+static ssize_t firmware_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrb_hba *cb = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 16, "%s\n", cb->fw_version);
+}
+static DEVICE_ATTR_RO(firmware);
+
+/* sysfs 'model': controller model name string */
+static ssize_t model_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrb_hba *cb = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 16, "%s\n", cb->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+/*
+ * flush_cache_store - sysfs 'flush_cache' attribute writer
+ *
+ * Any write to this attribute triggers a controller cache flush.
+ */
+static ssize_t flush_cache_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct myrb_hba *cb = shost_priv(shost);
+       unsigned short status;
+
+       status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+       if (status != MYRB_STATUS_SUCCESS) {
+               shost_printk(KERN_INFO, shost,
+                            "Cache Flush Failed, status %x\n", status);
+               return -EIO;
+       }
+       shost_printk(KERN_INFO, shost,
+                    "Cache Flush Completed\n");
+       return count;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+/* Per-device (scsi_device) sysfs attributes */
+static struct device_attribute *myrb_sdev_attrs[] = {
+       &dev_attr_rebuild,
+       &dev_attr_consistency_check,
+       &dev_attr_raid_state,
+       &dev_attr_raid_level,
+       NULL,
+};
+
+/* Per-host (Scsi_Host) sysfs attributes */
+static struct device_attribute *myrb_shost_attrs[] = {
+       &dev_attr_ctlr_num,
+       &dev_attr_model,
+       &dev_attr_firmware,
+       &dev_attr_flush_cache,
+       NULL,
+};
+
+/* SCSI host template for DAC960 (V1 firmware) controllers */
+struct scsi_host_template myrb_template = {
+       .module                 = THIS_MODULE,
+       .name                   = "DAC960",
+       .proc_name              = "myrb",
+       .queuecommand           = myrb_queuecommand,
+       .eh_host_reset_handler  = myrb_host_reset,
+       .slave_alloc            = myrb_slave_alloc,
+       .slave_configure        = myrb_slave_configure,
+       .slave_destroy          = myrb_slave_destroy,
+       .bios_param             = myrb_biosparam,
+       .cmd_size               = sizeof(struct myrb_cmdblk),
+       .shost_attrs            = myrb_shost_attrs,
+       .sdev_attrs             = myrb_sdev_attrs,
+       .this_id                = -1,
+};
+
+/**
+ * myrb_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int myrb_is_raid(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       /* Devices on the logical-drive channel are RAID volumes */
+       return sdev->channel == myrb_logical_channel(sdev->host);
+}
+
+/**
+ * myrb_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ *
+ * Reports rebuild progress through raid_set_resync(). Physical devices
+ * are skipped; a logical drive that is not currently rebuilding (or for
+ * which progress cannot be read) reports 0 percent.
+ */
+static void myrb_get_resync(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_rbld_progress rbld_buf;
+       unsigned int percent_complete = 0;
+       unsigned short status;
+       unsigned int ldev_size = 0, remaining = 0;
+
+       if (sdev->channel < myrb_logical_channel(sdev->host))
+               return;
+       status = myrb_get_rbld_progress(cb, &rbld_buf);
+       if (status == MYRB_STATUS_SUCCESS) {
+               /* progress is only valid for the drive being rebuilt */
+               if (rbld_buf.ldev_num == sdev->id) {
+                       ldev_size = rbld_buf.ldev_size;
+                       remaining = rbld_buf.blocks_left;
+               }
+       }
+       /* NOTE(review): 32-bit multiply; may overflow for very large
+        * ldev_size values — confirm block counts stay < ~43M. */
+       if (remaining && ldev_size)
+               percent_complete = (ldev_size - remaining) * 100 / ldev_size;
+       raid_set_resync(myrb_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrb_get_state - get raid volume status
+ * @dev: the device struct object
+ *
+ * Maps the controller's logical-drive state onto the raid transport
+ * class states and reports it via raid_set_state().
+ */
+static void myrb_get_state(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrb_hba *cb = shost_priv(sdev->host);
+       struct myrb_ldev_info *ldev_info = sdev->hostdata;
+       enum raid_state state = RAID_STATE_UNKNOWN;
+       unsigned short status;
+
+       if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
+               state = RAID_STATE_UNKNOWN;
+       else {
+               /* any rebuild in progress means the volume is resyncing */
+               status = myrb_get_rbld_progress(cb, NULL);
+               if (status == MYRB_STATUS_SUCCESS)
+                       state = RAID_STATE_RESYNCING;
+               else {
+                       switch (ldev_info->state) {
+                       case MYRB_DEVICE_ONLINE:
+                               state = RAID_STATE_ACTIVE;
+                               break;
+                       case MYRB_DEVICE_WO:
+                       case MYRB_DEVICE_CRITICAL:
+                               state = RAID_STATE_DEGRADED;
+                               break;
+                       default:
+                               state = RAID_STATE_OFFLINE;
+                       }
+               }
+       }
+       raid_set_state(myrb_raid_template, dev, state);
+}
+
+/* Hooks for the SCSI raid transport class (raid_class) */
+struct raid_function_template myrb_raid_functions = {
+       .cookie         = &myrb_template,
+       .is_raid        = myrb_is_raid,
+       .get_resync     = myrb_get_resync,
+       .get_state      = myrb_get_state,
+};
+
+/*
+ * Complete a SCSI command: unmap DMA, release the per-command DCDB and
+ * scatter/gather pool buffers, translate the controller status into a
+ * SCSI result (building sense data where appropriate) and finish the
+ * command via scsi_done.
+ */
+static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
+               struct scsi_cmnd *scmd)
+{
+       unsigned short status;
+
+       if (!cmd_blk)
+               return;
+
+       scsi_dma_unmap(scmd);
+
+       if (cmd_blk->dcdb) {
+               /* preserve the 64-byte sense data before freeing the DCDB */
+               memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
+               dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
+                             cmd_blk->dcdb_addr);
+               cmd_blk->dcdb = NULL;
+       }
+       if (cmd_blk->sgl) {
+               dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
+               cmd_blk->sgl = NULL;
+               cmd_blk->sgl_addr = 0;
+       }
+       status = cmd_blk->status;
+       switch (status) {
+       case MYRB_STATUS_SUCCESS:
+       case MYRB_STATUS_DEVICE_BUSY:
+               /* pass the controller status through as the SCSI status */
+               scmd->result = (DID_OK << 16) | status;
+               break;
+       case MYRB_STATUS_BAD_DATA:
+               dev_dbg(&scmd->device->sdev_gendev,
+                       "Bad Data Encountered\n");
+               if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+                       /* Unrecovered read error */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               MEDIUM_ERROR, 0x11, 0);
+               else
+                       /* Write error */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               MEDIUM_ERROR, 0x0C, 0);
+               scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+               break;
+       case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
+               scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
+               if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+                       /* Unrecovered read error, auto-reallocation failed */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               MEDIUM_ERROR, 0x11, 0x04);
+               else
+                       /* Write error, auto-reallocation failed */
+                       scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                               MEDIUM_ERROR, 0x0C, 0x02);
+               scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+               break;
+       case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
+               dev_dbg(&scmd->device->sdev_gendev,
+                           "Logical Drive Nonexistent or Offline");
+               scmd->result = (DID_BAD_TARGET << 16);
+               break;
+       case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
+               dev_dbg(&scmd->device->sdev_gendev,
+                           "Attempt to Access Beyond End of Logical Drive");
+               /* Logical block address out of range */
+               /* NOTE(review): scmd->result is not set in this case,
+                * unlike every other error branch — confirm intended. */
+               scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                       NOT_READY, 0x21, 0);
+               break;
+       case MYRB_STATUS_DEVICE_NONRESPONSIVE:
+               dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
+               scmd->result = (DID_BAD_TARGET << 16);
+               break;
+       default:
+               scmd_printk(KERN_ERR, scmd,
+                           "Unexpected Error Status %04X", status);
+               scmd->result = (DID_ERROR << 16);
+               break;
+       }
+       scmd->scsi_done(scmd);
+}
+
+/*
+ * Complete an internal (non-SCSI) command block by waking any waiter
+ * blocked on its completion.
+ */
+static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+       struct completion *waiter;
+
+       if (!cmd_blk)
+               return;
+
+       waiter = cmd_blk->completion;
+       if (waiter) {
+               complete(waiter);
+               cmd_blk->completion = NULL;
+       }
+}
+
+/*
+ * Periodic monitor work. Each run services at most one pending
+ * housekeeping task — event log, error table, rebuild / consistency
+ * check / background-init progress, logical drive info — in priority
+ * order, otherwise issues a fresh controller enquiry; it then
+ * reschedules itself (interval is in jiffies, as consumed by
+ * queue_delayed_work).
+ */
+static void myrb_monitor(struct work_struct *work)
+{
+       struct myrb_hba *cb = container_of(work,
+                       struct myrb_hba, monitor_work.work);
+       struct Scsi_Host *shost = cb->host;
+       unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
+
+       dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+       if (cb->new_ev_seq > cb->old_ev_seq) {
+               /* fetch one outstanding event, then poll again quickly */
+               int event = cb->old_ev_seq;
+
+               dev_dbg(&shost->shost_gendev,
+                       "get event log no %d/%d\n",
+                       cb->new_ev_seq, event);
+               myrb_get_event(cb, event);
+               cb->old_ev_seq = event + 1;
+               interval = 10;
+       } else if (cb->need_err_info) {
+               cb->need_err_info = false;
+               dev_dbg(&shost->shost_gendev, "get error table\n");
+               myrb_get_errtable(cb);
+               interval = 10;
+       } else if (cb->need_rbld && cb->rbld_first) {
+               cb->need_rbld = false;
+               dev_dbg(&shost->shost_gendev,
+                       "get rebuild progress\n");
+               myrb_update_rbld_progress(cb);
+               interval = 10;
+       } else if (cb->need_ldev_info) {
+               cb->need_ldev_info = false;
+               dev_dbg(&shost->shost_gendev,
+                       "get logical drive info\n");
+               myrb_get_ldev_info(cb);
+               interval = 10;
+       } else if (cb->need_rbld) {
+               cb->need_rbld = false;
+               dev_dbg(&shost->shost_gendev,
+                       "get rebuild progress\n");
+               myrb_update_rbld_progress(cb);
+               interval = 10;
+       } else if (cb->need_cc_status) {
+               cb->need_cc_status = false;
+               dev_dbg(&shost->shost_gendev,
+                       "get consistency check progress\n");
+               myrb_get_cc_progress(cb);
+               interval = 10;
+       } else if (cb->need_bgi_status) {
+               cb->need_bgi_status = false;
+               dev_dbg(&shost->shost_gendev, "get background init status\n");
+               myrb_bgi_control(cb);
+               interval = 10;
+       } else {
+               dev_dbg(&shost->shost_gendev, "new enquiry\n");
+               mutex_lock(&cb->dma_mutex);
+               myrb_hba_enquiry(cb);
+               mutex_unlock(&cb->dma_mutex);
+               /* if the enquiry raised new work, run again immediately */
+               if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
+                   cb->need_err_info || cb->need_rbld ||
+                   cb->need_ldev_info || cb->need_cc_status ||
+                   cb->need_bgi_status) {
+                       dev_dbg(&shost->shost_gendev,
+                               "reschedule monitor\n");
+                       interval = 0;
+               }
+       }
+       /* remember when the last non-immediate poll happened */
+       if (interval > 1)
+               cb->primary_monitor_time = jiffies;
+       queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
+}
+
+/**
+ * myrb_err_status - reports controller BIOS messages
+ * @cb: myrb_hba instance the message belongs to
+ * @error: error status byte read from the controller
+ * @parm0: first parameter byte accompanying @error
+ * @parm1: second parameter byte accompanying @error
+ *
+ * Controller BIOS messages are passed through the Error Status Register
+ * when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+               unsigned char parm0, unsigned char parm1)
+{
+       struct pci_dev *pdev = cb->pdev;
+
+       switch (error) {
+       case 0x00:
+               dev_info(&pdev->dev,
+                        "Physical Device %d:%d Not Responding\n",
+                        parm1, parm0);
+               break;
+       case 0x08:
+               dev_notice(&pdev->dev, "Spinning Up Drives\n");
+               break;
+       case 0x30:
+               dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+               break;
+       case 0x60:
+               dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+               break;
+       case 0x70:
+               dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+               break;
+       case 0x90:
+               dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+                          parm1, parm0);
+               break;
+       case 0xA0:
+               dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+               break;
+       case 0xB0:
+               dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+               break;
+       case 0xD0:
+               dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+               break;
+       case 0xF0:
+               dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+               return true;
+       default:
+               dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+                       error);
+               return true;
+       }
+       return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 LA Series Controllers
+ */
+
+/* Ring the inbound doorbell: new command in the hardware mailbox */
+static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+/* Acknowledge consumption of the hardware mailbox status */
+static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
+{
+       writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
+}
+
+/* Ask the controller to generate an interrupt */
+static inline void DAC960_LA_gen_intr(void __iomem *base)
+{
+       writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
+}
+
+/* Reset the controller */
+static inline void DAC960_LA_reset_ctrl(void __iomem *base)
+{
+       writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
+}
+
+/* Ring the inbound doorbell: new command in the memory mailbox */
+static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+/* True while the controller has not yet drained the hardware mailbox
+ * (LA reports an EMPTY bit, hence the negation) */
+static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
+{
+       unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+       return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
+}
+
+/* True until the controller reports initialization done */
+static inline bool DAC960_LA_init_in_progress(void __iomem *base)
+{
+       unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+       return !(idb & DAC960_LA_IDB_INIT_DONE);
+}
+
+/* Acknowledge a hardware-mailbox interrupt on the outbound doorbell */
+static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
+{
+       writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+/* Acknowledge a memory-mailbox interrupt on the outbound doorbell */
+static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
+{
+       writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+/* Acknowledge both hardware- and memory-mailbox interrupts at once */
+static inline void DAC960_LA_ack_intr(void __iomem *base)
+{
+       writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
+              base + DAC960_LA_ODB_OFFSET);
+}
+
+/* True when a hardware-mailbox completion status is ready to be read */
+static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
+{
+       unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+       return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
+}
+
+/* True when a memory-mailbox completion status is ready to be read */
+static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
+{
+       unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+       return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
+}
+
+/* Enable interrupts: clear the disable bit, leave all other mask bits set */
+static inline void DAC960_LA_enable_intr(void __iomem *base)
+{
+       unsigned char imask = 0xFF;
+
+       imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
+       writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+/* Disable interrupts by writing an all-ones mask (disable bit included) */
+static inline void DAC960_LA_disable_intr(void __iomem *base)
+{
+       unsigned char imask = 0xFF;
+
+       imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
+       writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+/* True when the interrupt-disable bit is clear in the IRQ mask register */
+static inline bool DAC960_LA_intr_enabled(void __iomem *base)
+{
+       unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
+
+       return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
+}
+
+/*
+ * Copy a command into the memory mailbox. Word 0 is written last, after
+ * a write barrier, so the controller never observes a partially written
+ * command.
+ */
+static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+               union myrb_cmd_mbox *mbox)
+{
+       mem_mbox->words[1] = mbox->words[1];
+       mem_mbox->words[2] = mbox->words[2];
+       mem_mbox->words[3] = mbox->words[3];
+       /* Memory barrier to prevent reordering */
+       wmb();
+       mem_mbox->words[0] = mbox->words[0];
+       /* Memory barrier to force PCI access */
+       mb();
+}
+
+/* Write a 13-byte command directly into the hardware mailbox registers */
+static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
+               union myrb_cmd_mbox *mbox)
+{
+       writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
+       writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
+       writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
+       writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
+}
+
+/* Read the command identifier of the completed command */
+static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
+{
+       return readb(base + DAC960_LA_STSID_OFFSET);
+}
+
+/* Read the 16-bit completion status of the completed command */
+static inline unsigned short DAC960_LA_read_status(void __iomem *base)
+{
+       return readw(base + DAC960_LA_STS_OFFSET);
+}
+
+/*
+ * Check for a pending BIOS error status. When one is pending, fill in
+ * *error / *param0 / *param1, clear the register and return true;
+ * otherwise return false.
+ */
+static inline bool
+DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
+               unsigned char *param0, unsigned char *param1)
+{
+       unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
+
+       if (!(errsts & DAC960_LA_ERRSTS_PENDING))
+               return false;
+       errsts &= ~DAC960_LA_ERRSTS_PENDING;
+
+       *error = errsts;
+       *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
+       *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
+       writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
+       return true;
+}
+
+/*
+ * Synchronously execute a command through the hardware mailbox, polling
+ * (10us steps, up to MYRB_MAILBOX_TIMEOUT iterations) for mailbox space
+ * and then for the completion status.
+ *
+ * Return: the controller status, or MYRB_STATUS_SUBSYS_TIMEOUT on a
+ * polling timeout.
+ */
+static inline unsigned short
+DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
+               union myrb_cmd_mbox *mbox)
+{
+       unsigned short status;
+       int timeout = 0;
+
+       while (timeout < MYRB_MAILBOX_TIMEOUT) {
+               if (!DAC960_LA_hw_mbox_is_full(base))
+                       break;
+               udelay(10);
+               timeout++;
+       }
+       if (DAC960_LA_hw_mbox_is_full(base)) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for empty mailbox\n");
+               return MYRB_STATUS_SUBSYS_TIMEOUT;
+       }
+       DAC960_LA_write_hw_mbox(base, mbox);
+       DAC960_LA_hw_mbox_new_cmd(base);
+       timeout = 0;
+       while (timeout < MYRB_MAILBOX_TIMEOUT) {
+               if (DAC960_LA_hw_mbox_status_available(base))
+                       break;
+               udelay(10);
+               timeout++;
+       }
+       if (!DAC960_LA_hw_mbox_status_available(base)) {
+               dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
+               return MYRB_STATUS_SUBSYS_TIMEOUT;
+       }
+       status = DAC960_LA_read_status(base);
+       /* ack both the interrupt and the status so the mailbox can be reused */
+       DAC960_LA_ack_hw_mbox_intr(base);
+       DAC960_LA_ack_hw_mbox_status(base);
+
+       return status;
+}
+
+/*
+ * Bring up a LA-series controller: wait for firmware initialization
+ * (reporting any BIOS error messages on the way), enable the memory
+ * mailbox interface, enable interrupts and install the LA-specific
+ * operations on @cb.
+ */
+static int DAC960_LA_hw_init(struct pci_dev *pdev,
+               struct myrb_hba *cb, void __iomem *base)
+{
+       int timeout = 0;
+       unsigned char error, parm0, parm1;
+
+       DAC960_LA_disable_intr(base);
+       DAC960_LA_ack_hw_mbox_status(base);
+       udelay(1000);
+       timeout = 0;
+       while (DAC960_LA_init_in_progress(base) &&
+              timeout < MYRB_MAILBOX_TIMEOUT) {
+               /* a fatal BIOS error aborts the bring-up */
+               if (DAC960_LA_read_error_status(base, &error,
+                                             &parm0, &parm1) &&
+                   myrb_err_status(cb, error, parm0, parm1))
+                       return -ENODEV;
+               udelay(10);
+               timeout++;
+       }
+       if (timeout == MYRB_MAILBOX_TIMEOUT) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for Controller Initialisation\n");
+               return -ETIMEDOUT;
+       }
+       if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
+               dev_err(&pdev->dev,
+                       "Unable to Enable Memory Mailbox Interface\n");
+               DAC960_LA_reset_ctrl(base);
+               return -ENODEV;
+       }
+       DAC960_LA_enable_intr(base);
+       cb->qcmd = myrb_qcmd;
+       cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
+       if (cb->dual_mode_interface)
+               cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
+       else
+               cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
+       cb->disable_intr = DAC960_LA_disable_intr;
+       cb->reset = DAC960_LA_reset_ctrl;
+
+       return 0;
+}
+
+/*
+ * LA-series interrupt handler: acknowledge the interrupt and drain the
+ * circular ring of completion status mailboxes under queue_lock.
+ */
+static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
+{
+       struct myrb_hba *cb = arg;
+       void __iomem *base = cb->io_base;
+       struct myrb_stat_mbox *next_stat_mbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cb->queue_lock, flags);
+       DAC960_LA_ack_intr(base);
+       next_stat_mbox = cb->next_stat_mbox;
+       while (next_stat_mbox->valid) {
+               unsigned char id = next_stat_mbox->id;
+               struct scsi_cmnd *scmd = NULL;
+               struct myrb_cmdblk *cmd_blk = NULL;
+
+               /* DCMD/MCMD are internal tags; other ids are host tags + 3 */
+               if (id == MYRB_DCMD_TAG)
+                       cmd_blk = &cb->dcmd_blk;
+               else if (id == MYRB_MCMD_TAG)
+                       cmd_blk = &cb->mcmd_blk;
+               else {
+                       scmd = scsi_host_find_tag(cb->host, id - 3);
+                       if (scmd)
+                               cmd_blk = scsi_cmd_priv(scmd);
+               }
+               if (cmd_blk)
+                       cmd_blk->status = next_stat_mbox->status;
+               else
+                       dev_err(&cb->pdev->dev,
+                               "Unhandled command completion %d\n", id);
+
+               /* clear this slot and advance around the ring */
+               memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+               if (++next_stat_mbox > cb->last_stat_mbox)
+                       next_stat_mbox = cb->first_stat_mbox;
+
+               if (cmd_blk) {
+                       if (id < 3)
+                               myrb_handle_cmdblk(cb, cmd_blk);
+                       else
+                               myrb_handle_scsi(cb, cmd_blk, scmd);
+               }
+       }
+       cb->next_stat_mbox = next_stat_mbox;
+       spin_unlock_irqrestore(&cb->queue_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/* Hardware operations for DAC960 LA series boards */
+struct myrb_privdata DAC960_LA_privdata = {
+       .hw_init =      DAC960_LA_hw_init,
+       .irq_handler =  DAC960_LA_intr_handler,
+       .mmio_size =    DAC960_LA_mmio_size,
+};
+
+/*
+ * DAC960 PG Series Controllers
+ */
+/* PG doorbell registers are 32 bits wide, hence writel/readl below. */
+
+/* Ring the inbound doorbell: new command in the hardware mailbox */
+static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
+{
+       writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+/* Acknowledge consumption of the hardware mailbox status */
+static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
+{
+       writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
+}
+
+/* Ask the controller to generate an interrupt */
+static inline void DAC960_PG_gen_intr(void __iomem *base)
+{
+       writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
+}
+
+/* Reset the controller */
+static inline void DAC960_PG_reset_ctrl(void __iomem *base)
+{
+       writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
+}
+
+/* Ring the inbound doorbell: new command in the memory mailbox */
+static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
+{
+       writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+/* True while the hardware mailbox still holds an unconsumed command */
+static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
+{
+       unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+       return idb & DAC960_PG_IDB_HWMBOX_FULL;
+}
+
+/* True while controller initialization is still in progress */
+static inline bool DAC960_PG_init_in_progress(void __iomem *base)
+{
+       unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+       return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
+}
+
+/* Acknowledge a hardware-mailbox interrupt on the outbound doorbell */
+static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
+{
+       writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+/* Acknowledge a memory-mailbox interrupt on the outbound doorbell */
+static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
+{
+       writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+/* Acknowledge both hardware- and memory-mailbox interrupts at once */
+static inline void DAC960_PG_ack_intr(void __iomem *base)
+{
+       writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
+              base + DAC960_PG_ODB_OFFSET);
+}
+
+/* True when a hardware-mailbox completion status is ready to be read */
+static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
+{
+       unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+       return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
+}
+
+/* True when a memory-mailbox completion status is ready to be read */
+static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
+{
+       unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+       return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
+}
+
+/* Enable interrupts: clear the disable bit, leave all other mask bits set */
+static inline void DAC960_PG_enable_intr(void __iomem *base)
+{
+       unsigned int imask = (unsigned int)-1;
+
+       imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
+       writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+/* Disable interrupts by writing an all-ones mask (disable bit included) */
+static inline void DAC960_PG_disable_intr(void __iomem *base)
+{
+       unsigned int imask = (unsigned int)-1;
+
+       writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+/* True when the interrupt-disable bit is clear in the IRQ mask register */
+static inline bool DAC960_PG_intr_enabled(void __iomem *base)
+{
+       unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
+
+       return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
+}
+
+/*
+ * Copy a command into the memory mailbox. Word 0 is written last, after
+ * a write barrier, so the controller never observes a partially written
+ * command.
+ */
+static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+               union myrb_cmd_mbox *mbox)
+{
+       mem_mbox->words[1] = mbox->words[1];
+       mem_mbox->words[2] = mbox->words[2];
+       mem_mbox->words[3] = mbox->words[3];
+       /* Memory barrier to prevent reordering */
+       wmb();
+       mem_mbox->words[0] = mbox->words[0];
+       /* Memory barrier to force PCI access */
+       mb();
+}
+
+/* Write a 13-byte command directly into the hardware mailbox registers */
+static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
+               union myrb_cmd_mbox *mbox)
+{
+       writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
+       writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
+       writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
+       writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
+}
+
+/* Read the command identifier of the completed command */
+static inline unsigned char
+DAC960_PG_read_status_cmd_ident(void __iomem *base)
+{
+       return readb(base + DAC960_PG_STSID_OFFSET);
+}
+
+/* Read the 16-bit completion status of the completed command */
+static inline unsigned short
+DAC960_PG_read_status(void __iomem *base)
+{
+       return readw(base + DAC960_PG_STS_OFFSET);
+}
+
+/*
+ * Check for a pending BIOS error status. When one is pending, fill in
+ * *error / *param0 / *param1, clear the register and return true;
+ * otherwise return false.
+ */
+static inline bool
+DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
+               unsigned char *param0, unsigned char *param1)
+{
+       unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
+
+       if (!(errsts & DAC960_PG_ERRSTS_PENDING))
+               return false;
+       errsts &= ~DAC960_PG_ERRSTS_PENDING;
+       *error = errsts;
+       *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
+       *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
+       writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
+       return true;
+}
+
+/*
+ * Synchronously execute a command through the hardware mailbox, polling
+ * (10us steps, up to MYRB_MAILBOX_TIMEOUT iterations) for mailbox space
+ * and then for the completion status.
+ *
+ * Return: the controller status, or MYRB_STATUS_SUBSYS_TIMEOUT on a
+ * polling timeout.
+ */
+static inline unsigned short
+DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
+               union myrb_cmd_mbox *mbox)
+{
+       unsigned short status;
+       int timeout = 0;
+
+       while (timeout < MYRB_MAILBOX_TIMEOUT) {
+               if (!DAC960_PG_hw_mbox_is_full(base))
+                       break;
+               udelay(10);
+               timeout++;
+       }
+       if (DAC960_PG_hw_mbox_is_full(base)) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for empty mailbox\n");
+               return MYRB_STATUS_SUBSYS_TIMEOUT;
+       }
+       DAC960_PG_write_hw_mbox(base, mbox);
+       DAC960_PG_hw_mbox_new_cmd(base);
+
+       timeout = 0;
+       while (timeout < MYRB_MAILBOX_TIMEOUT) {
+               if (DAC960_PG_hw_mbox_status_available(base))
+                       break;
+               udelay(10);
+               timeout++;
+       }
+       if (!DAC960_PG_hw_mbox_status_available(base)) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for mailbox status\n");
+               return MYRB_STATUS_SUBSYS_TIMEOUT;
+       }
+       status = DAC960_PG_read_status(base);
+       /* ack both the interrupt and the status so the mailbox can be reused */
+       DAC960_PG_ack_hw_mbox_intr(base);
+       DAC960_PG_ack_hw_mbox_status(base);
+
+       return status;
+}
+
+/*
+ * Bring up a PG-series controller: wait for firmware initialization
+ * (reporting any BIOS error messages on the way), enable the memory
+ * mailbox interface, enable interrupts and install the PG-specific
+ * operations on @cb.
+ */
+static int DAC960_PG_hw_init(struct pci_dev *pdev,
+               struct myrb_hba *cb, void __iomem *base)
+{
+       int timeout = 0;
+       unsigned char error, parm0, parm1;
+
+       DAC960_PG_disable_intr(base);
+       DAC960_PG_ack_hw_mbox_status(base);
+       udelay(1000);
+       while (DAC960_PG_init_in_progress(base) &&
+              timeout < MYRB_MAILBOX_TIMEOUT) {
+               /* a fatal BIOS error aborts the bring-up */
+               if (DAC960_PG_read_error_status(base, &error,
+                                               &parm0, &parm1) &&
+                   myrb_err_status(cb, error, parm0, parm1))
+                       return -EIO;
+               udelay(10);
+               timeout++;
+       }
+       if (timeout == MYRB_MAILBOX_TIMEOUT) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for Controller Initialisation\n");
+               return -ETIMEDOUT;
+       }
+       if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
+               dev_err(&pdev->dev,
+                       "Unable to Enable Memory Mailbox Interface\n");
+               DAC960_PG_reset_ctrl(base);
+               return -ENODEV;
+       }
+       DAC960_PG_enable_intr(base);
+       cb->qcmd = myrb_qcmd;
+       cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
+       if (cb->dual_mode_interface)
+               cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
+       else
+               cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
+       cb->disable_intr = DAC960_PG_disable_intr;
+       cb->reset = DAC960_PG_reset_ctrl;
+
+       return 0;
+}
+
+/*
+ * PG-series interrupt handler: acknowledge the interrupt and drain the
+ * circular ring of completion status mailboxes under queue_lock.
+ */
+static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
+{
+       struct myrb_hba *cb = arg;
+       void __iomem *base = cb->io_base;
+       struct myrb_stat_mbox *next_stat_mbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cb->queue_lock, flags);
+       DAC960_PG_ack_intr(base);
+       next_stat_mbox = cb->next_stat_mbox;
+       while (next_stat_mbox->valid) {
+               unsigned char id = next_stat_mbox->id;
+               struct scsi_cmnd *scmd = NULL;
+               struct myrb_cmdblk *cmd_blk = NULL;
+
+               /* DCMD/MCMD are internal tags; other ids are host tags + 3 */
+               if (id == MYRB_DCMD_TAG)
+                       cmd_blk = &cb->dcmd_blk;
+               else if (id == MYRB_MCMD_TAG)
+                       cmd_blk = &cb->mcmd_blk;
+               else {
+                       scmd = scsi_host_find_tag(cb->host, id - 3);
+                       if (scmd)
+                               cmd_blk = scsi_cmd_priv(scmd);
+               }
+               if (cmd_blk)
+                       cmd_blk->status = next_stat_mbox->status;
+               else
+                       dev_err(&cb->pdev->dev,
+                               "Unhandled command completion %d\n", id);
+
+               /* clear this slot and advance around the ring */
+               memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+               if (++next_stat_mbox > cb->last_stat_mbox)
+                       next_stat_mbox = cb->first_stat_mbox;
+
+               /*
+                * Only dispatch completions for commands we recognized,
+                * mirroring DAC960_LA_intr_handler.
+                */
+               if (cmd_blk) {
+                       if (id < 3)
+                               myrb_handle_cmdblk(cb, cmd_blk);
+                       else
+                               myrb_handle_scsi(cb, cmd_blk, scmd);
+               }
+       }
+       cb->next_stat_mbox = next_stat_mbox;
+       spin_unlock_irqrestore(&cb->queue_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/* Hardware operations for DAC960 PG series boards */
+struct myrb_privdata DAC960_PG_privdata = {
+       .hw_init =      DAC960_PG_hw_init,
+       .irq_handler =  DAC960_PG_intr_handler,
+       .mmio_size =    DAC960_PG_mmio_size,
+};
+
+
+/*
+ * DAC960 PD Series Controllers
+ */
+
+/* Ring the inbound doorbell: new command in the hardware mailbox */
+static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
+}
+
+/* Acknowledge consumption of the hardware mailbox status */
+static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
+{
+       writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
+}
+
+/* Ask the controller to generate an interrupt */
+static inline void DAC960_PD_gen_intr(void __iomem *base)
+{
+       writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
+}
+
+/* Reset the controller */
+static inline void DAC960_PD_reset_ctrl(void __iomem *base)
+{
+       writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
+}
+
+/* True while the hardware mailbox still holds an unconsumed command */
+static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
+{
+       unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+       return idb & DAC960_PD_IDB_HWMBOX_FULL;
+}
+
+/* True while controller initialization is still in progress */
+static inline bool DAC960_PD_init_in_progress(void __iomem *base)
+{
+       unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+       return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
+}
+
+/* Acknowledge the hardware-mailbox interrupt (PD has no memory mailbox) */
+static inline void DAC960_PD_ack_intr(void __iomem *base)
+{
+       writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
+}
+
+/* True when a hardware-mailbox completion status is ready to be read */
+static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
+{
+       unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
+
+       return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
+}
+
+/* Enable interrupts via the dedicated IRQ-enable register */
+static inline void DAC960_PD_enable_intr(void __iomem *base)
+{
+       writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+/* Disable interrupts by clearing the IRQ-enable register */
+static inline void DAC960_PD_disable_intr(void __iomem *base)
+{
+       writeb(0, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+/* True when the IRQ-enable bit is set */
+static inline bool DAC960_PD_intr_enabled(void __iomem *base)
+{
+       unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
+
+       return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
+}
+
+/*
+ * Write a 13-byte command directly into the hardware mailbox registers;
+ * PD boards have no memory mailbox, so this is the only submission path.
+ */
+static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
+               union myrb_cmd_mbox *mbox)
+{
+       writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
+       writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
+       writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
+       writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
+}
+
+/* Read the command identifier of the completed command */
+static inline unsigned char
+DAC960_PD_read_status_cmd_ident(void __iomem *base)
+{
+       return readb(base + DAC960_PD_STSID_OFFSET);
+}
+
+/* Read the 16-bit completion status of the completed command */
+static inline unsigned short
+DAC960_PD_read_status(void __iomem *base)
+{
+       return readw(base + DAC960_PD_STS_OFFSET);
+}
+
+/*
+ * Check for a pending BIOS error status. When one is pending, fill in
+ * *error / *param0 / *param1, clear the register and return true;
+ * otherwise return false.
+ */
+static inline bool
+DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
+               unsigned char *param0, unsigned char *param1)
+{
+       unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
+
+       if (!(errsts & DAC960_PD_ERRSTS_PENDING))
+               return false;
+       errsts &= ~DAC960_PD_ERRSTS_PENDING;
+       *error = errsts;
+       *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
+       *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
+       writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
+       return true;
+}
+
/*
 * Queue a command to a PD-series controller: busy-wait (1us polls)
 * until the hardware mailbox is free, copy the mailbox in, then
 * ring the new-command doorbell.  Caller presumably holds
 * cb->queue_lock to serialise mailbox access -- TODO confirm.
 */
static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
+
/*
 * DAC960_PD_hw_init - bring up a PD-series controller
 *
 * Claims the 0x80-byte I/O port window, masks interrupts, then polls
 * (up to MYRB_MAILBOX_TIMEOUT iterations of 10us) for the firmware's
 * init-in-progress flag to clear, surfacing any posted error status
 * via myrb_err_status().  On success the mailbox interface is
 * enabled, interrupts are unmasked and the PD entry points are
 * installed in @cb.
 *
 * Returns 0 on success or a negative errno.  Error returns leave the
 * I/O region claimed; the caller (myrb_detect) invokes myrb_cleanup()
 * which presumably releases it -- TODO confirm.
 */
static int DAC960_PD_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
					      &parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_PD_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}
+
+static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
+{
+       struct myrb_hba *cb = arg;
+       void __iomem *base = cb->io_base;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cb->queue_lock, flags);
+       while (DAC960_PD_hw_mbox_status_available(base)) {
+               unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+               struct scsi_cmnd *scmd = NULL;
+               struct myrb_cmdblk *cmd_blk = NULL;
+
+               if (id == MYRB_DCMD_TAG)
+                       cmd_blk = &cb->dcmd_blk;
+               else if (id == MYRB_MCMD_TAG)
+                       cmd_blk = &cb->mcmd_blk;
+               else {
+                       scmd = scsi_host_find_tag(cb->host, id - 3);
+                       if (scmd)
+                               cmd_blk = scsi_cmd_priv(scmd);
+               }
+               if (cmd_blk)
+                       cmd_blk->status = DAC960_PD_read_status(base);
+               else
+                       dev_err(&cb->pdev->dev,
+                               "Unhandled command completion %d\n", id);
+
+               DAC960_PD_ack_intr(base);
+               DAC960_PD_ack_hw_mbox_status(base);
+
+               if (id < 3)
+                       myrb_handle_cmdblk(cb, cmd_blk);
+               else
+                       myrb_handle_scsi(cb, cmd_blk, scmd);
+       }
+       spin_unlock_irqrestore(&cb->queue_lock, flags);
+       return IRQ_HANDLED;
+}
+
/* Per-board entry points for DAC960 PD-series controllers. */
struct myrb_privdata DAC960_PD_privdata = {
	.hw_init =	DAC960_PD_hw_init,
	.irq_handler =	DAC960_PD_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
+
+
+/*
+ * DAC960 P Series Controllers
+ *
+ * Similar to the DAC960 PD Series Controllers, but some commands have
+ * to be translated.
+ */
+
/*
 * Convert an old-firmware (2.xx) enquiry reply into the current
 * layout: bytes 36..99 move to offset 132, and the vacated region
 * (bytes 36..131) is zeroed.
 */
static inline void myrb_translate_enquiry(void *enq)
{
	unsigned char *p = enq;

	memcpy(p + 132, p + 36, 64);
	memset(p + 36, 0, 96);
}
+
/*
 * Convert an old-firmware (2.xx) device-state reply in place by
 * compacting three fields toward the current layout.  The steps are
 * sequential and overlapping, hence memmove for the multi-byte ones.
 */
static inline void myrb_translate_devstate(void *state)
{
	unsigned char *s = state;

	s[2] = s[3];
	memmove(s + 4, s + 5, 2);
	memmove(s + 6, s + 8, 4);
}
+
/*
 * Repack a read/write mailbox into the old-firmware (2.xx) format:
 * the logical drive number moves into byte 7, while the former
 * byte 7 value is folded into the top bits of byte 3 (low 3 bits of
 * byte 3 are preserved).  Note this is not the literal inverse of
 * myrb_translate_from_rw_command(); the bit positions follow the
 * old firmware's mailbox layout -- see the original DAC960 driver.
 */
static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->type5.ld.ldev_num;

	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= mbox->bytes[7] << 6;
	mbox->bytes[7] = ldev_num;
}
+
/*
 * Undo the old-firmware repacking after completion: recover the
 * original byte 7 from the top bits of byte 3, and place the logical
 * drive number (old byte 7) back into byte 3 starting at bit 3.
 */
static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}
+
/*
 * Queue a command to a P-series controller.  Identical to
 * DAC960_PD_qcmd() except that new-style opcodes are first rewritten
 * to their 2.xx firmware ("_OLD") equivalents, with read/write
 * mailboxes additionally repacked via myrb_translate_to_rw_command().
 */
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		/* All other opcodes pass through untranslated. */
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}
+
+
+static int DAC960_P_hw_init(struct pci_dev *pdev,
+               struct myrb_hba *cb, void __iomem *base)
+{
+       int timeout = 0;
+       unsigned char error, parm0, parm1;
+
+       if (!request_region(cb->io_addr, 0x80, "myrb")) {
+               dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+                       (unsigned long)cb->io_addr);
+               return -EBUSY;
+       }
+       DAC960_PD_disable_intr(base);
+       DAC960_PD_ack_hw_mbox_status(base);
+       udelay(1000);
+       while (DAC960_PD_init_in_progress(base) &&
+              timeout < MYRB_MAILBOX_TIMEOUT) {
+               if (DAC960_PD_read_error_status(base, &error,
+                                               &parm0, &parm1) &&
+                   myrb_err_status(cb, error, parm0, parm1))
+                       return -EAGAIN;
+               udelay(10);
+               timeout++;
+       }
+       if (timeout == MYRB_MAILBOX_TIMEOUT) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for Controller Initialisation\n");
+               return -ETIMEDOUT;
+       }
+       if (!myrb_enable_mmio(cb, NULL)) {
+               dev_err(&pdev->dev,
+                       "Unable to allocate DMA mapped memory\n");
+               DAC960_PD_reset_ctrl(base);
+               return -ETIMEDOUT;
+       }
+       DAC960_PD_enable_intr(base);
+       cb->qcmd = DAC960_P_qcmd;
+       cb->disable_intr = DAC960_PD_disable_intr;
+       cb->reset = DAC960_PD_reset_ctrl;
+
+       return 0;
+}
+
/*
 * Interrupt handler for P-series controllers.  Same completion
 * drain as DAC960_PD_intr_handler(), plus reverse translation of
 * old-firmware ("_OLD") opcodes before the command is completed:
 * the mailbox opcode is restored, read/write mailboxes are unpacked
 * via myrb_translate_from_rw_command(), and an enquiry reply buffer
 * is converted to the current layout.
 */
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;


		/* Tags 1/2 are internal commands; ids >= 3 map to SCSI tags. */
		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		/* Unrecognized identifier: nothing to translate or complete. */
		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}
+
/* Per-board entry points for DAC960 P-series controllers. */
struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};
+
/*
 * myrb_detect - allocate and initialise one controller instance
 *
 * Allocates the Scsi_Host (with myrb_hba as private data), enables
 * the PCI device, records the BAR layout -- PD/P boards expose an
 * I/O port window in BAR0 and the register block in BAR1, other
 * board types use BAR0 only -- maps the register window, runs the
 * board-specific hw_init, and installs a shared interrupt handler.
 *
 * Returns the new myrb_hba, or NULL on any failure; failure paths
 * funnel through myrb_cleanup(), which presumably tears down
 * whatever was set up so far -- TODO confirm partial-init safety.
 */
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;

	if (pci_enable_device(pdev))
		goto failure;

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	/* Map at least one page, page-aligned; io_base re-adds the offset. */
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}
+
+static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+       struct myrb_hba *cb;
+       int ret;
+
+       cb = myrb_detect(dev, entry);
+       if (!cb)
+               return -ENODEV;
+
+       ret = myrb_get_hba_config(cb);
+       if (ret < 0) {
+               myrb_cleanup(cb);
+               return ret;
+       }
+
+       if (!myrb_create_mempools(dev, cb)) {
+               ret = -ENOMEM;
+               goto failed;
+       }
+
+       ret = scsi_add_host(cb->host, &dev->dev);
+       if (ret) {
+               dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+               myrb_destroy_mempools(cb);
+               goto failed;
+       }
+       scsi_scan_host(cb->host);
+       return 0;
+failed:
+       myrb_cleanup(cb);
+       return ret;
+}
+
+
/*
 * PCI remove entry point: flush the controller cache, then tear
 * down the host and release the mempools.
 */
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}
+
+
/*
 * Supported boards.  The LA-series sits behind a DEC 21285 bridge,
 * so it is matched by DEC vendor/device with Mylex subsystem IDs;
 * the rest match plain Mylex device IDs.
 */
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};
+
MODULE_DEVICE_TABLE(pci, myrb_id_table);

/* PCI driver glue: probe/remove wired to the table above. */
static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};
+
+static int __init myrb_init_module(void)
+{
+       int ret;
+
+       myrb_raid_template = raid_class_attach(&myrb_raid_functions);
+       if (!myrb_raid_template)
+               return -ENODEV;
+
+       ret = pci_register_driver(&myrb_pci_driver);
+       if (ret)
+               raid_class_release(myrb_raid_template);
+
+       return ret;
+}
+
/* Module exit: undo myrb_init_module() in reverse order. */
static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}
+
module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
new file mode 100644 (file)
index 0000000..9289c19
--- /dev/null
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
#ifndef MYRB_H
#define MYRB_H

/* Controller topology and queueing limits. */
#define MYRB_MAX_LDEVS			32
#define MYRB_MAX_CHANNELS		3
#define MYRB_MAX_TARGETS		16
#define MYRB_MAX_PHYSICAL_DEVICES	45
#define MYRB_SCATTER_GATHER_LIMIT	32
#define MYRB_CMD_MBOX_COUNT		256
#define MYRB_STAT_MBOX_COUNT		1024

#define MYRB_BLKSIZE_BITS		9
/* Poll iterations while waiting for firmware init / mailbox ops. */
#define MYRB_MAILBOX_TIMEOUT		1000000

/* Reserved command tags; SCSI command tags start at 3. */
#define MYRB_DCMD_TAG			1
#define MYRB_MCMD_TAG			2

#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/*
+ * DAC960 V1 Firmware Command Opcodes.
+ */
enum myrb_cmd_opcode {
	/* Values are fixed by the DAC960 V1 firmware ABI -- do not change. */
	/* I/O Commands */
	MYRB_CMD_READ_EXTENDED =	0x33,
	MYRB_CMD_WRITE_EXTENDED =	0x34,
	MYRB_CMD_READAHEAD_EXTENDED =	0x35,
	MYRB_CMD_READ_EXTENDED_SG =	0xB3,
	MYRB_CMD_WRITE_EXTENDED_SG =	0xB4,
	MYRB_CMD_READ =			0x36,
	MYRB_CMD_READ_SG =		0xB6,
	MYRB_CMD_WRITE =		0x37,
	MYRB_CMD_WRITE_SG =		0xB7,
	MYRB_CMD_DCDB =			0x04,
	MYRB_CMD_DCDB_SG =		0x84,
	MYRB_CMD_FLUSH =		0x0A,
	/* Controller Status Related Commands */
	MYRB_CMD_ENQUIRY =		0x53,
	MYRB_CMD_ENQUIRY2 =		0x1C,
	MYRB_CMD_GET_LDRV_ELEMENT =	0x55,
	MYRB_CMD_GET_LDEV_INFO =	0x19,
	MYRB_CMD_IOPORTREAD =		0x39,
	MYRB_CMD_IOPORTWRITE =		0x3A,
	MYRB_CMD_GET_SD_STATS =		0x3E,
	MYRB_CMD_GET_PD_STATS =		0x3F,
	MYRB_CMD_EVENT_LOG_OPERATION =	0x72,
	/* Device Related Commands */
	MYRB_CMD_START_DEVICE =		0x10,
	MYRB_CMD_GET_DEVICE_STATE =	0x50,
	MYRB_CMD_STOP_CHANNEL =		0x13,
	MYRB_CMD_START_CHANNEL =	0x12,
	MYRB_CMD_RESET_CHANNEL =	0x1A,
	/* Commands Associated with Data Consistency and Errors */
	MYRB_CMD_REBUILD =		0x09,
	MYRB_CMD_REBUILD_ASYNC =	0x16,
	MYRB_CMD_CHECK_CONSISTENCY =	0x0F,
	MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
	MYRB_CMD_REBUILD_STAT =		0x0C,
	MYRB_CMD_GET_REBUILD_PROGRESS =	0x27,
	MYRB_CMD_REBUILD_CONTROL =	0x1F,
	MYRB_CMD_READ_BADBLOCK_TABLE =	0x0B,
	MYRB_CMD_READ_BADDATA_TABLE =	0x25,
	MYRB_CMD_CLEAR_BADDATA_TABLE =	0x26,
	MYRB_CMD_GET_ERROR_TABLE =	0x17,
	MYRB_CMD_ADD_CAPACITY_ASYNC =	0x2A,
	MYRB_CMD_BGI_CONTROL =		0x2B,
	/* Configuration Related Commands */
	MYRB_CMD_READ_CONFIG2 =		0x3D,
	MYRB_CMD_WRITE_CONFIG2 =	0x3C,
	MYRB_CMD_READ_CONFIG_ONDISK =	0x4A,
	MYRB_CMD_WRITE_CONFIG_ONDISK =	0x4B,
	MYRB_CMD_READ_CONFIG =		0x4E,
	MYRB_CMD_READ_BACKUP_CONFIG =	0x4D,
	MYRB_CMD_WRITE_CONFIG =		0x4F,
	MYRB_CMD_ADD_CONFIG =		0x4C,
	MYRB_CMD_READ_CONFIG_LABEL =	0x48,
	MYRB_CMD_WRITE_CONFIG_LABEL =	0x49,
	/* Firmware Upgrade Related Commands */
	MYRB_CMD_LOAD_IMAGE =		0x20,
	MYRB_CMD_STORE_IMAGE =		0x21,
	MYRB_CMD_PROGRAM_IMAGE =	0x22,
	/* Diagnostic Commands */
	MYRB_CMD_SET_DIAGNOSTIC_MODE =	0x31,
	MYRB_CMD_RUN_DIAGNOSTIC =	0x32,
	/* Subsystem Service Commands */
	MYRB_CMD_GET_SUBSYS_DATA =	0x70,
	MYRB_CMD_SET_SUBSYS_PARAM =	0x71,
	/* Version 2.xx Firmware Commands */
	MYRB_CMD_ENQUIRY_OLD =		0x05,
	MYRB_CMD_GET_DEVICE_STATE_OLD =	0x14,
	MYRB_CMD_READ_OLD =		0x02,
	MYRB_CMD_WRITE_OLD =		0x03,
	MYRB_CMD_READ_SG_OLD =		0x82,
	MYRB_CMD_WRITE_SG_OLD =		0x83
} __packed;
+
+/*
+ * DAC960 V1 Firmware Command Status Codes.
+ */
/*
 * Several numeric values repeat across groups; the trailing comment
 * names the command class that gives the code its meaning.
 */
#define MYRB_STATUS_SUCCESS			0x0000	/* Common */
#define MYRB_STATUS_CHECK_CONDITION		0x0002	/* Common */
#define MYRB_STATUS_NO_DEVICE			0x0102	/* Common */
#define MYRB_STATUS_INVALID_ADDRESS		0x0105	/* Common */
#define MYRB_STATUS_INVALID_PARAM		0x0105	/* Common */
#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR	0x0001	/* I/O */
#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002	/* I/O */
#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV	0x0105	/* I/O */
#define MYRB_STATUS_BAD_DATA			0x010C	/* I/O */
#define MYRB_STATUS_DEVICE_BUSY			0x0008	/* DCDB */
#define MYRB_STATUS_DEVICE_NONRESPONSIVE	0x000E	/* DCDB */
#define MYRB_STATUS_COMMAND_TERMINATED		0x000F	/* DCDB */
#define MYRB_STATUS_START_DEVICE_FAILED		0x0002	/* Device */
#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET	0x0105	/* Device */
#define MYRB_STATUS_CHANNEL_BUSY		0x0106	/* Device */
#define MYRB_STATUS_OUT_OF_MEMORY		0x0107	/* Device */
#define MYRB_STATUS_CHANNEL_NOT_STOPPED		0x0002	/* Device */
#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002	/* Consistency */
#define MYRB_STATUS_RBLD_BADBLOCKS		0x0003	/* Consistency */
#define MYRB_STATUS_RBLD_NEW_DISK_FAILED	0x0004	/* Consistency */
#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS	0x0106	/* Consistency */
#define MYRB_STATUS_DEPENDENT_DISK_DEAD		0x0002	/* Consistency */
#define MYRB_STATUS_INCONSISTENT_BLOCKS		0x0003	/* Consistency */
#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS	0x0105	/* Consistency */
#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID	0x0000	/* Consistency */
#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE	0x0002	/* Consistency */
#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS	0x0003	/* Consistency */
#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004	/* Consistency */
#define MYRB_STATUS_RBLD_SUCCESS		0x0100	/* Consistency */
#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED	0x0107	/* Consistency */
#define MYRB_STATUS_RBLD_NOT_CHECKED		0x0108	/* Consistency */
#define MYRB_STATUS_BGI_SUCCESS			0x0100	/* Consistency */
#define MYRB_STATUS_BGI_ABORTED			0x0005	/* Consistency */
#define MYRB_STATUS_NO_BGI_INPROGRESS		0x0105	/* Consistency */
#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS	0x0004	/* Consistency */
#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
#define MYRB_STATUS_CONFIG2_CSUM_ERROR		0x0002	/* Configuration */
#define MYRB_STATUS_CONFIGURATION_SUSPENDED	0x0106	/* Configuration */
#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM	0x0105	/* Configuration */
#define MYRB_STATUS_CONFIGURATION_NOT_SAVED	0x0106	/* Configuration */
#define MYRB_STATUS_SUBSYS_NOTINSTALLED		0x0001	/* Subsystem */
#define MYRB_STATUS_SUBSYS_FAILED		0x0002	/* Subsystem */
#define MYRB_STATUS_SUBSYS_BUSY			0x0106	/* Subsystem */
#define MYRB_STATUS_SUBSYS_TIMEOUT		0x0108	/* Subsystem */
+
+/*
+ * DAC960 V1 Firmware Enquiry Command reply structure.
+ */
struct myrb_enquiry {
	/* Layout mirrors the firmware reply -- do not reorder or repack. */
	unsigned char ldev_count;			/* Byte 0 */
	unsigned int rsvd1:24;				/* Bytes 1-3 */
	unsigned int ldev_sizes[32];			/* Bytes 4-131 */
	unsigned short flash_age;			/* Bytes 132-133 */
	struct {
		unsigned char deferred:1;		/* Byte 134 Bit 0 */
		unsigned char low_bat:1;		/* Byte 134 Bit 1 */
		unsigned char rsvd2:6;			/* Byte 134 Bits 2-7 */
	} status;
	unsigned char rsvd3:8;				/* Byte 135 */
	unsigned char fw_minor_version;			/* Byte 136 */
	unsigned char fw_major_version;			/* Byte 137 */
	enum {
		MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS =	0x00,
		MYRB_STDBY_RBLD_IN_PROGRESS =			0x01,
		MYRB_BG_RBLD_IN_PROGRESS =			0x02,
		MYRB_BG_CHECK_IN_PROGRESS =			0x03,
		MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR =		0xFF,
		MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED =	0xF0,
		MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED =	0xF1,
		MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER =		0xF2,
		MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED =	0xF3
	} __packed rbld;		/* Byte 138 */
	unsigned char max_tcq;				/* Byte 139 */
	unsigned char ldev_offline;			/* Byte 140 */
	unsigned char rsvd4:8;				/* Byte 141 */
	unsigned short ev_seq;				/* Bytes 142-143 */
	unsigned char ldev_critical;			/* Byte 144 */
	unsigned int rsvd5:24;				/* Bytes 145-147 */
	unsigned char pdev_dead;			/* Byte 148 */
	unsigned char rsvd6:8;				/* Byte 149 */
	unsigned char rbld_count;			/* Byte 150 */
	struct {
		unsigned char rsvd7:3;			/* Byte 151 Bits 0-2 */
		unsigned char bbu_present:1;		/* Byte 151 Bit 3 */
		unsigned char rsvd8:4;			/* Byte 151 Bits 4-7 */
	} misc;
	struct {
		unsigned char target;
		unsigned char channel;
	} dead_drives[21];				/* Bytes 152-194 */
	unsigned char rsvd9[62];			/* Bytes 195-255 */
} __packed;
+
+/*
+ * DAC960 V1 Firmware Enquiry2 Command reply structure.
+ */
struct myrb_enquiry2 {
	/* Layout mirrors the firmware reply -- do not reorder or repack. */
	struct {
		enum {
			DAC960_V1_P_PD_PU =			0x01,
			DAC960_V1_PL =				0x02,
			DAC960_V1_PG =				0x10,
			DAC960_V1_PJ =				0x11,
			DAC960_V1_PR =				0x12,
			DAC960_V1_PT =				0x13,
			DAC960_V1_PTL0 =			0x14,
			DAC960_V1_PRL =				0x15,
			DAC960_V1_PTL1 =			0x16,
			DAC960_V1_1164P =			0x20
		} __packed sub_model;		/* Byte 0 */
		unsigned char actual_channels;			/* Byte 1 */
		enum {
			MYRB_5_CHANNEL_BOARD =		0x01,
			MYRB_3_CHANNEL_BOARD =		0x02,
			MYRB_2_CHANNEL_BOARD =		0x03,
			MYRB_3_CHANNEL_ASIC_DAC =	0x04
		} __packed model;		/* Byte 2 */
		enum {
			MYRB_EISA_CONTROLLER =		0x01,
			MYRB_MCA_CONTROLLER =		0x02,
			MYRB_PCI_CONTROLLER =		0x03,
			MYRB_SCSI_TO_SCSI =		0x08
		} __packed controller;	/* Byte 3 */
	} hw;						/* Bytes 0-3 */
	/* MajorVersion.MinorVersion-FirmwareType-TurnID */
	struct {
		unsigned char major_version;		/* Byte 4 */
		unsigned char minor_version;		/* Byte 5 */
		unsigned char turn_id;			/* Byte 6 */
		char firmware_type;			/* Byte 7 */
	} fw;						/* Bytes 4-7 */
	unsigned int rsvd1;				/* Byte 8-11 */
	unsigned char cfg_chan;				/* Byte 12 */
	unsigned char cur_chan;				/* Byte 13 */
	unsigned char max_targets;			/* Byte 14 */
	unsigned char max_tcq;				/* Byte 15 */
	unsigned char max_ldev;				/* Byte 16 */
	unsigned char max_arms;				/* Byte 17 */
	unsigned char max_spans;			/* Byte 18 */
	unsigned char rsvd2;				/* Byte 19 */
	unsigned int rsvd3;				/* Bytes 20-23 */
	unsigned int mem_size;				/* Bytes 24-27 */
	unsigned int cache_size;			/* Bytes 28-31 */
	unsigned int flash_size;			/* Bytes 32-35 */
	unsigned int nvram_size;			/* Bytes 36-39 */
	struct {
		enum {
			MYRB_RAM_TYPE_DRAM =		0x0,
			MYRB_RAM_TYPE_EDO =			0x1,
			MYRB_RAM_TYPE_SDRAM =		0x2,
			MYRB_RAM_TYPE_Last =		0x7
		} __packed ram:3;	/* Byte 40 Bits 0-2 */
		enum {
			MYRB_ERR_CORR_None =	0x0,
			MYRB_ERR_CORR_Parity =	0x1,
			MYRB_ERR_CORR_ECC =		0x2,
			MYRB_ERR_CORR_Last =	0x7
		} __packed ec:3;	/* Byte 40 Bits 3-5 */
		unsigned char fast_page:1;		/* Byte 40 Bit 6 */
		unsigned char low_power:1;		/* Byte 40 Bit 7 */
		unsigned char rsvd4;			/* Bytes 41 */
	} mem_type;
	unsigned short clock_speed;			/* Bytes 42-43 */
	unsigned short mem_speed;			/* Bytes 44-45 */
	unsigned short hw_speed;			/* Bytes 46-47 */
	unsigned char rsvd5[12];			/* Bytes 48-59 */
	unsigned short max_cmds;			/* Bytes 60-61 */
	unsigned short max_sge;				/* Bytes 62-63 */
	unsigned short max_drv_cmds;			/* Bytes 64-65 */
	unsigned short max_io_desc;			/* Bytes 66-67 */
	unsigned short max_sectors;			/* Bytes 68-69 */
	unsigned char latency;				/* Byte 70 */
	unsigned char rsvd6;				/* Byte 71 */
	unsigned char scsi_tmo;				/* Byte 72 */
	unsigned char rsvd7;				/* Byte 73 */
	unsigned short min_freelines;			/* Bytes 74-75 */
	unsigned char rsvd8[8];				/* Bytes 76-83 */
	unsigned char rbld_rate_const;			/* Byte 84 */
	unsigned char rsvd9[11];			/* Byte 85-95 */
	unsigned short pdrv_block_size;			/* Bytes 96-97 */
	unsigned short ldev_block_size;			/* Bytes 98-99 */
	unsigned short max_blocks_per_cmd;		/* Bytes 100-101 */
	unsigned short block_factor;			/* Bytes 102-103 */
	unsigned short cacheline_size;			/* Bytes 104-105 */
	struct {
		enum {
			MYRB_WIDTH_NARROW_8BIT =		0x0,
			MYRB_WIDTH_WIDE_16BIT =			0x1,
			MYRB_WIDTH_WIDE_32BIT =			0x2
		} __packed bus_width:2;	/* Byte 106 Bits 0-1 */
		enum {
			MYRB_SCSI_SPEED_FAST =			0x0,
			MYRB_SCSI_SPEED_ULTRA =			0x1,
			MYRB_SCSI_SPEED_ULTRA2 =		0x2
		} __packed bus_speed:2;	/* Byte 106 Bits 2-3 */
		unsigned char differential:1;		/* Byte 106 Bit 4 */
		unsigned char rsvd10:3;			/* Byte 106 Bits 5-7 */
	} scsi_cap;
	unsigned char rsvd11[5];			/* Byte 107-111 */
	unsigned short fw_build;			/* Bytes 112-113 */
	enum {
		MYRB_FAULT_AEMI =				0x01,
		MYRB_FAULT_OEM1 =				0x02,
		MYRB_FAULT_OEM2 =				0x04,
		MYRB_FAULT_OEM3 =				0x08,
		MYRB_FAULT_CONNER =				0x10,
		MYRB_FAULT_SAFTE =				0x20
	} __packed fault_mgmt;		/* Byte 114 */
	unsigned char rsvd12;				/* Byte 115 */
	struct {
		unsigned int clustering:1;		/* Byte 116 Bit 0 */
		unsigned int online_RAID_expansion:1;	/* Byte 116 Bit 1 */
		unsigned int readahead:1;		/* Byte 116 Bit 2 */
		unsigned int bgi:1;			/* Byte 116 Bit 3 */
		unsigned int rsvd13:28;			/* Bytes 116-119 */
	} fw_features;
	unsigned char rsvd14[8];			/* Bytes 120-127 */
} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive State type.
+ * Values are the firmware's own encoding; __packed keeps the enum one
+ * byte wide so it can be embedded in reply structures at byte offsets.
+ */
+enum myrb_devstate {
+       MYRB_DEVICE_DEAD =              0x00,
+       MYRB_DEVICE_WO =                0x02,   /* presumably "write only" — confirm vs firmware spec */
+       MYRB_DEVICE_ONLINE =            0x03,
+       MYRB_DEVICE_CRITICAL =          0x04,
+       MYRB_DEVICE_STANDBY =           0x10,
+       MYRB_DEVICE_OFFLINE =           0xFF
+} __packed;
+
+/*
+ * DAC960 V1 RAID Levels
+ * Firmware encoding of a logical drive's RAID level.
+ */
+enum myrb_raidlevel {
+       MYRB_RAID_LEVEL0 =              0x0,     /* RAID 0 */
+       MYRB_RAID_LEVEL1 =              0x1,     /* RAID 1 */
+       MYRB_RAID_LEVEL3 =              0x3,     /* RAID 3 */
+       MYRB_RAID_LEVEL5 =              0x5,     /* RAID 5 */
+       MYRB_RAID_LEVEL6 =              0x6,     /* RAID 6 */
+       MYRB_RAID_JBOD =                0x7,     /* RAID 7 (JBOD) */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive Information structure.
+ * 8 bytes total per the byte-offset comments.
+ * NOTE(review): bit-field ordering within 'Byte 5' assumes the usual
+ * little-endian GCC allocation (LSB first); there is no __packed here,
+ * unlike the neighbouring reply structures — confirm layout intent.
+ */
+struct myrb_ldev_info {
+       unsigned int size;                              /* Bytes 0-3 */
+       enum myrb_devstate state;                       /* Byte 4 */
+       unsigned int raid_level:7;                      /* Byte 5 Bits 0-6 */
+       unsigned int wb_enabled:1;                      /* Byte 5 Bit 7 */
+       unsigned int rsvd:16;                           /* Bytes 6-7 */
+};
+
+/*
+ * DAC960 V1 Firmware Perform Event Log Operation Types.
+ */
+#define DAC960_V1_GetEventLogEntry             0x00
+
+/*
+ * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+ * 32 bytes: addressing info plus up to 26 bytes of SCSI sense data.
+ */
+struct myrb_log_entry {
+       unsigned char msg_type;                 /* Byte 0 */
+       unsigned char msg_len;                  /* Byte 1 */
+       unsigned char target:5;                 /* Byte 2 Bits 0-4 */
+       unsigned char channel:3;                /* Byte 2 Bits 5-7 */
+       unsigned char lun:6;                    /* Byte 3 Bits 0-5 */
+       unsigned char rsvd1:2;                  /* Byte 3 Bits 6-7 */
+       unsigned short seq_num;                 /* Bytes 4-5 */
+       unsigned char sense[26];                /* Bytes 6-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Get Device State Command reply structure.
+ * The structure is padded by 2 bytes for compatibility with Version 2.xx
+ * Firmware.
+ */
+struct myrb_pdev_state {
+       unsigned int present:1;                 /* Byte 0 Bit 0 */
+       unsigned int :7;                                /* Byte 0 Bits 1-7 */
+       enum {
+               MYRB_TYPE_OTHER =                       0x0,
+               MYRB_TYPE_DISK =                        0x1,
+               MYRB_TYPE_TAPE =                        0x2,
+               MYRB_TYPE_CDROM_OR_WORM =               0x3
+       } __packed devtype:2;           /* Byte 1 Bits 0-1 */
+       unsigned int rsvd1:1;                           /* Byte 1 Bit 2 */
+       unsigned int fast20:1;                          /* Byte 1 Bit 3 */
+       unsigned int sync:1;                            /* Byte 1 Bit 4 */
+       unsigned int fast:1;                            /* Byte 1 Bit 5 */
+       unsigned int wide:1;                            /* Byte 1 Bit 6 */
+       unsigned int tcq_supported:1;                   /* Byte 1 Bit 7 */
+       enum myrb_devstate state;                       /* Byte 2 */
+       unsigned int rsvd2:8;                           /* Byte 3 */
+       /* NOTE(review): 'unsigned int' is 4 bytes, but the byte map expects
+        * a single byte at offset 4; with this width the following offsets
+        * (sync_offset at byte 5, size at bytes 6-9) cannot line up even
+        * __packed.  The legacy DAC960 driver used 'unsigned char' here —
+        * confirm against the firmware specification.
+        */
+       unsigned int sync_multiplier;                   /* Byte 4 */
+       unsigned int sync_offset:5;                     /* Byte 5 Bits 0-4 */
+       unsigned int rsvd3:3;                           /* Byte 5 Bits 5-7 */
+       unsigned int size;                              /* Bytes 6-9 */
+       unsigned int rsvd4:16;                  /* Bytes 10-11 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+ * blocks_left counts down toward 0 as the rebuild proceeds.
+ */
+struct myrb_rbld_progress {
+       unsigned int ldev_num;                          /* Bytes 0-3 */
+       unsigned int ldev_size;                         /* Bytes 4-7 */
+       unsigned int blocks_left;                       /* Bytes 8-11 */
+};
+
+/*
+ * DAC960 V1 Firmware Background Initialization Status Command reply structure.
+ * 32 bytes; 'status' is a one-byte (__packed) firmware state code.
+ */
+struct myrb_bgi_status {
+       unsigned int ldev_size;                         /* Bytes 0-3 */
+       unsigned int blocks_done;                       /* Bytes 4-7 */
+       unsigned char rsvd1[12];                        /* Bytes 8-19 */
+       unsigned int ldev_num;                          /* Bytes 20-23 */
+       unsigned char raid_level;                       /* Byte 24 */
+       enum {
+               MYRB_BGI_INVALID =      0x00,
+               MYRB_BGI_STARTED =      0x02,
+               MYRB_BGI_INPROGRESS =   0x04,
+               MYRB_BGI_SUSPENDED =    0x05,
+               MYRB_BGI_CANCELLED =    0x06
+       } __packed status;              /* Byte 25 */
+       unsigned char rsvd2[6];                         /* Bytes 26-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Error Table Entry structure.
+ * Per-device error counters, one byte each.
+ */
+struct myrb_error_entry {
+       unsigned char parity_err;                       /* Byte 0 */
+       unsigned char soft_err;                         /* Byte 1 */
+       unsigned char hard_err;                         /* Byte 2 */
+       unsigned char misc_err;                         /* Byte 3 */
+};
+
+/*
+ * DAC960 V1 Firmware Read Config2 Command reply structure.
+ * 64 bytes, ending in a 16-bit checksum.
+ */
+struct myrb_config2 {
+       unsigned rsvd1:1;                               /* Byte 0 Bit 0 */
+       unsigned active_negation:1;                     /* Byte 0 Bit 1 */
+       unsigned rsvd2:5;                               /* Byte 0 Bits 2-6 */
+       unsigned no_rescan_on_reset_during_scan:1;      /* Byte 0 Bit 7 */
+       unsigned StorageWorks_support:1;                /* Byte 1 Bit 0 */
+       unsigned HewlettPackard_support:1;              /* Byte 1 Bit 1 */
+       unsigned no_disconnect_on_first_command:1;      /* Byte 1 Bit 2 */
+       unsigned rsvd3:2;                               /* Byte 1 Bits 3-4 */
+       unsigned AEMI_ARM:1;                            /* Byte 1 Bit 5 */
+       unsigned AEMI_OFM:1;                            /* Byte 1 Bit 6 */
+       unsigned rsvd4:1;                               /* Byte 1 Bit 7 */
+       enum {
+               MYRB_OEMID_MYLEX =              0x00,
+               MYRB_OEMID_IBM =                0x08,
+               MYRB_OEMID_HP =                 0x0A,
+               MYRB_OEMID_DEC =                0x0C,
+               MYRB_OEMID_SIEMENS =            0x10,
+               MYRB_OEMID_INTEL =              0x12
+       } __packed OEMID;               /* Byte 2 */
+       unsigned char oem_model_number;                 /* Byte 3 */
+       unsigned char physical_sector;                  /* Byte 4 */
+       unsigned char logical_sector;                   /* Byte 5 */
+       unsigned char block_factor;                     /* Byte 6 */
+       unsigned readahead_enabled:1;                   /* Byte 7 Bit 0 */
+       unsigned low_BIOS_delay:1;                      /* Byte 7 Bit 1 */
+       unsigned rsvd5:2;                               /* Byte 7 Bits 2-3 */
+       unsigned restrict_reassign_to_one_sector:1;     /* Byte 7 Bit 4 */
+       unsigned rsvd6:1;                               /* Byte 7 Bit 5 */
+       unsigned FUA_during_write_recovery:1;           /* Byte 7 Bit 6 */
+       unsigned enable_LeftSymmetricRAID5Algorithm:1;  /* Byte 7 Bit 7 */
+       unsigned char default_rebuild_rate;             /* Byte 8 */
+       unsigned char rsvd7;                            /* Byte 9 */
+       unsigned char blocks_per_cacheline;             /* Byte 10 */
+       unsigned char blocks_per_stripe;                /* Byte 11 */
+       /* One byte of SCSI parameters per channel (bytes 12-17). */
+       struct {
+               enum {
+                       MYRB_SPEED_ASYNC =              0x0,
+                       MYRB_SPEED_SYNC_8MHz =          0x1,
+                       MYRB_SPEED_SYNC_5MHz =          0x2,
+                       MYRB_SPEED_SYNC_10_OR_20MHz =   0x3
+               } __packed speed:2;     /* Bits 0-1 of each channel byte */
+               unsigned force_8bit:1;                  /* Bit 2 */
+               unsigned disable_fast20:1;              /* Bit 3 */
+               unsigned rsvd8:3;                       /* Bits 4-6 */
+               unsigned enable_tcq:1;                  /* Bit 7 */
+       } __packed channelparam[6];     /* Bytes 12-17 */
+       unsigned char SCSIInitiatorID;                  /* Byte 18 */
+       unsigned char rsvd9;                            /* Byte 19 */
+       enum {
+               MYRB_STARTUP_CONTROLLER_SPINUP =        0x00,
+               MYRB_STARTUP_POWERON_SPINUP =           0x01
+       } __packed startup;             /* Byte 20 */
+       unsigned char simultaneous_device_spinup_count; /* Byte 21 */
+       unsigned char seconds_delay_between_spinups;    /* Byte 22 */
+       unsigned char rsvd10[29];                       /* Bytes 23-51 */
+       unsigned BIOS_disabled:1;                       /* Byte 52 Bit 0 */
+       unsigned CDROM_boot_enabled:1;                  /* Byte 52 Bit 1 */
+       unsigned rsvd11:3;                              /* Byte 52 Bits 2-4 */
+       enum {
+               MYRB_GEOM_128_32 =              0x0,
+               MYRB_GEOM_255_63 =              0x1,
+               MYRB_GEOM_RESERVED1 =           0x2,
+               MYRB_GEOM_RESERVED2 =           0x3
+       } __packed drive_geometry:2;    /* Byte 52 Bits 5-6 */
+       unsigned rsvd12:1;                              /* Byte 52 Bit 7 */
+       unsigned char rsvd13[9];                        /* Bytes 53-61 */
+       unsigned short csum;                            /* Bytes 62-63 */
+};
+
+/*
+ * DAC960 V1 Firmware DCDB request structure.
+ * Direct CDB pass-through to a physical device.  The transfer length is
+ * 20 bits, split across xfer_len_lo (low 16) and xfer_len_hi4 (high 4).
+ */
+struct myrb_dcdb {
+       unsigned target:4;                               /* Byte 0 Bits 0-3 */
+       unsigned channel:4;                              /* Byte 0 Bits 4-7 */
+       enum {
+               MYRB_DCDB_XFER_NONE =           0,
+               MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1,
+               MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2,
+               MYRB_DCDB_XFER_ILLEGAL =        3
+       } __packed data_xfer:2;                         /* Byte 1 Bits 0-1 */
+       unsigned early_status:1;                        /* Byte 1 Bit 2 */
+       unsigned rsvd1:1;                               /* Byte 1 Bit 3 */
+       enum {
+               MYRB_DCDB_TMO_24_HRS =  0,
+               MYRB_DCDB_TMO_10_SECS = 1,
+               MYRB_DCDB_TMO_60_SECS = 2,
+               MYRB_DCDB_TMO_10_MINS = 3
+       } __packed timeout:2;                           /* Byte 1 Bits 4-5 */
+       unsigned no_autosense:1;                        /* Byte 1 Bit 6 */
+       unsigned allow_disconnect:1;                    /* Byte 1 Bit 7 */
+       unsigned short xfer_len_lo;                     /* Bytes 2-3 */
+       u32 dma_addr;                                   /* Bytes 4-7 */
+       unsigned char cdb_len:4;                        /* Byte 8 Bits 0-3 */
+       unsigned char xfer_len_hi4:4;                   /* Byte 8 Bits 4-7 */
+       unsigned char sense_len;                        /* Byte 9 */
+       unsigned char cdb[12];                          /* Bytes 10-21 */
+       unsigned char sense[64];                        /* Bytes 22-85 */
+       unsigned char status;                           /* Byte 86 */
+       unsigned char rsvd2;                            /* Byte 87 */
+};
+
+/*
+ * DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
+ * 32 Bit Byte Count structure.
+ */
+struct myrb_sge {
+       u32 sge_addr;           /* Bytes 0-3: segment DMA address */
+       u32 sge_count;          /* Bytes 4-7: segment byte count */
+};
+
+/*
+ * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
+ * Bytes 13-15 are not used.  The structure is padded to 16 bytes for
+ * efficient access.
+ *
+ * All typed variants share the same first two bytes: the command opcode
+ * (byte 0) and the command id used to match the status mailbox (byte 1).
+ * words[]/bytes[] give raw word/byte access to the same 16-byte image.
+ */
+union myrb_cmd_mbox {
+       unsigned int words[4];                          /* Words 0-3 */
+       unsigned char bytes[16];                        /* Bytes 0-15 */
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char rsvd[14];                 /* Bytes 2-15 */
+       } __packed common;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char rsvd1[6];                 /* Bytes 2-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char rsvd2[4];                 /* Bytes 12-15 */
+       } __packed type3;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char optype;                   /* Byte 2 */
+               unsigned char rsvd1[5];                 /* Bytes 3-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char rsvd2[4];                 /* Bytes 12-15 */
+       } __packed type3B;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char rsvd1[5];                 /* Bytes 2-6 */
+               unsigned char ldev_num:6;               /* Byte 7 Bits 0-6 */
+               unsigned char auto_restore:1;           /* Byte 7 Bit 7 */
+               unsigned char rsvd2[8];                 /* Bytes 8-15 */
+       } __packed type3C;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char channel;                  /* Byte 2 */
+               unsigned char target;                   /* Byte 3 */
+               enum myrb_devstate state;               /* Byte 4 */
+               unsigned char rsvd1[3];                 /* Bytes 5-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char rsvd2[4];                 /* Bytes 12-15 */
+       } __packed type3D;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char optype;                   /* Byte 2 */
+               unsigned char opqual;                   /* Byte 3 */
+               unsigned short ev_seq;                  /* Bytes 4-5 */
+               unsigned char rsvd1[2];                 /* Bytes 6-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char rsvd2[4];                 /* Bytes 12-15 */
+       } __packed type3E;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char rsvd1[2];                 /* Bytes 2-3 */
+               unsigned char rbld_rate;                /* Byte 4 */
+               unsigned char rsvd2[3];                 /* Bytes 5-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char rsvd3[4];                 /* Bytes 12-15 */
+       } __packed type3R;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned short xfer_len;                /* Bytes 2-3 */
+               unsigned int lba;                       /* Bytes 4-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char ldev_num;                 /* Byte 12 */
+               unsigned char rsvd[3];                  /* Bytes 13-15 */
+       } __packed type4;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               /* xfer_len and ldev_num are packed into bytes 2-3. */
+               struct {
+                       unsigned short xfer_len:11;     /* Bytes 2-3 */
+                       unsigned char ldev_num:5;       /* Byte 3 Bits 3-7 */
+               } __packed ld;
+               unsigned int lba;                       /* Bytes 4-7 */
+               u32 addr;                               /* Bytes 8-11 */
+               unsigned char sg_count:6;               /* Byte 12 Bits 0-5 */
+               enum {
+                       MYRB_SGL_ADDR32_COUNT32 = 0x0,
+                       MYRB_SGL_ADDR32_COUNT16 = 0x1,
+                       MYRB_SGL_COUNT32_ADDR32 = 0x2,
+                       MYRB_SGL_COUNT16_ADDR32 = 0x3
+               } __packed sg_type:2;   /* Byte 12 Bits 6-7 */
+               unsigned char rsvd[3];                  /* Bytes 13-15 */
+       } __packed type5;
+       struct {
+               enum myrb_cmd_opcode opcode;            /* Byte 0 */
+               unsigned char id;                       /* Byte 1 */
+               unsigned char opcode2;                  /* Byte 2 */
+               unsigned char rsvd1:8;                  /* Byte 3 */
+               u32 cmd_mbox_addr;                      /* Bytes 4-7 */
+               u32 stat_mbox_addr;                     /* Bytes 8-11 */
+               unsigned char rsvd2[4];                 /* Bytes 12-15 */
+       } __packed typeX;
+};
+
+/*
+ * DAC960 V1 Firmware Controller Status Mailbox structure.
+ * 'id' matches the command mailbox id; 'valid' flags a live entry.
+ */
+struct myrb_stat_mbox {
+       unsigned char id;               /* Byte 0 */
+       unsigned char rsvd:7;           /* Byte 1 Bits 0-6 */
+       unsigned char valid:1;                  /* Byte 1 Bit 7 */
+       unsigned short status;          /* Bytes 2-3 */
+};
+
+/* Per-command driver state: mailbox image, completion status, and the
+ * DMA-mapped DCDB / scatter-gather list (with their bus addresses).
+ */
+struct myrb_cmdblk {
+       union myrb_cmd_mbox mbox;       /* command mailbox image sent to hw */
+       unsigned short status;          /* status returned by the firmware */
+       struct completion *completion;  /* signalled when the command completes */
+       struct myrb_dcdb *dcdb;         /* optional pass-through CDB block */
+       dma_addr_t dcdb_addr;           /* bus address of *dcdb */
+       struct myrb_sge *sgl;           /* optional scatter/gather list */
+       dma_addr_t sgl_addr;            /* bus address of *sgl */
+};
+
+/* Per-controller (HBA) state for the myrb driver. */
+struct myrb_hba {
+       unsigned int ldev_block_size;
+       unsigned char ldev_geom_heads;
+       unsigned char ldev_geom_sectors;
+       unsigned char bus_width;
+       unsigned short stripe_size;
+       unsigned short segment_size;
+       unsigned short new_ev_seq;
+       unsigned short old_ev_seq;
+       bool dual_mode_interface;
+       bool bgi_status_supported;
+       bool safte_enabled;
+       bool need_ldev_info;
+       bool need_err_info;
+       bool need_rbld;
+       bool need_cc_status;
+       bool need_bgi_status;
+       bool rbld_first;
+
+       struct pci_dev *pdev;
+       struct Scsi_Host *host;
+
+       /* Periodic monitoring work. */
+       struct workqueue_struct *work_q;
+       char work_q_name[20];
+       struct delayed_work monitor_work;
+       unsigned long primary_monitor_time;
+       unsigned long secondary_monitor_time;
+
+       struct dma_pool *sg_pool;
+       struct dma_pool *dcdb_pool;
+
+       spinlock_t queue_lock;
+
+       /* Hardware-variant hooks installed at probe time. */
+       void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
+       void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
+                              union myrb_cmd_mbox *cmd_mbox);
+       void (*get_cmd_mbox)(void __iomem *base);
+       void (*disable_intr)(void __iomem *base);
+       void (*reset)(void __iomem *base);
+
+       unsigned int ctlr_num;
+       unsigned char model_name[20];
+       unsigned char fw_version[12];
+
+       unsigned int irq;
+       phys_addr_t io_addr;
+       phys_addr_t pci_addr;
+       void __iomem *io_base;
+       void __iomem *mmio_base;
+
+       /* Ring of DMA-coherent command mailboxes shared with the firmware. */
+       size_t cmd_mbox_size;
+       dma_addr_t cmd_mbox_addr;
+       union myrb_cmd_mbox *first_cmd_mbox;
+       union myrb_cmd_mbox *last_cmd_mbox;
+       union myrb_cmd_mbox *next_cmd_mbox;
+       union myrb_cmd_mbox *prev_cmd_mbox1;
+       union myrb_cmd_mbox *prev_cmd_mbox2;
+
+       /* Ring of DMA-coherent status mailboxes written by the firmware. */
+       size_t stat_mbox_size;
+       dma_addr_t stat_mbox_addr;
+       struct myrb_stat_mbox *first_stat_mbox;
+       struct myrb_stat_mbox *last_stat_mbox;
+       struct myrb_stat_mbox *next_stat_mbox;
+
+       struct myrb_cmdblk dcmd_blk;
+       struct myrb_cmdblk mcmd_blk;
+       struct mutex dcmd_mutex;
+
+       struct myrb_enquiry *enquiry;
+       dma_addr_t enquiry_addr;
+
+       struct myrb_error_entry *err_table;
+       dma_addr_t err_table_addr;
+
+       unsigned short last_rbld_status;
+
+       struct myrb_ldev_info *ldev_info_buf;
+       dma_addr_t ldev_info_addr;
+
+       struct myrb_bgi_status bgi_status;
+
+       struct mutex dma_mutex;
+};
+
+/*
+ * DAC960 LA Series Controller Interface Register Offsets.
+ * Offsets are relative to the start of the 0x80-byte MMIO window.
+ */
+#define DAC960_LA_mmio_size            0x80
+
+enum DAC960_LA_reg_offset {
+       DAC960_LA_IRQMASK_OFFSET        = 0x34,
+       DAC960_LA_CMDOP_OFFSET          = 0x50,
+       DAC960_LA_CMDID_OFFSET          = 0x51,
+       DAC960_LA_MBOX2_OFFSET          = 0x52,
+       DAC960_LA_MBOX3_OFFSET          = 0x53,
+       DAC960_LA_MBOX4_OFFSET          = 0x54,
+       DAC960_LA_MBOX5_OFFSET          = 0x55,
+       DAC960_LA_MBOX6_OFFSET          = 0x56,
+       DAC960_LA_MBOX7_OFFSET          = 0x57,
+       DAC960_LA_MBOX8_OFFSET          = 0x58,
+       DAC960_LA_MBOX9_OFFSET          = 0x59,
+       DAC960_LA_MBOX10_OFFSET         = 0x5A,
+       DAC960_LA_MBOX11_OFFSET         = 0x5B,
+       DAC960_LA_MBOX12_OFFSET         = 0x5C,
+       DAC960_LA_STSID_OFFSET          = 0x5D,
+       DAC960_LA_STS_OFFSET            = 0x5E,
+       DAC960_LA_IDB_OFFSET            = 0x60,
+       DAC960_LA_ODB_OFFSET            = 0x61,
+       DAC960_LA_ERRSTS_OFFSET         = 0x63,
+};
+
+/*
+ * DAC960 LA Series Inbound Door Bell Register.
+ * The first group are write-side commands; HWMBOX_EMPTY/INIT_DONE reuse
+ * the same bit positions with read-side (status) meanings.
+ */
+#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LA_IDB_GEN_IRQ 0x04
+#define DAC960_LA_IDB_CTRL_RESET 0x08
+#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_LA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 LA Series Outbound Door Bell Register.
+ * ACK_IRQ values are written to acknowledge; STS_AVAIL values are the
+ * read-side meanings of the same bits.
+ */
+#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LA Series Interrupt Mask Register.
+ */
+#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LA Series Error Status Register.
+ */
+#define DAC960_LA_ERRSTS_PENDING 0x02
+
+/*
+ * DAC960 PG Series Controller Interface Register Offsets.
+ * Offsets are relative to the start of the 0x2000-byte MMIO window.
+ */
+#define DAC960_PG_mmio_size            0x2000
+
+enum DAC960_PG_reg_offset {
+       DAC960_PG_IDB_OFFSET            = 0x0020,
+       DAC960_PG_ODB_OFFSET            = 0x002C,
+       DAC960_PG_IRQMASK_OFFSET        = 0x0034,
+       DAC960_PG_CMDOP_OFFSET          = 0x1000,
+       DAC960_PG_CMDID_OFFSET          = 0x1001,
+       DAC960_PG_MBOX2_OFFSET          = 0x1002,
+       DAC960_PG_MBOX3_OFFSET          = 0x1003,
+       DAC960_PG_MBOX4_OFFSET          = 0x1004,
+       DAC960_PG_MBOX5_OFFSET          = 0x1005,
+       DAC960_PG_MBOX6_OFFSET          = 0x1006,
+       DAC960_PG_MBOX7_OFFSET          = 0x1007,
+       DAC960_PG_MBOX8_OFFSET          = 0x1008,
+       DAC960_PG_MBOX9_OFFSET          = 0x1009,
+       DAC960_PG_MBOX10_OFFSET         = 0x100A,
+       DAC960_PG_MBOX11_OFFSET         = 0x100B,
+       DAC960_PG_MBOX12_OFFSET         = 0x100C,
+       DAC960_PG_STSID_OFFSET          = 0x1018,
+       DAC960_PG_STS_OFFSET            = 0x101A,
+       DAC960_PG_ERRSTS_OFFSET         = 0x103F,
+};
+
+/*
+ * DAC960 PG Series Inbound Door Bell Register.
+ * First group: write-side commands; HWMBOX_FULL/INIT_IN_PROGRESS reuse
+ * the same bit positions with read-side (status) meanings.
+ */
+#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PG_IDB_GEN_IRQ 0x04
+#define DAC960_PG_IDB_CTRL_RESET 0x08
+#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_PG_IDB_HWMBOX_FULL 0x01
+#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PG Series Outbound Door Bell Register.
+ * ACK_IRQ values are written to acknowledge; STS_AVAIL values are the
+ * read-side meanings of the same bits.
+ */
+#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 PG Series Interrupt Mask Register.
+ */
+#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
+#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8
+
+/*
+ * DAC960 PG Series Error Status Register.
+ */
+#define DAC960_PG_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 PD Series Controller Interface Register Offsets.
+ * Offsets are relative to the start of the 0x80-byte MMIO window.
+ */
+#define DAC960_PD_mmio_size            0x80
+
+enum DAC960_PD_reg_offset {
+       DAC960_PD_CMDOP_OFFSET          = 0x00,
+       DAC960_PD_CMDID_OFFSET          = 0x01,
+       DAC960_PD_MBOX2_OFFSET          = 0x02,
+       DAC960_PD_MBOX3_OFFSET          = 0x03,
+       DAC960_PD_MBOX4_OFFSET          = 0x04,
+       DAC960_PD_MBOX5_OFFSET          = 0x05,
+       DAC960_PD_MBOX6_OFFSET          = 0x06,
+       DAC960_PD_MBOX7_OFFSET          = 0x07,
+       DAC960_PD_MBOX8_OFFSET          = 0x08,
+       DAC960_PD_MBOX9_OFFSET          = 0x09,
+       DAC960_PD_MBOX10_OFFSET         = 0x0A,
+       DAC960_PD_MBOX11_OFFSET         = 0x0B,
+       DAC960_PD_MBOX12_OFFSET         = 0x0C,
+       DAC960_PD_STSID_OFFSET          = 0x0D,
+       DAC960_PD_STS_OFFSET            = 0x0E,
+       DAC960_PD_ERRSTS_OFFSET         = 0x3F,
+       DAC960_PD_IDB_OFFSET            = 0x40,
+       DAC960_PD_ODB_OFFSET            = 0x41,
+       DAC960_PD_IRQEN_OFFSET          = 0x43,
+};
+
+/*
+ * DAC960 PD Series Inbound Door Bell Register.
+ * First group: write-side commands; HWMBOX_FULL/INIT_IN_PROGRESS reuse
+ * the same bit positions with read-side (status) meanings.
+ */
+#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PD_IDB_GEN_IRQ 0x04
+#define DAC960_PD_IDB_CTRL_RESET 0x08
+
+#define DAC960_PD_IDB_HWMBOX_FULL 0x01
+#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PD Series Outbound Door Bell Register.
+ * ACK_IRQ is the write-side meaning, STS_AVAIL the read-side meaning
+ * of the same bit.
+ */
+#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01
+
+/*
+ * DAC960 PD Series Interrupt Enable Register.
+ */
+#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01
+
+/*
+ * DAC960 PD Series Error Status Register.
+ */
+#define DAC960_PD_ERRSTS_PENDING 0x04
+
+/* Hardware-variant initialization hook: called at probe with the mapped
+ * register base; returns 0 on success or a negative errno.
+ */
+typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
+                             struct myrb_hba *cb, void __iomem *base);
+/* Mailbox/MMIO bring-up hook; returns the firmware command status. */
+typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
+                                          void __iomem *base,
+                                          union myrb_cmd_mbox *mbox);
+
+/* Per-board-family data selected from the PCI device table. */
+struct myrb_privdata {
+       myrb_hw_init_t          hw_init;
+       irq_handler_t           irq_handler;
+       unsigned int            mmio_size;      /* size of the MMIO window to map */
+};
+
+#endif /* MYRB_H */
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
new file mode 100644 (file)
index 0000000..0264a2e
--- /dev/null
@@ -0,0 +1,3268 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrs.h"
+
+static struct raid_template *myrs_raid_template;
+
+/*
+ * Translation table mapping enum myrs_devstate values to printable
+ * names; consumed by myrs_devstate_name().
+ */
+static struct myrs_devstate_name_entry {
+       enum myrs_devstate state;
+       char *name;
+} myrs_devstate_name_list[] = {
+       { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
+       { MYRS_DEVICE_ONLINE, "Online" },
+       { MYRS_DEVICE_REBUILD, "Rebuild" },
+       { MYRS_DEVICE_MISSING, "Missing" },
+       { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
+       { MYRS_DEVICE_OFFLINE, "Offline" },
+       { MYRS_DEVICE_CRITICAL, "Critical" },
+       { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
+       { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
+       { MYRS_DEVICE_STANDBY, "Standby" },
+       { MYRS_DEVICE_INVALID_STATE, "Invalid" },
+};
+
+/*
+ * myrs_devstate_name - look up the printable name for a device state
+ *
+ * Returns NULL when @state has no entry in the translation table.
+ */
+static char *myrs_devstate_name(enum myrs_devstate state)
+{
+       struct myrs_devstate_name_entry *entry;
+       struct myrs_devstate_name_entry *end =
+               myrs_devstate_name_list + ARRAY_SIZE(myrs_devstate_name_list);
+
+       for (entry = myrs_devstate_name_list; entry < end; entry++)
+               if (entry->state == state)
+                       return entry->name;
+
+       return NULL;
+}
+
+/*
+ * Translation table mapping enum myrs_raid_level values to printable
+ * names; consumed by myrs_raid_level_name().
+ */
+static struct myrs_raid_level_name_entry {
+       enum myrs_raid_level level;
+       char *name;
+} myrs_raid_level_name_list[] = {
+       { MYRS_RAID_LEVEL0, "RAID0" },
+       { MYRS_RAID_LEVEL1, "RAID1" },
+       { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
+       { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
+       { MYRS_RAID_LEVEL6, "RAID6" },
+       { MYRS_RAID_JBOD, "JBOD" },
+       { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
+       { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
+       { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
+       { MYRS_RAID_SPAN, "Mylex SPAN" },
+       { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
+       { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
+       { MYRS_RAID_PHYSICAL, "Physical device" },
+};
+
+/*
+ * myrs_raid_level_name - look up the printable name for a RAID level
+ *
+ * Returns NULL when @level has no entry in the translation table.
+ */
+static char *myrs_raid_level_name(enum myrs_raid_level level)
+{
+       struct myrs_raid_level_name_entry *entry;
+       struct myrs_raid_level_name_entry *end =
+               myrs_raid_level_name_list +
+               ARRAY_SIZE(myrs_raid_level_name_list);
+
+       for (entry = myrs_raid_level_name_list; entry < end; entry++)
+               if (entry->level == level)
+                       return entry->name;
+
+       return NULL;
+}
+
+/**
+ * myrs_reset_cmd - prepare a command block for reuse
+ *
+ * Zeroes the embedded command mailbox and clears the completion status
+ * so the block can be repopulated for the next command.
+ */
+static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
+{
+       memset(&cmd_blk->mbox, 0, sizeof(cmd_blk->mbox));
+       cmd_blk->status = 0;
+}
+
+/**
+ * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
+ *
+ * Copies @cmd_blk's mailbox into the next free slot of the in-memory
+ * command mailbox ring and advances the ring cursor.  Caller must hold
+ * cs->queue_lock.
+ */
+static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+       void __iomem *base = cs->io_base;
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
+
+       cs->write_cmd_mbox(next_mbox, mbox);
+
+       /*
+        * If either of the two most recently submitted slots has already
+        * been consumed (words[0] == 0), poke the controller through the
+        * board-specific get_cmd_mbox hook — presumably to make it
+        * (re)scan the mailbox ring; confirm against the hook impls.
+        */
+       if (cs->prev_cmd_mbox1->words[0] == 0 ||
+           cs->prev_cmd_mbox2->words[0] == 0)
+               cs->get_cmd_mbox(base);
+
+       /* Remember the last two submissions for the check above. */
+       cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
+       cs->prev_cmd_mbox1 = next_mbox;
+
+       /* Advance the ring cursor, wrapping at the end of the array. */
+       if (++next_mbox > cs->last_cmd_mbox)
+               next_mbox = cs->first_cmd_mbox;
+
+       cs->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrs_exec_cmd - executes V2 Command and waits for completion.
+ *
+ * Queues @cmd_blk under queue_lock and blocks until the on-stack
+ * completion is signalled — presumably by the interrupt handler when
+ * the controller posts the status (confirm against the IRQ path).
+ * Sleeps, so it must not be called from interrupt context; see the
+ * WARN_ON below.  The caller serializes use of the command block.
+ */
+static void myrs_exec_cmd(struct myrs_hba *cs,
+               struct myrs_cmdblk *cmd_blk)
+{
+       DECLARE_COMPLETION_ONSTACK(complete);
+       unsigned long flags;
+
+       /* Publish the completion before queueing so the completer sees it. */
+       cmd_blk->complete = &complete;
+       spin_lock_irqsave(&cs->queue_lock, flags);
+       myrs_qcmd(cs, cmd_blk);
+       spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+       WARN_ON(in_interrupt());
+       wait_for_completion(&complete);
+}
+
+/**
+ * myrs_report_progress - prints progress message
+ *
+ * Logs the percentage completion of a long-running logical device
+ * operation.  @blocks is the current position and @size the total
+ * device size, both in sectors.
+ */
+static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
+               unsigned char *msg, unsigned long blocks,
+               unsigned long size)
+{
+       /* Scale both values down so 100 * done cannot overflow an int. */
+       int done = (int)(blocks >> 7);
+       int total = (int)(size >> 7);
+
+       /*
+        * Guard against a divide-by-zero for devices smaller than 128
+        * sectors, where size >> 7 truncates to 0.
+        */
+       shost_printk(KERN_INFO, cs->host,
+                    "Logical Drive %d: %s in Progress: %d%% completed\n",
+                    ldev_num, msg,
+                    total ? (100 * done) / total : 0);
+}
+
+/**
+ * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
+ *
+ * Refreshes cs->ctlr_info via a GET_CTLR_INFO direct command and logs
+ * changes in the logical-drive counts.  Serialized through dcmd_mutex.
+ * Returns the controller completion status (MYRS_STATUS_FAILED if the
+ * DMA mapping fails before the command is issued).
+ */
+static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
+{
+       struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       dma_addr_t ctlr_info_addr;
+       union myrs_sgl *sgl;
+       unsigned char status;
+       struct myrs_ctlr_info old;
+
+       /* Snapshot the previous info so changes can be detected below. */
+       memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
+       ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
+                                       sizeof(struct myrs_ctlr_info),
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
+               return MYRS_STATUS_FAILED;
+
+       mutex_lock(&cs->dcmd_mutex);
+       myrs_reset_cmd(cmd_blk);
+       mbox->ctlr_info.id = MYRS_DCMD_TAG;
+       mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->ctlr_info.control.dma_ctrl_to_host = true;
+       mbox->ctlr_info.control.no_autosense = true;
+       mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
+       mbox->ctlr_info.ctlr_num = 0;
+       mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
+       sgl = &mbox->ctlr_info.dma_addr;
+       sgl->sge[0].sge_addr = ctlr_info_addr;
+       sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
+       dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
+                        sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
+       if (status == MYRS_STATUS_SUCCESS) {
+               /* Any background operation in flight => request a re-read. */
+               if (cs->ctlr_info->bg_init_active +
+                   cs->ctlr_info->ldev_init_active +
+                   cs->ctlr_info->pdev_init_active +
+                   cs->ctlr_info->cc_active +
+                   cs->ctlr_info->rbld_active +
+                   cs->ctlr_info->exp_active != 0)
+                       cs->needs_update = true;
+               /* Log when the logical drive population has changed. */
+               if (cs->ctlr_info->ldev_present != old.ldev_present ||
+                   cs->ctlr_info->ldev_critical != old.ldev_critical ||
+                   cs->ctlr_info->ldev_offline != old.ldev_offline)
+                       shost_printk(KERN_INFO, cs->host,
+                                    "Logical drive count changes (%d/%d/%d)\n",
+                                    cs->ctlr_info->ldev_critical,
+                                    cs->ctlr_info->ldev_offline,
+                                    cs->ctlr_info->ldev_present);
+       }
+
+       return status;
+}
+
+/**
+ * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
+ *
+ * Refreshes @ldev_info for logical device @ldev_num and logs state,
+ * error-count, and long-running-operation changes relative to the
+ * previous contents of the buffer.  Serialized through dcmd_mutex;
+ * returns the controller completion status.
+ */
+static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
+               unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
+{
+       struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       dma_addr_t ldev_info_addr;
+       struct myrs_ldev_info ldev_info_orig;
+       union myrs_sgl *sgl;
+       unsigned char status;
+
+       /* Snapshot the caller's buffer so changes can be detected below. */
+       memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
+       ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
+                                       sizeof(struct myrs_ldev_info),
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
+               return MYRS_STATUS_FAILED;
+
+       mutex_lock(&cs->dcmd_mutex);
+       myrs_reset_cmd(cmd_blk);
+       mbox->ldev_info.id = MYRS_DCMD_TAG;
+       mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->ldev_info.control.dma_ctrl_to_host = true;
+       mbox->ldev_info.control.no_autosense = true;
+       mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
+       mbox->ldev_info.ldev.ldev_num = ldev_num;
+       mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
+       sgl = &mbox->ldev_info.dma_addr;
+       sgl->sge[0].sge_addr = ldev_info_addr;
+       sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
+       dev_dbg(&cs->host->shost_gendev,
+               "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
+                        sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
+       if (status == MYRS_STATUS_SUCCESS) {
+               /* NOTE(review): this local shadows the ldev_num parameter. */
+               unsigned short ldev_num = ldev_info->ldev_num;
+               struct myrs_ldev_info *new = ldev_info;
+               struct myrs_ldev_info *old = &ldev_info_orig;
+               unsigned long ldev_size = new->cfg_devsize;
+
+               /* State transition since the last poll. */
+               if (new->dev_state != old->dev_state) {
+                       const char *name;
+
+                       name = myrs_devstate_name(new->dev_state);
+                       shost_printk(KERN_INFO, cs->host,
+                                    "Logical Drive %d is now %s\n",
+                                    ldev_num, name ? name : "Invalid");
+               }
+               /* Error-counter movement since the last poll. */
+               if ((new->soft_errs != old->soft_errs) ||
+                   (new->cmds_failed != old->cmds_failed) ||
+                   (new->deferred_write_errs != old->deferred_write_errs))
+                       shost_printk(KERN_INFO, cs->host,
+                                    "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
+                                    ldev_num, new->soft_errs,
+                                    new->cmds_failed,
+                                    new->deferred_write_errs);
+               /* Report at most one active long-running operation. */
+               if (new->bg_init_active)
+                       myrs_report_progress(cs, ldev_num,
+                                            "Background Initialization",
+                                            new->bg_init_lba, ldev_size);
+               else if (new->fg_init_active)
+                       myrs_report_progress(cs, ldev_num,
+                                            "Foreground Initialization",
+                                            new->fg_init_lba, ldev_size);
+               else if (new->migration_active)
+                       myrs_report_progress(cs, ldev_num,
+                                            "Data Migration",
+                                            new->migration_lba, ldev_size);
+               else if (new->patrol_active)
+                       myrs_report_progress(cs, ldev_num,
+                                            "Patrol Operation",
+                                            new->patrol_lba, ldev_size);
+               /* Background init just finished: report the outcome. */
+               if (old->bg_init_active && !new->bg_init_active)
+                       shost_printk(KERN_INFO, cs->host,
+                                    "Logical Drive %d: Background Initialization %s\n",
+                                    ldev_num,
+                                    (new->ldev_control.ldev_init_done ?
+                                     "Completed" : "Failed"));
+       }
+       return status;
+}
+
+/**
+ * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
+ *
+ * Fills @pdev_info for the physical device addressed by
+ * @channel/@target/@lun.  Serialized through dcmd_mutex; returns the
+ * controller completion status (MYRS_STATUS_FAILED if the DMA mapping
+ * fails before the command is issued).
+ */
+static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
+               unsigned char channel, unsigned char target, unsigned char lun,
+               struct myrs_pdev_info *pdev_info)
+{
+       struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       dma_addr_t pdev_info_addr;
+       union myrs_sgl *sgl;
+       unsigned char status;
+
+       pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
+                                       sizeof(struct myrs_pdev_info),
+                                       DMA_FROM_DEVICE);
+       if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
+               return MYRS_STATUS_FAILED;
+
+       mutex_lock(&cs->dcmd_mutex);
+       myrs_reset_cmd(cmd_blk);
+       mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->pdev_info.id = MYRS_DCMD_TAG;
+       mbox->pdev_info.control.dma_ctrl_to_host = true;
+       mbox->pdev_info.control.no_autosense = true;
+       mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
+       mbox->pdev_info.pdev.lun = lun;
+       mbox->pdev_info.pdev.target = target;
+       mbox->pdev_info.pdev.channel = channel;
+       mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
+       sgl = &mbox->pdev_info.dma_addr;
+       sgl->sge[0].sge_addr = pdev_info_addr;
+       sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+       dev_dbg(&cs->host->shost_gendev,
+               "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
+               channel, target, lun);
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
+                        sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
+       return status;
+}
+
+/**
+ * myrs_dev_op - executes a "Device Operation" Command
+ *
+ * Issues IOCTL @opcode against operation device @opdev using the slot
+ * reserved for direct commands, serialized by dcmd_mutex.  Returns the
+ * controller completion status.
+ */
+static unsigned char myrs_dev_op(struct myrs_hba *cs,
+               enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
+{
+       struct myrs_cmdblk *cmd = &cs->dcmd_blk;
+       union myrs_cmd_mbox *op_mbox = &cmd->mbox;
+       unsigned char result;
+
+       mutex_lock(&cs->dcmd_mutex);
+       myrs_reset_cmd(cmd);
+       op_mbox->dev_op.id = MYRS_DCMD_TAG;
+       op_mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
+       op_mbox->dev_op.ioctl_opcode = opcode;
+       op_mbox->dev_op.opdev = opdev;
+       op_mbox->dev_op.control.dma_ctrl_to_host = true;
+       op_mbox->dev_op.control.no_autosense = true;
+       myrs_exec_cmd(cs, cmd);
+       result = cmd->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       return result;
+}
+
+/**
+ * myrs_translate_pdev - translates a Physical Device Channel and
+ * TargetID into a Logical Device.
+ *
+ * Issues the XLATE_PDEV_TO_LDEV IOCTL; on success @devmap holds the
+ * logical device mapping for the addressed physical device.  Returns
+ * the controller completion status.
+ */
+static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
+               unsigned char channel, unsigned char target, unsigned char lun,
+               struct myrs_devmap *devmap)
+{
+       struct pci_dev *pdev = cs->pdev;
+       dma_addr_t devmap_addr;
+       struct myrs_cmdblk *cmd_blk;
+       union myrs_cmd_mbox *mbox;
+       union myrs_sgl *sgl;
+       unsigned char status;
+
+       memset(devmap, 0x0, sizeof(struct myrs_devmap));
+       devmap_addr = dma_map_single(&pdev->dev, devmap,
+                                    sizeof(struct myrs_devmap),
+                                    DMA_FROM_DEVICE);
+       if (dma_mapping_error(&pdev->dev, devmap_addr))
+               return MYRS_STATUS_FAILED;
+
+       mutex_lock(&cs->dcmd_mutex);
+       cmd_blk = &cs->dcmd_blk;
+       mbox = &cmd_blk->mbox;
+       /*
+        * Clear out any stale state left by the previous direct command
+        * and set the command tag explicitly, matching every other user
+        * of dcmd_blk; previously neither was done here, so leftover
+        * mailbox fields leaked into this command.
+        */
+       myrs_reset_cmd(cmd_blk);
+       mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->pdev_info.id = MYRS_DCMD_TAG;
+       mbox->pdev_info.control.dma_ctrl_to_host = true;
+       mbox->pdev_info.control.no_autosense = true;
+       mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
+       mbox->pdev_info.pdev.target = target;
+       mbox->pdev_info.pdev.channel = channel;
+       mbox->pdev_info.pdev.lun = lun;
+       mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
+       sgl = &mbox->pdev_info.dma_addr;
+       sgl->sge[0].sge_addr = devmap_addr;
+       sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       dma_unmap_single(&pdev->dev, devmap_addr,
+                        sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
+       return status;
+}
+
+/**
+ * myrs_get_event - executes a Get Event Command
+ *
+ * Fetches event @event_num from the controller's event log into
+ * @event_buf using the monitoring command block.
+ *
+ * NOTE(review): unlike the dcmd_blk users above, this neither calls
+ * myrs_reset_cmd() nor takes a mutex, and it never sets the command
+ * id — it appears to rely on the caller serializing mcmd_blk use and
+ * on the mailbox being pre-initialized; confirm against the callers.
+ */
+static unsigned char myrs_get_event(struct myrs_hba *cs,
+               unsigned int event_num, struct myrs_event *event_buf)
+{
+       struct pci_dev *pdev = cs->pdev;
+       dma_addr_t event_addr;
+       struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       union myrs_sgl *sgl;
+       unsigned char status;
+
+       event_addr = dma_map_single(&pdev->dev, event_buf,
+                                   sizeof(struct myrs_event), DMA_FROM_DEVICE);
+       if (dma_mapping_error(&pdev->dev, event_addr))
+               return MYRS_STATUS_FAILED;
+
+       /* The 32-bit event number is split across two 16-bit fields. */
+       mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->get_event.dma_size = sizeof(struct myrs_event);
+       mbox->get_event.evnum_upper = event_num >> 16;
+       mbox->get_event.ctlr_num = 0;
+       mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
+       mbox->get_event.evnum_lower = event_num & 0xFFFF;
+       sgl = &mbox->get_event.dma_addr;
+       sgl->sge[0].sge_addr = event_addr;
+       sgl->sge[0].sge_count = mbox->get_event.dma_size;
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       dma_unmap_single(&pdev->dev, event_addr,
+                        sizeof(struct myrs_event), DMA_FROM_DEVICE);
+
+       return status;
+}
+
+/*
+ * myrs_get_fwstatus - executes a Get Health Status Command
+ *
+ * Directs the controller to write its health status into the
+ * preallocated coherent fwstat buffer, using the monitoring command
+ * block.  Returns the controller completion status.
+ */
+static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
+{
+       struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       union myrs_sgl *sgl;
+       unsigned char status;
+
+       myrs_reset_cmd(cmd_blk);
+       mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->common.id = MYRS_MCMD_TAG;
+       mbox->common.control.dma_ctrl_to_host = true;
+       mbox->common.control.no_autosense = true;
+       mbox->common.dma_size = sizeof(struct myrs_fwstat);
+       mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
+       sgl = &mbox->common.dma_addr;
+       sgl->sge[0].sge_addr = cs->fwstat_addr;
+       /*
+        * Read dma_size back through the same union member it was
+        * written through; the original read mbox->ctlr_info.dma_size,
+        * which only worked because the union members alias.
+        */
+       sgl->sge[0].sge_count = mbox->common.dma_size;
+       dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+
+       return status;
+}
+
+/**
+ * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
+ *
+ * Sets the DMA mask, allocates the command and status mailbox rings
+ * plus the firmware health buffer, then asks the controller (through
+ * the board-specific @enable_mbox_fn) to switch to memory mailbox
+ * mode.  Returns true on success.
+ *
+ * NOTE(review): buffers allocated here are not freed on the failure
+ * paths — assumed to be released by the caller's cleanup; confirm.
+ */
+static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+               enable_mbox_t enable_mbox_fn)
+{
+       void __iomem *base = cs->io_base;
+       struct pci_dev *pdev = cs->pdev;
+       union myrs_cmd_mbox *cmd_mbox;
+       struct myrs_stat_mbox *stat_mbox;
+       union myrs_cmd_mbox *mbox;
+       dma_addr_t mbox_addr;
+       unsigned char status = MYRS_STATUS_FAILED;
+
+       /* Prefer 64-bit DMA addressing, fall back to 32-bit. */
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+               if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+                       dev_err(&pdev->dev, "DMA mask out of range\n");
+                       return false;
+               }
+
+       /*
+        * Temporary dma mapping, used only in the scope of this function.
+        * dma_alloc_coherent() reports failure by returning NULL;
+        * dma_mapping_error() is only valid for streaming mappings such
+        * as dma_map_single(), so the checks below test the pointer.
+        */
+       mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+                                 &mbox_addr, GFP_KERNEL);
+       if (!mbox)
+               return false;
+
+       /* These are the base addresses for the command memory mailbox array */
+       cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
+       cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
+                                     &cs->cmd_mbox_addr, GFP_KERNEL);
+       if (!cmd_mbox) {
+               dev_err(&pdev->dev, "Failed to map command mailbox\n");
+               goto out_free;
+       }
+       cs->first_cmd_mbox = cmd_mbox;
+       cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
+       cs->last_cmd_mbox = cmd_mbox;
+       cs->next_cmd_mbox = cs->first_cmd_mbox;
+       cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
+       cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
+
+       /* These are the base addresses for the status memory mailbox array */
+       cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
+       stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
+                                      &cs->stat_mbox_addr, GFP_KERNEL);
+       if (!stat_mbox) {
+               dev_err(&pdev->dev, "Failed to map status mailbox\n");
+               goto out_free;
+       }
+
+       cs->first_stat_mbox = stat_mbox;
+       stat_mbox += MYRS_MAX_STAT_MBOX - 1;
+       cs->last_stat_mbox = stat_mbox;
+       cs->next_stat_mbox = cs->first_stat_mbox;
+
+       cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
+                                           sizeof(struct myrs_fwstat),
+                                           &cs->fwstat_addr, GFP_KERNEL);
+       if (!cs->fwstat_buf) {
+               dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
+               goto out_free;
+       }
+       cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
+                               GFP_KERNEL | GFP_DMA);
+       if (!cs->ctlr_info)
+               goto out_free;
+
+       cs->event_buf = kzalloc(sizeof(struct myrs_event),
+                               GFP_KERNEL | GFP_DMA);
+       if (!cs->event_buf)
+               goto out_free;
+
+       /* Enable the Memory Mailbox Interface. */
+       memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+       mbox->set_mbox.id = 1;
+       mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->set_mbox.control.no_autosense = true;
+       mbox->set_mbox.first_cmd_mbox_size_kb =
+               (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
+       mbox->set_mbox.first_stat_mbox_size_kb =
+               (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
+       mbox->set_mbox.second_cmd_mbox_size_kb = 0;
+       mbox->set_mbox.second_stat_mbox_size_kb = 0;
+       mbox->set_mbox.sense_len = 0;
+       mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
+       mbox->set_mbox.fwstat_buf_size_kb = 1;
+       mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
+       mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
+       mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
+       status = enable_mbox_fn(base, mbox_addr);
+
+out_free:
+       dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+                         mbox, mbox_addr);
+       if (status != MYRS_STATUS_SUCCESS)
+               dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
+                       status);
+       return (status == MYRS_STATUS_SUCCESS);
+}
+
+/**
+ * myrs_get_config - reads the Configuration Information
+ *
+ * Fetches the controller information, derives the model name and
+ * firmware version, rejects firmware older than 6.00-01, and sizes
+ * the SCSI host (channels, targets, queue depth, SG limits) from the
+ * reported capabilities.  Returns 0 on success, -ENODEV on failure.
+ */
+static int myrs_get_config(struct myrs_hba *cs)
+{
+       struct myrs_ctlr_info *info = cs->ctlr_info;
+       struct Scsi_Host *shost = cs->host;
+       unsigned char status;
+       unsigned char model[20];
+       unsigned char fw_version[12];
+       int i, model_len;
+
+       /* Get data into dma-able area, then copy into permanent location */
+       mutex_lock(&cs->cinfo_mutex);
+       status = myrs_get_ctlr_info(cs);
+       mutex_unlock(&cs->cinfo_mutex);
+       if (status != MYRS_STATUS_SUCCESS) {
+               shost_printk(KERN_ERR, shost,
+                            "Failed to get controller information\n");
+               return -ENODEV;
+       }
+
+       /* Initialize the Controller Model Name and Full Model Name fields. */
+       model_len = sizeof(info->ctlr_name);
+       if (model_len > sizeof(model)-1)
+               model_len = sizeof(model)-1;
+       memcpy(model, info->ctlr_name, model_len);
+       model_len--;
+       /*
+        * Trim trailing blanks and NULs.  NOTE(review): model_len can go
+        * negative if ctlr_name is entirely blank — assumed the firmware
+        * always supplies a non-blank name; confirm.
+        */
+       while (model[model_len] == ' ' || model[model_len] == '\0')
+               model_len--;
+       model[++model_len] = '\0';
+       strcpy(cs->model_name, "DAC960 ");
+       strcat(cs->model_name, model);
+       /* Initialize the Controller Firmware Version field. */
+       sprintf(fw_version, "%d.%02d-%02d",
+               info->fw_major_version, info->fw_minor_version,
+               info->fw_turn_number);
+       /* Firmware before 6.00-01 lacks the health-status interface. */
+       if (info->fw_major_version == 6 &&
+           info->fw_minor_version == 0 &&
+           info->fw_turn_number < 1) {
+               shost_printk(KERN_WARNING, shost,
+                       "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
+                       "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
+                       "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
+                       fw_version);
+               return -ENODEV;
+       }
+       /* Initialize the Controller Channels and Targets. */
+       shost->max_channel = info->physchan_present + info->virtchan_present;
+       shost->max_id = info->max_targets[0];
+       for (i = 1; i < 16; i++) {
+               if (!info->max_targets[i])
+                       continue;
+               if (shost->max_id < info->max_targets[i])
+                       shost->max_id = info->max_targets[i];
+       }
+
+       /*
+        * Initialize the Controller Queue Depth, Driver Queue Depth,
+        * Logical Drive Count, Maximum Blocks per Command, Controller
+        * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+        * The Driver Queue Depth must be at most three less than
+        * the Controller Queue Depth; tag '1' is reserved for
+        * direct commands, and tag '2' for monitoring commands.
+        */
+       shost->can_queue = info->max_tcq - 3;
+       if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
+               shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
+       shost->max_sectors = info->max_transfer_size;
+       shost->sg_tablesize = info->max_sge;
+       if (shost->sg_tablesize > MYRS_SG_LIMIT)
+               shost->sg_tablesize = MYRS_SG_LIMIT;
+
+       shost_printk(KERN_INFO, shost,
+               "Configuring %s PCI RAID Controller\n", model);
+       shost_printk(KERN_INFO, shost,
+               "  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
+               fw_version, info->physchan_present, info->mem_size_mb);
+
+       shost_printk(KERN_INFO, shost,
+                    "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+                    shost->can_queue, shost->max_sectors);
+
+       shost_printk(KERN_INFO, shost,
+                    "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+                    shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
+       for (i = 0; i < info->physchan_max; i++) {
+               if (!info->max_targets[i])
+                       continue;
+               shost_printk(KERN_INFO, shost,
+                            "  Device Channel %d: max %d devices\n",
+                            i, info->max_targets[i]);
+       }
+       shost_printk(KERN_INFO, shost,
+                    "  Physical: %d/%d channels, %d disks, %d devices\n",
+                    info->physchan_present, info->physchan_max,
+                    info->pdisk_present, info->pdev_present);
+
+       shost_printk(KERN_INFO, shost,
+                    "  Logical: %d/%d channels, %d disks\n",
+                    info->virtchan_present, info->virtchan_max,
+                    info->ldev_present);
+       return 0;
+}
+
+/**
+ * myrs_log_event - prints a Controller Event message
+ */
+static struct {
+       int ev_code;
+       unsigned char *ev_msg;
+} myrs_ev_list[] = {
+       /* Physical Device Events (0x0000 - 0x007F) */
+       { 0x0001, "P Online" },
+       { 0x0002, "P Standby" },
+       { 0x0005, "P Automatic Rebuild Started" },
+       { 0x0006, "P Manual Rebuild Started" },
+       { 0x0007, "P Rebuild Completed" },
+       { 0x0008, "P Rebuild Cancelled" },
+       { 0x0009, "P Rebuild Failed for Unknown Reasons" },
+       { 0x000A, "P Rebuild Failed due to New Physical Device" },
+       { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+       { 0x000C, "S Offline" },
+       { 0x000D, "P Found" },
+       { 0x000E, "P Removed" },
+       { 0x000F, "P Unconfigured" },
+       { 0x0010, "P Expand Capacity Started" },
+       { 0x0011, "P Expand Capacity Completed" },
+       { 0x0012, "P Expand Capacity Failed" },
+       { 0x0013, "P Command Timed Out" },
+       { 0x0014, "P Command Aborted" },
+       { 0x0015, "P Command Retried" },
+       { 0x0016, "P Parity Error" },
+       { 0x0017, "P Soft Error" },
+       { 0x0018, "P Miscellaneous Error" },
+       { 0x0019, "P Reset" },
+       { 0x001A, "P Active Spare Found" },
+       { 0x001B, "P Warm Spare Found" },
+       { 0x001C, "S Sense Data Received" },
+       { 0x001D, "P Initialization Started" },
+       { 0x001E, "P Initialization Completed" },
+       { 0x001F, "P Initialization Failed" },
+       { 0x0020, "P Initialization Cancelled" },
+       { 0x0021, "P Failed because Write Recovery Failed" },
+       { 0x0022, "P Failed because SCSI Bus Reset Failed" },
+       { 0x0023, "P Failed because of Double Check Condition" },
+       { 0x0024, "P Failed because Device Cannot Be Accessed" },
+       { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+       { 0x0026, "P Failed because of Bad Tag from Device" },
+       { 0x0027, "P Failed because of Command Timeout" },
+       { 0x0028, "P Failed because of System Reset" },
+       { 0x0029, "P Failed because of Busy Status or Parity Error" },
+       { 0x002A, "P Failed because Host Set Device to Failed State" },
+       { 0x002B, "P Failed because of Selection Timeout" },
+       { 0x002C, "P Failed because of SCSI Bus Phase Error" },
+       { 0x002D, "P Failed because Device Returned Unknown Status" },
+       { 0x002E, "P Failed because Device Not Ready" },
+       { 0x002F, "P Failed because Device Not Found at Startup" },
+       { 0x0030, "P Failed because COD Write Operation Failed" },
+       { 0x0031, "P Failed because BDT Write Operation Failed" },
+       { 0x0039, "P Missing at Startup" },
+       { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+       { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+       { 0x003D, "P Standby Rebuild Started" },
+       /* Logical Device Events (0x0080 - 0x00FF) */
+       { 0x0080, "M Consistency Check Started" },
+       { 0x0081, "M Consistency Check Completed" },
+       { 0x0082, "M Consistency Check Cancelled" },
+       { 0x0083, "M Consistency Check Completed With Errors" },
+       { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+       { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+       { 0x0086, "L Offline" },
+       { 0x0087, "L Critical" },
+       { 0x0088, "L Online" },
+       { 0x0089, "M Automatic Rebuild Started" },
+       { 0x008A, "M Manual Rebuild Started" },
+       { 0x008B, "M Rebuild Completed" },
+       { 0x008C, "M Rebuild Cancelled" },
+       { 0x008D, "M Rebuild Failed for Unknown Reasons" },
+       { 0x008E, "M Rebuild Failed due to New Physical Device" },
+       { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+       { 0x0090, "M Initialization Started" },
+       { 0x0091, "M Initialization Completed" },
+       { 0x0092, "M Initialization Cancelled" },
+       { 0x0093, "M Initialization Failed" },
+       { 0x0094, "L Found" },
+       { 0x0095, "L Deleted" },
+       { 0x0096, "M Expand Capacity Started" },
+       { 0x0097, "M Expand Capacity Completed" },
+       { 0x0098, "M Expand Capacity Failed" },
+       { 0x0099, "L Bad Block Found" },
+       { 0x009A, "L Size Changed" },
+       { 0x009B, "L Type Changed" },
+       { 0x009C, "L Bad Data Block Found" },
+       { 0x009E, "L Read of Data Block in BDT" },
+       { 0x009F, "L Write Back Data for Disk Block Lost" },
+       { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+       { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+       { 0x00A2, "L Standby Rebuild Started" },
+       /* Fault Management Events (0x0100 - 0x017F) */
+       { 0x0140, "E Fan %d Failed" },
+       { 0x0141, "E Fan %d OK" },
+       { 0x0142, "E Fan %d Not Present" },
+       { 0x0143, "E Power Supply %d Failed" },
+       { 0x0144, "E Power Supply %d OK" },
+       { 0x0145, "E Power Supply %d Not Present" },
+       { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+       { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+       { 0x0148, "E Temperature Sensor %d Temperature Normal" },
+       { 0x0149, "E Temperature Sensor %d Not Present" },
+       { 0x014A, "E Enclosure Management Unit %d Access Critical" },
+       { 0x014B, "E Enclosure Management Unit %d Access OK" },
+       { 0x014C, "E Enclosure Management Unit %d Access Offline" },
+       /* Controller Events (0x0180 - 0x01FF) */
+       { 0x0181, "C Cache Write Back Error" },
+       { 0x0188, "C Battery Backup Unit Found" },
+       { 0x0189, "C Battery Backup Unit Charge Level Low" },
+       { 0x018A, "C Battery Backup Unit Charge Level OK" },
+       { 0x0193, "C Installation Aborted" },
+       { 0x0195, "C Battery Backup Unit Physically Removed" },
+       { 0x0196, "C Memory Error During Warm Boot" },
+       { 0x019E, "C Memory Soft ECC Error Corrected" },
+       { 0x019F, "C Memory Hard ECC Error Corrected" },
+       { 0x01A2, "C Battery Backup Unit Failed" },
+       { 0x01AB, "C Mirror Race Recovery Failed" },
+       { 0x01AC, "C Mirror Race on Critical Drive" },
+       /* Controller Internal Processor Events */
+       { 0x0380, "C Internal Controller Hung" },
+       { 0x0381, "C Internal Controller Firmware Breakpoint" },
+       { 0x0390, "C Internal Controller i960 Processor Specific Error" },
+       { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+       { 0, "" }
+};
+
+/*
+ * myrs_log_event - decode and log a controller event
+ * @cs: controller state
+ * @ev: event as returned by the controller
+ *
+ * Looks the event code up in myrs_ev_list and logs it according to the
+ * event class encoded in the first character of the message template:
+ * 'P' physical device, 'L'/'M' logical device, 'S' physical device
+ * sense data, 'E' enclosure, 'C' controller.  'L' and 'M' events also
+ * request a device rescan via cs->needs_update.
+ */
+static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
+{
+       unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
+       int ev_idx = 0, ev_code;
+       unsigned char ev_type, *ev_msg;
+       struct Scsi_Host *shost = cs->host;
+       struct scsi_device *sdev;
+       /*
+        * Zero-initialize all sense state: it is read below even for
+        * events other than 0x1C, which previously left it uninitialized.
+        */
+       struct scsi_sense_hdr sshdr = {0};
+       unsigned char sense_info[4] = {0};
+       unsigned char cmd_specific[4] = {0};
+
+       /* Event code 0x1C carries SCSI sense data in ev->sense_data */
+       if (ev->ev_code == 0x1C &&
+           scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
+               memcpy(sense_info, &ev->sense_data[3], 4);
+               memcpy(cmd_specific, &ev->sense_data[7], 4);
+       }
+       /* Vendor-specific sense encodes a dedicated event code range */
+       if (sshdr.sense_key == VENDOR_SPECIFIC &&
+           (sshdr.asc == 0x80 || sshdr.asc == 0x81))
+               ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
+       /* The message table is terminated by an entry with code 0 */
+       while (true) {
+               ev_code = myrs_ev_list[ev_idx].ev_code;
+               if (ev_code == ev->ev_code || ev_code == 0)
+                       break;
+               ev_idx++;
+       }
+       ev_type = myrs_ev_list[ev_idx].ev_msg[0];
+       ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
+       if (ev_code == 0) {
+               shost_printk(KERN_WARNING, shost,
+                            "Unknown Controller Event Code %04X\n",
+                            ev->ev_code);
+               return;
+       }
+       switch (ev_type) {
+       case 'P':
+               sdev = scsi_device_lookup(shost, ev->channel,
+                                         ev->target, 0);
+               if (!sdev) {
+                       /* Device not (yet) known to the midlayer */
+                       shost_printk(KERN_INFO, shost,
+                                    "event %d: Physical Device %d:%d %s\n",
+                                    ev->ev_seq, ev->channel, ev->target,
+                                    ev_msg);
+                       break;
+               }
+               sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
+                           ev->ev_seq, ev_msg);
+               if (sdev->hostdata &&
+                   sdev->channel < cs->ctlr_info->physchan_present) {
+                       struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+                       /* Mirror the controller's view of the device state */
+                       switch (ev->ev_code) {
+                       case 0x0001:
+                       case 0x0007:
+                               pdev_info->dev_state = MYRS_DEVICE_ONLINE;
+                               break;
+                       case 0x0002:
+                               pdev_info->dev_state = MYRS_DEVICE_STANDBY;
+                               break;
+                       case 0x000C:
+                               pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
+                               break;
+                       case 0x000E:
+                               pdev_info->dev_state = MYRS_DEVICE_MISSING;
+                               break;
+                       case 0x000F:
+                               pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
+                               break;
+                       }
+               }
+               /* Drop the reference taken by scsi_device_lookup() */
+               scsi_device_put(sdev);
+               break;
+       case 'L':
+               /* fallthrough: 'L' and 'M' events are handled identically */
+       case 'M':
+               shost_printk(KERN_INFO, shost,
+                            "event %d: Logical Drive %d %s\n",
+                            ev->ev_seq, ev->lun, ev_msg);
+               cs->needs_update = true;
+               break;
+       case 'S':
+               /* Suppress benign NO SENSE / NOT READY (in progress) noise */
+               if (sshdr.sense_key == NO_SENSE ||
+                   (sshdr.sense_key == NOT_READY &&
+                    sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
+                                           sshdr.ascq == 0x02)))
+                       break;
+               shost_printk(KERN_INFO, shost,
+                            "event %d: Physical Device %d:%d %s\n",
+                            ev->ev_seq, ev->channel, ev->target, ev_msg);
+               shost_printk(KERN_INFO, shost,
+                            "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+                            ev->channel, ev->target,
+                            sshdr.sense_key, sshdr.asc, sshdr.ascq);
+               shost_printk(KERN_INFO, shost,
+                            "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
+                            ev->channel, ev->target,
+                            sense_info[0], sense_info[1],
+                            sense_info[2], sense_info[3],
+                            cmd_specific[0], cmd_specific[1],
+                            cmd_specific[2], cmd_specific[3]);
+               break;
+       case 'E':
+               if (cs->disable_enc_msg)
+                       break;
+               /* Enclosure templates embed a %d for the unit number */
+               sprintf(msg_buf, ev_msg, ev->lun);
+               shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
+                            ev->ev_seq, ev->target, msg_buf);
+               break;
+       case 'C':
+               shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
+                            ev->ev_seq, ev_msg);
+               break;
+       default:
+               shost_printk(KERN_INFO, shost,
+                            "event %d: Unknown Event Code %04X\n",
+                            ev->ev_seq, ev->ev_code);
+               break;
+       }
+}
+
+/*
+ * SCSI sysfs interface functions
+ */
+/*
+ * raid_state_show - sysfs 'raid_state' attribute show handler
+ *
+ * Reports the cached device state name for the addressed logical or
+ * physical device, or "Unknown" when no device info is attached.
+ */
+static ssize_t raid_state_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       const char *name;
+       unsigned char state;
+
+       if (!sdev->hostdata)
+               return snprintf(buf, 16, "Unknown\n");
+
+       /* Logical devices live on the channels past the physical ones */
+       if (sdev->channel >= cs->ctlr_info->physchan_present) {
+               struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+               state = ldev_info->dev_state;
+       } else {
+               struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+               state = pdev_info->dev_state;
+       }
+       name = myrs_devstate_name(state);
+       if (name)
+               return snprintf(buf, 32, "%s\n", name);
+       return snprintf(buf, 32, "Invalid (%02X)\n", state);
+}
+
+/*
+ * raid_state_store - sysfs 'raid_state' attribute store handler
+ *
+ * Accepts "offline" (or "kill"), "online", or "standby" and issues a
+ * SET_DEVICE_STATE IOCTL for the addressed device.  Physical devices
+ * are first translated to their logical device number.  Returns
+ * 'count' on success or when the device is already in the requested
+ * state, -EINVAL for unparseable input or a rejected state change,
+ * and -ENXIO when a physical device cannot be translated.
+ */
+static ssize_t raid_state_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_cmdblk *cmd_blk;
+       union myrs_cmd_mbox *mbox;
+       enum myrs_devstate new_state;
+       unsigned short ldev_num;
+       unsigned char status;
+
+       /* Parse the requested state from the sysfs input */
+       if (!strncmp(buf, "offline", 7) ||
+           !strncmp(buf, "kill", 4))
+               new_state = MYRS_DEVICE_OFFLINE;
+       else if (!strncmp(buf, "online", 6))
+               new_state = MYRS_DEVICE_ONLINE;
+       else if (!strncmp(buf, "standby", 7))
+               new_state = MYRS_DEVICE_STANDBY;
+       else
+               return -EINVAL;
+
+       if (sdev->channel < cs->ctlr_info->physchan_present) {
+               /* Physical device: map it to its logical device number */
+               struct myrs_pdev_info *pdev_info = sdev->hostdata;
+               struct myrs_devmap *pdev_devmap =
+                       (struct myrs_devmap *)&pdev_info->rsvd13;
+
+               if (pdev_info->dev_state == new_state) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Device already in %s\n",
+                                   myrs_devstate_name(new_state));
+                       return count;
+               }
+               status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
+                                            sdev->lun, pdev_devmap);
+               if (status != MYRS_STATUS_SUCCESS)
+                       return -ENXIO;
+               ldev_num = pdev_devmap->ldev_num;
+       } else {
+               struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+               if (ldev_info->dev_state == new_state) {
+                       sdev_printk(KERN_INFO, sdev,
+                                   "Device already in %s\n",
+                                   myrs_devstate_name(new_state));
+                       return count;
+               }
+               ldev_num = ldev_info->ldev_num;
+       }
+       /*
+        * Issue SET_DEVICE_STATE through the shared direct-command
+        * block; dcmd_mutex serializes its use.
+        */
+       mutex_lock(&cs->dcmd_mutex);
+       cmd_blk = &cs->dcmd_blk;
+       myrs_reset_cmd(cmd_blk);
+       mbox = &cmd_blk->mbox;
+       mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->common.id = MYRS_DCMD_TAG;
+       mbox->common.control.dma_ctrl_to_host = true;
+       mbox->common.control.no_autosense = true;
+       mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
+       mbox->set_devstate.state = new_state;
+       mbox->set_devstate.ldev.ldev_num = ldev_num;
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       if (status == MYRS_STATUS_SUCCESS) {
+               /* Mirror the new state in the cached device info */
+               if (sdev->channel < cs->ctlr_info->physchan_present) {
+                       struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+                       pdev_info->dev_state = new_state;
+               } else {
+                       struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+                       ldev_info->dev_state = new_state;
+               }
+               sdev_printk(KERN_INFO, sdev,
+                           "Set device state to %s\n",
+                           myrs_devstate_name(new_state));
+               return count;
+       }
+       sdev_printk(KERN_INFO, sdev,
+                   "Failed to set device state to %s, status 0x%02x\n",
+                   myrs_devstate_name(new_state), status);
+       return -EINVAL;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+/*
+ * raid_level_show - sysfs 'raid_level' attribute show handler
+ *
+ * Logical devices report their configured RAID level name; physical
+ * devices report the pseudo level MYRS_RAID_PHYSICAL.
+ */
+static ssize_t raid_level_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       const char *name = NULL;
+
+       if (!sdev->hostdata)
+               return snprintf(buf, 16, "Unknown\n");
+
+       if (sdev->channel >= cs->ctlr_info->physchan_present) {
+               struct myrs_ldev_info *ldev_info;
+
+               ldev_info = sdev->hostdata;
+               name = myrs_raid_level_name(ldev_info->raid_level);
+               if (!name)
+                       /* Report the unrecognized level, not dev_state */
+                       return snprintf(buf, 32, "Invalid (%02X)\n",
+                                       ldev_info->raid_level);
+
+       } else
+               name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+
+       return snprintf(buf, 32, "%s\n", name);
+}
+static DEVICE_ATTR_RO(raid_level);
+
+/*
+ * rebuild_show - sysfs 'rebuild' attribute show handler
+ *
+ * Refreshes the logical device information and reports rebuild
+ * progress.  Physical devices never rebuild.
+ */
+static ssize_t rebuild_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info;
+       unsigned char status;
+
+       /* Only logical devices can be rebuilding */
+       if (sdev->channel < cs->ctlr_info->physchan_present)
+               return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+       ldev_info = sdev->hostdata;
+       status = myrs_get_ldev_info(cs, ldev_info->ldev_num, ldev_info);
+       if (status != MYRS_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed to get device information, status 0x%02x\n",
+                           status);
+               return -EIO;
+       }
+       if (!ldev_info->rbld_active)
+               return snprintf(buf, 32, "not rebuilding\n");
+
+       return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+                       (size_t)ldev_info->rbld_lba,
+                       (size_t)ldev_info->cfg_devsize);
+}
+
+/*
+ * rebuild_store - sysfs 'rebuild' attribute store handler
+ *
+ * Writing a non-zero value starts a rebuild of the logical device,
+ * zero cancels a running rebuild.  Returns 'count' on success (or for
+ * a no-op cancel), -EINVAL for physical devices, -ENXIO without
+ * device info, -EALREADY if a rebuild is already running, and -EIO on
+ * controller errors.
+ */
+static ssize_t rebuild_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info;
+       struct myrs_cmdblk *cmd_blk;
+       union myrs_cmd_mbox *mbox;
+       unsigned short ldev_num;
+       unsigned char status;
+       int rebuild, ret;
+
+       /* Rebuild only applies to logical devices */
+       if (sdev->channel < cs->ctlr_info->physchan_present)
+               return -EINVAL;
+
+       ldev_info = sdev->hostdata;
+       if (!ldev_info)
+               return -ENXIO;
+       ldev_num = ldev_info->ldev_num;
+
+       ret = kstrtoint(buf, 0, &rebuild);
+       if (ret)
+               return ret;
+
+       /* Refresh device info to get the current rebuild state */
+       status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+       if (status != MYRS_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed to get device information, status 0x%02x\n",
+                           status);
+               return -EIO;
+       }
+
+       if (rebuild && ldev_info->rbld_active) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Rebuild Not Initiated; already in progress\n");
+               return -EALREADY;
+       }
+       if (!rebuild && !ldev_info->rbld_active) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Rebuild Not Cancelled; no rebuild in progress\n");
+               return count;
+       }
+
+       /* Issue the start/stop IOCTL via the serialized direct command */
+       mutex_lock(&cs->dcmd_mutex);
+       cmd_blk = &cs->dcmd_blk;
+       myrs_reset_cmd(cmd_blk);
+       mbox = &cmd_blk->mbox;
+       mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->common.id = MYRS_DCMD_TAG;
+       mbox->common.control.dma_ctrl_to_host = true;
+       mbox->common.control.no_autosense = true;
+       if (rebuild) {
+               mbox->ldev_info.ldev.ldev_num = ldev_num;
+               mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
+       } else {
+               mbox->ldev_info.ldev.ldev_num = ldev_num;
+               mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
+       }
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       if (status) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Rebuild Not %s, status 0x%02x\n",
+                           rebuild ? "Initiated" : "Cancelled", status);
+               ret = -EIO;
+       } else {
+               sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+                           rebuild ? "Initiated" : "Cancelled");
+               ret = count;
+       }
+
+       return ret;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+/*
+ * consistency_check_show - sysfs 'consistency_check' show handler
+ *
+ * Refreshes the logical device information and reports consistency
+ * check progress.  Physical devices never run a check.
+ */
+static ssize_t consistency_check_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info;
+       unsigned short ldev_num;
+       unsigned char status;
+
+       if (sdev->channel < cs->ctlr_info->physchan_present)
+               return snprintf(buf, 32, "physical device - not checking\n");
+
+       ldev_info = sdev->hostdata;
+       if (!ldev_info)
+               return -ENXIO;
+       ldev_num = ldev_info->ldev_num;
+       status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+       /* Don't report stale progress when the refresh failed */
+       if (status != MYRS_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed to get device information, status 0x%02x\n",
+                           status);
+               return -EIO;
+       }
+       if (ldev_info->cc_active)
+               return snprintf(buf, 32, "checking block %zu of %zu\n",
+                               (size_t)ldev_info->cc_lba,
+                               (size_t)ldev_info->cfg_devsize);
+       else
+               return snprintf(buf, 32, "not checking\n");
+}
+
+/*
+ * consistency_check_store - sysfs 'consistency_check' store handler
+ *
+ * Writing a non-zero value starts a consistency check (with automatic
+ * restoration of consistency) on the logical device, zero cancels a
+ * running check.  Returns 'count' on success (or for a no-op cancel),
+ * -EINVAL for physical devices, -ENXIO without device info,
+ * -EALREADY if a check is already running, and -EIO on controller
+ * errors.
+ */
+static ssize_t consistency_check_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info;
+       struct myrs_cmdblk *cmd_blk;
+       union myrs_cmd_mbox *mbox;
+       unsigned short ldev_num;
+       unsigned char status;
+       int check, ret;
+
+       /* Consistency checks only apply to logical devices */
+       if (sdev->channel < cs->ctlr_info->physchan_present)
+               return -EINVAL;
+
+       ldev_info = sdev->hostdata;
+       if (!ldev_info)
+               return -ENXIO;
+       ldev_num = ldev_info->ldev_num;
+
+       ret = kstrtoint(buf, 0, &check);
+       if (ret)
+               return ret;
+
+       /* Refresh device info to get the current check state */
+       status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+       if (status != MYRS_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Failed to get device information, status 0x%02x\n",
+                           status);
+               return -EIO;
+       }
+       if (check && ldev_info->cc_active) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Consistency Check Not Initiated; "
+                           "already in progress\n");
+               return -EALREADY;
+       }
+       if (!check && !ldev_info->cc_active) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Consistency Check Not Cancelled; "
+                           "check not in progress\n");
+               return count;
+       }
+
+       /* Issue the start/stop IOCTL via the serialized direct command */
+       mutex_lock(&cs->dcmd_mutex);
+       cmd_blk = &cs->dcmd_blk;
+       myrs_reset_cmd(cmd_blk);
+       mbox = &cmd_blk->mbox;
+       mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->common.id = MYRS_DCMD_TAG;
+       mbox->common.control.dma_ctrl_to_host = true;
+       mbox->common.control.no_autosense = true;
+       if (check) {
+               mbox->cc.ldev.ldev_num = ldev_num;
+               mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
+               mbox->cc.restore_consistency = true;
+               mbox->cc.initialized_area_only = false;
+       } else {
+               mbox->cc.ldev.ldev_num = ldev_num;
+               mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
+       }
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       if (status != MYRS_STATUS_SUCCESS) {
+               sdev_printk(KERN_INFO, sdev,
+                           "Consistency Check Not %s, status 0x%02x\n",
+                           check ? "Initiated" : "Cancelled", status);
+               ret = -EIO;
+       } else {
+               sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
+                           check ? "Initiated" : "Cancelled");
+               ret = count;
+       }
+
+       return ret;
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+/* Per-device (scsi_device) sysfs attributes exported by this driver */
+static struct device_attribute *myrs_sdev_attrs[] = {
+       &dev_attr_consistency_check,
+       &dev_attr_rebuild,
+       &dev_attr_raid_state,
+       &dev_attr_raid_level,
+       NULL,
+};
+
+/*
+ * serial_show - sysfs 'serial' attribute show handler
+ *
+ * The controller serial number is a fixed 16-byte field that is not
+ * NUL-terminated, so copy it into a terminated local buffer first.
+ */
+static ssize_t serial_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct myrs_hba *cs = shost_priv(shost);
+       char serial[17];
+
+       memcpy(serial, cs->ctlr_info->serial_number, 16);
+       serial[16] = '\0';
+       /* 16 serial chars + '\n' + NUL need 18 bytes; 16 truncated */
+       return snprintf(buf, 18, "%s\n", serial);
+}
+static DEVICE_ATTR_RO(serial);
+
+/* Report the SCSI host number assigned to this controller */
+static ssize_t ctlr_num_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrs_hba *cs = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 20, "%d\n", cs->host->host_no);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+/* Mapping of firmware CPU type codes to printable processor names */
+static struct myrs_cpu_type_tbl {
+       enum myrs_cpu_type type;
+       char *name;
+} myrs_cpu_type_names[] = {
+       { MYRS_CPUTYPE_i960CA, "i960CA" },
+       { MYRS_CPUTYPE_i960RD, "i960RD" },
+       { MYRS_CPUTYPE_i960RN, "i960RN" },
+       { MYRS_CPUTYPE_i960RP, "i960RP" },
+       { MYRS_CPUTYPE_NorthBay, "NorthBay" },
+       { MYRS_CPUTYPE_StrongArm, "StrongARM" },
+       { MYRS_CPUTYPE_i960RM, "i960RM" },
+};
+
+/*
+ * processor_show - sysfs 'processor' attribute show handler
+ *
+ * Reports name, type, and count for both possible controller CPU
+ * slots; an empty or unrecognized slot is reported as "absent".
+ */
+static ssize_t processor_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct myrs_hba *cs = shost_priv(shost);
+       struct myrs_ctlr_info *info = cs->ctlr_info;
+       const char *cpu_name[2] = { NULL, NULL };
+       int slot, i;
+
+       /* Map each populated CPU slot's type code to a printable name */
+       for (slot = 0; slot < 2; slot++) {
+               if (!info->cpu[slot].cpu_count)
+                       continue;
+               for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+                       if (myrs_cpu_type_names[i].type ==
+                           info->cpu[slot].cpu_type) {
+                               cpu_name[slot] = myrs_cpu_type_names[i].name;
+                               break;
+                       }
+               }
+       }
+       if (cpu_name[0] && cpu_name[1])
+               return snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
+                              "2: %s (%s, %d cpus)\n",
+                              info->cpu[0].cpu_name,
+                              cpu_name[0], info->cpu[0].cpu_count,
+                              info->cpu[1].cpu_name,
+                              cpu_name[1], info->cpu[1].cpu_count);
+       if (cpu_name[0])
+               return snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
+                              info->cpu[0].cpu_name,
+                              cpu_name[0], info->cpu[0].cpu_count);
+       if (cpu_name[1])
+               return snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
+                              info->cpu[1].cpu_name,
+                              cpu_name[1], info->cpu[1].cpu_count);
+       return snprintf(buf, 64, "1: absent\n2: absent\n");
+}
+static DEVICE_ATTR_RO(processor);
+
+/* Report the controller model name */
+static ssize_t model_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrs_hba *cs = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 28, "%s\n", cs->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+/* Report the numeric controller type code */
+static ssize_t ctlr_type_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrs_hba *cs = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
+}
+static DEVICE_ATTR_RO(ctlr_type);
+
+/* Report the controller cache size in megabytes */
+static ssize_t cache_size_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrs_hba *cs = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
+}
+static DEVICE_ATTR_RO(cache_size);
+
+/* Report the firmware version as major.minor-turn */
+static ssize_t firmware_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrs_hba *cs = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 16, "%d.%02d-%02d\n",
+                       cs->ctlr_info->fw_major_version,
+                       cs->ctlr_info->fw_minor_version,
+                       cs->ctlr_info->fw_turn_number);
+}
+static DEVICE_ATTR_RO(firmware);
+
+/*
+ * discovery_store - sysfs 'discovery' attribute store handler
+ *
+ * Kicks off a controller device discovery scan, then forces an
+ * immediate monitor run and waits for it so that the device list is
+ * up to date when the write returns.  Returns 'count' on success,
+ * -EINVAL when the controller rejects the request.
+ */
+static ssize_t discovery_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct myrs_hba *cs = shost_priv(shost);
+       struct myrs_cmdblk *cmd_blk;
+       union myrs_cmd_mbox *mbox;
+       unsigned char status;
+
+       /* Issue START_DISCOVERY via the serialized direct command */
+       mutex_lock(&cs->dcmd_mutex);
+       cmd_blk = &cs->dcmd_blk;
+       myrs_reset_cmd(cmd_blk);
+       mbox = &cmd_blk->mbox;
+       mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+       mbox->common.id = MYRS_DCMD_TAG;
+       mbox->common.control.dma_ctrl_to_host = true;
+       mbox->common.control.no_autosense = true;
+       mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
+       myrs_exec_cmd(cs, cmd_blk);
+       status = cmd_blk->status;
+       mutex_unlock(&cs->dcmd_mutex);
+       if (status != MYRS_STATUS_SUCCESS) {
+               shost_printk(KERN_INFO, shost,
+                            "Discovery Not Initiated, status %02X\n",
+                            status);
+               return -EINVAL;
+       }
+       shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
+       /* Restart event processing and force an immediate rescan */
+       cs->next_evseq = 0;
+       cs->needs_update = true;
+       queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+       /* Wait for the monitor run to complete before returning */
+       flush_delayed_work(&cs->monitor_work);
+       shost_printk(KERN_INFO, shost, "Discovery Completed\n");
+
+       return count;
+}
+static DEVICE_ATTR_WO(discovery);
+
+/*
+ * flush_cache_store - sysfs 'flush_cache' store handler
+ *
+ * Asks the controller to flush all cached device data.
+ */
+static ssize_t flush_cache_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct myrs_hba *cs = shost_priv(shost);
+       unsigned char status;
+
+       status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
+                            MYRS_RAID_CONTROLLER);
+       if (status != MYRS_STATUS_SUCCESS) {
+               shost_printk(KERN_INFO, shost,
+                            "Cache Flush failed, status 0x%02x\n", status);
+               return -EIO;
+       }
+       shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
+       return count;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+/* Report the current enclosure-message suppression setting */
+static ssize_t disable_enclosure_messages_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct myrs_hba *cs = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
+}
+
+/*
+ * disable_enclosure_messages_store - sysfs store handler
+ *
+ * Accepts 0, 1, or 2 to control suppression of enclosure ('E') event
+ * messages.  This is a host attribute (see myrs_shost_attrs), so
+ * 'dev' is the Scsi_Host class device: use class_to_shost() like the
+ * matching show handler, not to_scsi_device().
+ */
+static ssize_t disable_enclosure_messages_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct myrs_hba *cs = shost_priv(shost);
+       int value, ret;
+
+       ret = kstrtoint(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       /* Only 0, 1, and 2 are valid settings; reject negatives too */
+       if (value < 0 || value > 2)
+               return -EINVAL;
+
+       cs->disable_enc_msg = value;
+       return count;
+}
+static DEVICE_ATTR_RW(disable_enclosure_messages);
+
+/* Host-wide (Scsi_Host) sysfs attributes exported by this driver */
+static struct device_attribute *myrs_shost_attrs[] = {
+       &dev_attr_serial,
+       &dev_attr_ctlr_num,
+       &dev_attr_processor,
+       &dev_attr_model,
+       &dev_attr_ctlr_type,
+       &dev_attr_cache_size,
+       &dev_attr_firmware,
+       &dev_attr_discovery,
+       &dev_attr_flush_cache,
+       &dev_attr_disable_enclosure_messages,
+       NULL,
+};
+
+/*
+ * SCSI midlayer interface
+ */
+/*
+ * myrs_host_reset - SCSI error-handler host reset callback
+ *
+ * Performs a hard reset of the controller through the bus-specific
+ * reset callback and always reports SUCCESS to the midlayer.
+ */
+int myrs_host_reset(struct scsi_cmnd *scmd)
+{
+       struct Scsi_Host *shost = scmd->device->host;
+       struct myrs_hba *cs = shost_priv(shost);
+
+       cs->reset(cs->io_base);
+       return SUCCESS;
+}
+
+/*
+ * myrs_mode_sense - emulate MODE SENSE caching page (0x08) for a
+ * logical device
+ * @cs: controller state
+ * @scmd: the MODE SENSE command being emulated
+ * @ldev_info: cached logical device information
+ *
+ * Builds a mode parameter list (optionally with a block descriptor
+ * when the DBD bit is clear) and copies it into the command's data
+ * buffer.
+ */
+static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
+               struct myrs_ldev_info *ldev_info)
+{
+       unsigned char modes[32], *mode_pg;
+       bool dbd;
+       size_t mode_len;
+
+       /* DBD (disable block descriptors) bit of the CDB */
+       dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+       if (dbd) {
+               mode_len = 24;
+               mode_pg = &modes[4];
+       } else {
+               mode_len = 32;
+               mode_pg = &modes[12];
+       }
+       memset(modes, 0, sizeof(modes));
+       modes[0] = mode_len - 1;        /* mode data length */
+       modes[2] = 0x10; /* Enable FUA */
+       if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
+               modes[2] |= 0x80;       /* write protected */
+       if (!dbd) {
+               unsigned char *block_desc = &modes[4];
+
+               modes[3] = 8;           /* block descriptor length */
+               put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
+               /*
+                * NOTE(review): this 32-bit store spans descriptor bytes
+                * 5-8; byte 8 is mode_pg[0], which is overwritten just
+                * below.  The short block descriptor keeps the block
+                * length in bytes 5-7 -- confirm this offset (vs
+                * &block_desc[4]) is intentional.
+                */
+               put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
+       }
+       mode_pg[0] = 0x08;              /* caching mode page code */
+       mode_pg[1] = 0x12;              /* page length */
+       if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
+               mode_pg[2] |= 0x01;     /* RCD: read cache disabled */
+       if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+           ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+               mode_pg[2] |= 0x04;     /* WCE: write cache enabled */
+       if (ldev_info->cacheline_size) {
+               mode_pg[2] |= 0x08;
+               put_unaligned_be16(1 << ldev_info->cacheline_size,
+                                  &mode_pg[14]);
+       }
+
+       scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+/*
+ * myrs_queuecommand - queue a SCSI command to the controller
+ *
+ * REPORT LUNS is rejected with ILLEGAL REQUEST, and MODE SENSE for
+ * logical volumes is emulated via myrs_mode_sense().  Everything else
+ * is packed into a SCSI_10 or SCSI_255 mailbox (selected by CDB
+ * length), given a sense buffer from the DMA pool and, when needed, an
+ * external scatter/gather list, then queued under cs->queue_lock.
+ *
+ * Returns 0, or SCSI_MLQUEUE_HOST_BUSY when a DMA pool allocation
+ * fails so the midlayer retries later.
+ */
+static int myrs_queuecommand(struct Scsi_Host *shost,
+               struct scsi_cmnd *scmd)
+{
+       struct myrs_hba *cs = shost_priv(shost);
+       struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+       union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+       struct scsi_device *sdev = scmd->device;
+       union myrs_sgl *hw_sge;
+       dma_addr_t sense_addr;
+       struct scatterlist *sgl;
+       unsigned long flags, timeout;
+       int nsge;
+
+       /* No per-device data attached: fail the command outright */
+       if (!scmd->device->hostdata) {
+               scmd->result = (DID_NO_CONNECT << 16);
+               scmd->scsi_done(scmd);
+               return 0;
+       }
+
+       switch (scmd->cmnd[0]) {
+       case REPORT_LUNS:
+               scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+                                       0x20, 0x0);
+               scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+               scmd->scsi_done(scmd);
+               return 0;
+       case MODE_SENSE:
+               /* Only emulated for logical volumes; physical devices
+                * pass through to the hardware below. */
+               if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+                       struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+                       if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+                           (scmd->cmnd[2] & 0x3F) != 0x08) {
+                               /* Illegal request, invalid field in CDB */
+                               scsi_build_sense_buffer(0, scmd->sense_buffer,
+                                       ILLEGAL_REQUEST, 0x24, 0);
+                               scmd->result = (DRIVER_SENSE << 24) |
+                                       SAM_STAT_CHECK_CONDITION;
+                       } else {
+                               myrs_mode_sense(cs, scmd, ldev_info);
+                               scmd->result = (DID_OK << 16);
+                       }
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+               break;
+       }
+
+       myrs_reset_cmd(cmd_blk);
+       cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
+                                       &sense_addr);
+       if (!cmd_blk->sense)
+               return SCSI_MLQUEUE_HOST_BUSY;
+       cmd_blk->sense_addr = sense_addr;
+
+       timeout = scmd->request->timeout;
+       /* CDBs up to 10 bytes fit inline in the SCSI_10 mailbox; longer
+        * ones are staged in a separate DCDB buffer (SCSI_255). */
+       if (scmd->cmd_len <= 10) {
+               if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+                       struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+                       mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
+                       mbox->SCSI_10.pdev.lun = ldev_info->lun;
+                       mbox->SCSI_10.pdev.target = ldev_info->target;
+                       mbox->SCSI_10.pdev.channel = ldev_info->channel;
+                       mbox->SCSI_10.pdev.ctlr = 0;
+               } else {
+                       mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
+                       mbox->SCSI_10.pdev.lun = sdev->lun;
+                       mbox->SCSI_10.pdev.target = sdev->id;
+                       mbox->SCSI_10.pdev.channel = sdev->channel;
+               }
+               /* tag + 3: low command ids appear reserved for
+                * driver-internal commands -- confirm against the id use
+                * elsewhere in the driver */
+               mbox->SCSI_10.id = scmd->request->tag + 3;
+               mbox->SCSI_10.control.dma_ctrl_to_host =
+                       (scmd->sc_data_direction == DMA_FROM_DEVICE);
+               if (scmd->request->cmd_flags & REQ_FUA)
+                       mbox->SCSI_10.control.fua = true;
+               mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
+               mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
+               mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
+               mbox->SCSI_10.cdb_len = scmd->cmd_len;
+               /* Firmware timeout field is scaled; use minutes above 60s */
+               if (timeout > 60) {
+                       mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+                       mbox->SCSI_10.tmo.tmo_val = timeout / 60;
+               } else {
+                       mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+                       mbox->SCSI_10.tmo.tmo_val = timeout;
+               }
+               memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
+               hw_sge = &mbox->SCSI_10.dma_addr;
+               cmd_blk->dcdb = NULL;
+       } else {
+               dma_addr_t dcdb_dma;
+
+               cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
+                                              &dcdb_dma);
+               if (!cmd_blk->dcdb) {
+                       /* Unwind the sense allocation before asking the
+                        * midlayer to retry */
+                       dma_pool_free(cs->sense_pool, cmd_blk->sense,
+                                     cmd_blk->sense_addr);
+                       cmd_blk->sense = NULL;
+                       cmd_blk->sense_addr = 0;
+                       return SCSI_MLQUEUE_HOST_BUSY;
+               }
+               cmd_blk->dcdb_dma = dcdb_dma;
+               if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+                       struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+                       mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
+                       mbox->SCSI_255.pdev.lun = ldev_info->lun;
+                       mbox->SCSI_255.pdev.target = ldev_info->target;
+                       mbox->SCSI_255.pdev.channel = ldev_info->channel;
+                       mbox->SCSI_255.pdev.ctlr = 0;
+               } else {
+                       mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
+                       mbox->SCSI_255.pdev.lun = sdev->lun;
+                       mbox->SCSI_255.pdev.target = sdev->id;
+                       mbox->SCSI_255.pdev.channel = sdev->channel;
+               }
+               mbox->SCSI_255.id = scmd->request->tag + 3;
+               mbox->SCSI_255.control.dma_ctrl_to_host =
+                       (scmd->sc_data_direction == DMA_FROM_DEVICE);
+               if (scmd->request->cmd_flags & REQ_FUA)
+                       mbox->SCSI_255.control.fua = true;
+               mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
+               mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
+               mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
+               mbox->SCSI_255.cdb_len = scmd->cmd_len;
+               mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
+               if (timeout > 60) {
+                       mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+                       mbox->SCSI_255.tmo.tmo_val = timeout / 60;
+               } else {
+                       mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+                       mbox->SCSI_255.tmo.tmo_val = timeout;
+               }
+               memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
+               hw_sge = &mbox->SCSI_255.dma_addr;
+       }
+       if (scmd->sc_data_direction == DMA_NONE)
+               goto submit;
+       nsge = scsi_dma_map(scmd);
+       /* One element uses the first inline SGE; two fit in the inline
+        * slots; more spill into a list from the sg pool. */
+       if (nsge == 1) {
+               sgl = scsi_sglist(scmd);
+               hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
+               hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
+       } else {
+               struct myrs_sge *hw_sgl;
+               dma_addr_t hw_sgl_addr;
+               int i;
+
+               if (nsge > 2) {
+                       hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
+                                               &hw_sgl_addr);
+                       if (WARN_ON(!hw_sgl)) {
+                               /* Free everything allocated above */
+                               if (cmd_blk->dcdb) {
+                                       dma_pool_free(cs->dcdb_pool,
+                                                     cmd_blk->dcdb,
+                                                     cmd_blk->dcdb_dma);
+                                       cmd_blk->dcdb = NULL;
+                                       cmd_blk->dcdb_dma = 0;
+                               }
+                               dma_pool_free(cs->sense_pool,
+                                             cmd_blk->sense,
+                                             cmd_blk->sense_addr);
+                               cmd_blk->sense = NULL;
+                               cmd_blk->sense_addr = 0;
+                               return SCSI_MLQUEUE_HOST_BUSY;
+                       }
+                       cmd_blk->sgl = hw_sgl;
+                       cmd_blk->sgl_addr = hw_sgl_addr;
+                       if (scmd->cmd_len <= 10)
+                               mbox->SCSI_10.control.add_sge_mem = true;
+                       else
+                               mbox->SCSI_255.control.add_sge_mem = true;
+                       hw_sge->ext.sge0_len = nsge;
+                       hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
+               } else
+                       hw_sgl = hw_sge->sge;
+
+               scsi_for_each_sg(scmd, sgl, nsge, i) {
+                       if (WARN_ON(!hw_sgl)) {
+                               scsi_dma_unmap(scmd);
+                               scmd->result = (DID_ERROR << 16);
+                               scmd->scsi_done(scmd);
+                               return 0;
+                       }
+                       hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
+                       hw_sgl->sge_count = (u64)sg_dma_len(sgl);
+                       hw_sgl++;
+               }
+       }
+submit:
+       spin_lock_irqsave(&cs->queue_lock, flags);
+       myrs_qcmd(cs, cmd_blk);
+       spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+       return 0;
+}
+
+/*
+ * myrs_translate_ldev - map a scsi_device on a logical channel to its
+ * logical device number.  Logical channels follow the physical ones,
+ * so the index is target id plus (channel offset * targets-per-channel).
+ */
+static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
+               struct scsi_device *sdev)
+{
+       unsigned int chan_base = cs->ctlr_info->physchan_present;
+
+       return sdev->id + (sdev->channel - chan_base) * sdev->host->max_id;
+}
+
+/*
+ * myrs_slave_alloc - allocate per-device data at scan time
+ *
+ * Channels at or above physchan_present are logical volumes: fetch the
+ * logical device info, attach it as hostdata and publish the RAID
+ * level through the raid transport class.  Lower channels are physical
+ * devices and get a myrs_pdev_info instead.
+ */
+static int myrs_slave_alloc(struct scsi_device *sdev)
+{
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       unsigned char status;
+
+       if (sdev->channel > sdev->host->max_channel)
+               return 0;
+
+       if (sdev->channel >= cs->ctlr_info->physchan_present) {
+               struct myrs_ldev_info *ldev_info;
+               unsigned short ldev_num;
+
+               /* Logical volumes exist only at LUN 0 */
+               if (sdev->lun > 0)
+                       return -ENXIO;
+
+               ldev_num = myrs_translate_ldev(cs, sdev);
+
+               /* GFP_DMA: the info buffer is presumably filled by the
+                * controller via DMA -- confirm */
+               ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+               if (!ldev_info)
+                       return -ENOMEM;
+
+               status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+               /* NOTE(review): on failure this returns 0 with hostdata
+                * NULL, while the physical-device path below returns
+                * -ENXIO -- intentional? */
+               if (status != MYRS_STATUS_SUCCESS) {
+                       sdev->hostdata = NULL;
+                       kfree(ldev_info);
+               } else {
+                       enum raid_level level;
+
+                       dev_dbg(&sdev->sdev_gendev,
+                               "Logical device mapping %d:%d:%d -> %d\n",
+                               ldev_info->channel, ldev_info->target,
+                               ldev_info->lun, ldev_info->ldev_num);
+
+                       sdev->hostdata = ldev_info;
+                       /* Translate firmware RAID level to the transport
+                        * class enumeration */
+                       switch (ldev_info->raid_level) {
+                       case MYRS_RAID_LEVEL0:
+                               level = RAID_LEVEL_LINEAR;
+                               break;
+                       case MYRS_RAID_LEVEL1:
+                               level = RAID_LEVEL_1;
+                               break;
+                       case MYRS_RAID_LEVEL3:
+                       case MYRS_RAID_LEVEL3F:
+                       case MYRS_RAID_LEVEL3L:
+                               level = RAID_LEVEL_3;
+                               break;
+                       case MYRS_RAID_LEVEL5:
+                       case MYRS_RAID_LEVEL5L:
+                               level = RAID_LEVEL_5;
+                               break;
+                       case MYRS_RAID_LEVEL6:
+                               level = RAID_LEVEL_6;
+                               break;
+                       case MYRS_RAID_LEVELE:
+                       case MYRS_RAID_NEWSPAN:
+                       case MYRS_RAID_SPAN:
+                               level = RAID_LEVEL_LINEAR;
+                               break;
+                       case MYRS_RAID_JBOD:
+                               level = RAID_LEVEL_JBOD;
+                               break;
+                       default:
+                               level = RAID_LEVEL_UNKNOWN;
+                               break;
+                       }
+                       raid_set_level(myrs_raid_template,
+                                      &sdev->sdev_gendev, level);
+                       if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
+                               const char *name;
+
+                               name = myrs_devstate_name(ldev_info->dev_state);
+                               sdev_printk(KERN_DEBUG, sdev,
+                                           "logical device in state %s\n",
+                                           name ? name : "Invalid");
+                       }
+               }
+       } else {
+               struct myrs_pdev_info *pdev_info;
+
+               pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+               if (!pdev_info)
+                       return -ENOMEM;
+
+               status = myrs_get_pdev_info(cs, sdev->channel,
+                                           sdev->id, sdev->lun,
+                                           pdev_info);
+               if (status != MYRS_STATUS_SUCCESS) {
+                       sdev->hostdata = NULL;
+                       kfree(pdev_info);
+                       return -ENXIO;
+               }
+               sdev->hostdata = pdev_info;
+       }
+       return 0;
+}
+
+/*
+ * myrs_slave_configure - finish device setup after slave_alloc
+ *
+ * Physical devices are kept away from the upper-layer drivers
+ * (no_uld_attach); the controller's own TYPE_RAID device is skipped.
+ * Logical volumes advertise a write-back cache default when the
+ * firmware reports write caching enabled.
+ */
+static int myrs_slave_configure(struct scsi_device *sdev)
+{
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info;
+
+       if (sdev->channel > sdev->host->max_channel)
+               return -ENXIO;
+
+       if (sdev->channel < cs->ctlr_info->physchan_present) {
+               /* Skip HBA device */
+               if (sdev->type == TYPE_RAID)
+                       return -ENXIO;
+               sdev->no_uld_attach = 1;
+               return 0;
+       }
+       /* Logical volumes only exist at LUN 0 */
+       if (sdev->lun != 0)
+               return -ENXIO;
+
+       ldev_info = sdev->hostdata;
+       if (!ldev_info)
+               return -ENXIO;
+       if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+           ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+               sdev->wce_default_on = 1;
+       sdev->tagged_supported = 1;
+       return 0;
+}
+
+/* Release the per-device info allocated in myrs_slave_alloc() */
+static void myrs_slave_destroy(struct scsi_device *sdev)
+{
+       kfree(sdev->hostdata);
+}
+
+/* SCSI midlayer host template for myrs controllers */
+struct scsi_host_template myrs_template = {
+       .module                 = THIS_MODULE,
+       .name                   = "DAC960",
+       .proc_name              = "myrs",
+       .queuecommand           = myrs_queuecommand,
+       .eh_host_reset_handler  = myrs_host_reset,
+       .slave_alloc            = myrs_slave_alloc,
+       .slave_configure        = myrs_slave_configure,
+       .slave_destroy          = myrs_slave_destroy,
+       .cmd_size               = sizeof(struct myrs_cmdblk),
+       .shost_attrs            = myrs_shost_attrs,
+       .sdev_attrs             = myrs_sdev_attrs,
+       .this_id                = -1,
+};
+
+/*
+ * myrs_alloc_host - allocate a Scsi_Host with embedded myrs_hba
+ *
+ * Returns the initialized myrs_hba private data, or NULL when the
+ * midlayer allocation fails.
+ */
+static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
+               const struct pci_device_id *entry)
+{
+       struct myrs_hba *cs;
+       struct Scsi_Host *host;
+
+       host = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
+       if (!host)
+               return NULL;
+
+       host->max_cmd_len = 16;
+       host->max_lun = 256;
+
+       cs = shost_priv(host);
+       cs->host = host;
+       mutex_init(&cs->dcmd_mutex);
+       mutex_init(&cs->cinfo_mutex);
+
+       return cs;
+}
+
+/*
+ * RAID template functions
+ */
+
+/**
+ * myrs_is_raid - return boolean indicating device is raid volume
+ * @dev the device struct object
+ */
+static int
+myrs_is_raid(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+
+       /* Channels past the physical ones carry logical (RAID) volumes */
+       if (sdev->channel >= cs->ctlr_info->physchan_present)
+               return 1;
+       return 0;
+}
+
+/**
+ * myrs_get_resync - get raid volume resync percent complete
+ * @dev the device struct object
+ *
+ * Refreshes the logical device info while a rebuild is active and
+ * reports rebuild progress (percent) to the raid transport class.
+ */
+static void
+myrs_get_resync(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info = sdev->hostdata;
+       u64 percent_complete = 0;
+
+       if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+               return;
+       if (ldev_info->rbld_active) {
+               unsigned short ldev_num = ldev_info->ldev_num;
+
+               /* Refresh device info before sampling rebuild progress;
+                * on failure the previously cached values are used. */
+               myrs_get_ldev_info(cs, ldev_num, ldev_info);
+               percent_complete = ldev_info->rbld_lba * 100;
+               do_div(percent_complete, ldev_info->cfg_devsize);
+       }
+       raid_set_resync(myrs_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrs_get_state - get raid volume status
+ * @dev the device struct object
+ *
+ * Translates the firmware device state into a raid transport class
+ * state; physical devices and volumes without info report UNKNOWN.
+ */
+static void
+myrs_get_state(struct device *dev)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct myrs_hba *cs = shost_priv(sdev->host);
+       struct myrs_ldev_info *ldev_info = sdev->hostdata;
+       enum raid_state state;
+
+       if (!ldev_info || sdev->channel < cs->ctlr_info->physchan_present) {
+               state = RAID_STATE_UNKNOWN;
+               goto done;
+       }
+
+       switch (ldev_info->dev_state) {
+       case MYRS_DEVICE_ONLINE:
+               state = RAID_STATE_ACTIVE;
+               break;
+       case MYRS_DEVICE_SUSPECTED_CRITICAL:
+       case MYRS_DEVICE_CRITICAL:
+               state = RAID_STATE_DEGRADED;
+               break;
+       case MYRS_DEVICE_REBUILD:
+               state = RAID_STATE_RESYNCING;
+               break;
+       case MYRS_DEVICE_UNCONFIGURED:
+       case MYRS_DEVICE_INVALID_STATE:
+               state = RAID_STATE_UNKNOWN;
+               break;
+       default:
+               state = RAID_STATE_OFFLINE;
+       }
+done:
+       raid_set_state(myrs_raid_template, dev, state);
+}
+
+/* RAID transport class callbacks; cookie ties them to myrs_template */
+struct raid_function_template myrs_raid_functions = {
+       .cookie         = &myrs_template,
+       .is_raid        = myrs_is_raid,
+       .get_resync     = myrs_get_resync,
+       .get_state      = myrs_get_state,
+};
+
+/*
+ * PCI interface functions
+ */
+
+/* Ask the firmware to flush all cached device data */
+void myrs_flush_cache(struct myrs_hba *cs)
+{
+       myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
+}
+
+/*
+ * myrs_handle_scsi - complete a SCSI command
+ *
+ * Unmaps DMA, copies sense data on failure, returns the per-command
+ * DMA pool buffers (sense, DCDB, SG list), sets residual and result,
+ * and calls scsi_done.
+ */
+static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
+               struct scsi_cmnd *scmd)
+{
+       unsigned char status;
+
+       if (!cmd_blk)
+               return;
+
+       scsi_dma_unmap(scmd);
+       status = cmd_blk->status;
+       if (cmd_blk->sense) {
+               if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
+                       /* Copy no more than the midlayer buffer holds */
+                       unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
+
+                       if (sense_len > cmd_blk->sense_len)
+                               sense_len = cmd_blk->sense_len;
+                       memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
+               }
+               dma_pool_free(cs->sense_pool, cmd_blk->sense,
+                             cmd_blk->sense_addr);
+               cmd_blk->sense = NULL;
+               cmd_blk->sense_addr = 0;
+       }
+       if (cmd_blk->dcdb) {
+               dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
+                             cmd_blk->dcdb_dma);
+               cmd_blk->dcdb = NULL;
+               cmd_blk->dcdb_dma = 0;
+       }
+       if (cmd_blk->sgl) {
+               dma_pool_free(cs->sg_pool, cmd_blk->sgl,
+                             cmd_blk->sgl_addr);
+               cmd_blk->sgl = NULL;
+               cmd_blk->sgl_addr = 0;
+       }
+       if (cmd_blk->residual)
+               scsi_set_resid(scmd, cmd_blk->residual);
+       if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
+           status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
+               scmd->result = (DID_BAD_TARGET << 16);
+       else
+               scmd->result = (DID_OK << 16) | status;
+       scmd->scsi_done(scmd);
+}
+
+/* Wake any waiter blocked on an internal (non-SCSI) command */
+static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+       struct completion *waiter;
+
+       if (!cmd_blk)
+               return;
+
+       waiter = cmd_blk->complete;
+       if (waiter) {
+               complete(waiter);
+               cmd_blk->complete = NULL;
+       }
+}
+
+/*
+ * myrs_monitor - periodic controller health monitor (delayed work)
+ *
+ * Samples firmware status, refreshes controller info when flagged,
+ * drains and logs new firmware events, refreshes logical device info
+ * while any background operation is active, and re-arms itself with an
+ * interval that depends on pending activity.
+ */
+static void myrs_monitor(struct work_struct *work)
+{
+       struct myrs_hba *cs = container_of(work, struct myrs_hba,
+                                          monitor_work.work);
+       struct Scsi_Host *shost = cs->host;
+       struct myrs_ctlr_info *info = cs->ctlr_info;
+       unsigned int epoch = cs->fwstat_buf->epoch;
+       unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
+       unsigned char status;
+
+       dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+       status = myrs_get_fwstatus(cs);
+
+       if (cs->needs_update) {
+               cs->needs_update = false;
+               mutex_lock(&cs->cinfo_mutex);
+               status = myrs_get_ctlr_info(cs);
+               mutex_unlock(&cs->cinfo_mutex);
+       }
+       if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
+               status = myrs_get_event(cs, cs->next_evseq,
+                                       cs->event_buf);
+               if (status == MYRS_STATUS_SUCCESS) {
+                       myrs_log_event(cs, cs->event_buf);
+                       cs->next_evseq++;
+                       /* more events may be queued: poll again at once */
+                       interval = 1;
+               }
+       }
+
+       if (time_after(jiffies, cs->secondary_monitor_time
+                      + MYRS_SECONDARY_MONITOR_INTERVAL))
+               cs->secondary_monitor_time = jiffies;
+
+       /* Any background operation active: refresh every volume's info */
+       if (info->bg_init_active +
+           info->ldev_init_active +
+           info->pdev_init_active +
+           info->cc_active +
+           info->rbld_active +
+           info->exp_active != 0) {
+               struct scsi_device *sdev;
+
+               shost_for_each_device(sdev, shost) {
+                       struct myrs_ldev_info *ldev_info;
+                       int ldev_num;
+
+                       if (sdev->channel < info->physchan_present)
+                               continue;
+                       ldev_info = sdev->hostdata;
+                       if (!ldev_info)
+                               continue;
+                       ldev_num = ldev_info->ldev_num;
+                       myrs_get_ldev_info(cs, ldev_num, ldev_info);
+               }
+               cs->needs_update = true;
+       }
+       /* Quiet controller: back off to the slow polling interval */
+       if (epoch == cs->epoch &&
+           cs->fwstat_buf->next_evseq == cs->next_evseq &&
+           (cs->needs_update == false ||
+            time_before(jiffies, cs->primary_monitor_time
+                        + MYRS_PRIMARY_MONITOR_INTERVAL))) {
+               interval = MYRS_SECONDARY_MONITOR_INTERVAL;
+       }
+
+       if (interval > 1)
+               cs->primary_monitor_time = jiffies;
+       queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
+}
+
+/*
+ * myrs_create_mempools - allocate per-controller DMA pools and the
+ * monitor workqueue
+ *
+ * Creates the SG-list, sense-data and DCDB DMA pools, spawns the
+ * monitoring workqueue and arms the monitor timer.  On failure, all
+ * resources allocated so far are unwound (goto cleanup chain) and
+ * false is returned.
+ */
+static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
+{
+       struct Scsi_Host *shost = cs->host;
+       size_t elem_size, elem_align;
+
+       /* Each pool element holds one full hardware SG list */
+       elem_align = sizeof(struct myrs_sge);
+       elem_size = shost->sg_tablesize * elem_align;
+       cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
+                                     elem_size, elem_align, 0);
+       if (cs->sg_pool == NULL) {
+               shost_printk(KERN_ERR, shost,
+                            "Failed to allocate SG pool\n");
+               return false;
+       }
+
+       cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
+                                        MYRS_SENSE_SIZE, sizeof(int), 0);
+       if (cs->sense_pool == NULL) {
+               shost_printk(KERN_ERR, shost,
+                            "Failed to allocate sense data pool\n");
+               goto fail_sg;
+       }
+
+       cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
+                                       MYRS_DCDB_SIZE,
+                                       sizeof(unsigned char), 0);
+       if (!cs->dcdb_pool) {
+               shost_printk(KERN_ERR, shost,
+                            "Failed to allocate DCDB pool\n");
+               goto fail_sense;
+       }
+
+       snprintf(cs->work_q_name, sizeof(cs->work_q_name),
+                "myrs_wq_%d", shost->host_no);
+       cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+       if (!cs->work_q) {
+               shost_printk(KERN_ERR, shost,
+                            "Failed to create workqueue\n");
+               goto fail_dcdb;
+       }
+
+       /* Initialize the Monitoring Timer. */
+       INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
+       queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+
+       return true;
+
+fail_dcdb:
+       dma_pool_destroy(cs->dcdb_pool);
+       cs->dcdb_pool = NULL;
+fail_sense:
+       dma_pool_destroy(cs->sense_pool);
+       cs->sense_pool = NULL;
+fail_sg:
+       dma_pool_destroy(cs->sg_pool);
+       cs->sg_pool = NULL;
+       return false;
+}
+
+/* Stop the monitor, tear down its workqueue and the three DMA pools */
+static void myrs_destroy_mempools(struct myrs_hba *cs)
+{
+       cancel_delayed_work_sync(&cs->monitor_work);
+       destroy_workqueue(cs->work_q);
+
+       dma_pool_destroy(cs->sg_pool);
+       dma_pool_destroy(cs->dcdb_pool);
+       dma_pool_destroy(cs->sense_pool);
+}
+
+/*
+ * myrs_unmap - free the event buffer, controller info and the
+ * DMA-coherent firmware status and mailbox areas
+ */
+static void myrs_unmap(struct myrs_hba *cs)
+{
+       kfree(cs->event_buf);
+       kfree(cs->ctlr_info);
+       if (cs->fwstat_buf) {
+               dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
+                                 cs->fwstat_buf, cs->fwstat_addr);
+               cs->fwstat_buf = NULL;
+       }
+       if (cs->first_stat_mbox) {
+               dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
+                                 cs->first_stat_mbox, cs->stat_mbox_addr);
+               cs->first_stat_mbox = NULL;
+       }
+       if (cs->first_cmd_mbox) {
+               dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
+                                 cs->first_cmd_mbox, cs->cmd_mbox_addr);
+               cs->first_cmd_mbox = NULL;
+       }
+}
+
+/*
+ * myrs_cleanup - release all controller resources
+ *
+ * Frees the mailbox/status areas, disables interrupts and unmaps the
+ * register window, releases the IRQ and I/O region, and drops the host
+ * reference.  Fix: the original unconditionally called iounmap() a
+ * second time after the guarded unmap, double-unmapping mmio_base (or
+ * passing NULL when it was never mapped); unmap exactly once and clear
+ * the pointer.
+ */
+static void myrs_cleanup(struct myrs_hba *cs)
+{
+       struct pci_dev *pdev = cs->pdev;
+
+       /* Free the memory mailbox, status, and related structures */
+       myrs_unmap(cs);
+
+       if (cs->mmio_base) {
+               cs->disable_intr(cs);
+               iounmap(cs->mmio_base);
+               cs->mmio_base = NULL;
+       }
+       if (cs->irq)
+               free_irq(cs->irq, cs);
+       if (cs->io_addr)
+               release_region(cs->io_addr, 0x80);
+       pci_set_drvdata(pdev, NULL);
+       pci_disable_device(pdev);
+       scsi_host_put(cs->host);
+}
+
+/*
+ * myrs_detect - probe-time controller bring-up
+ *
+ * Allocates the host, enables the PCI device, maps the register
+ * window, runs the board-specific hw_init and installs the shared IRQ
+ * handler.  Any failure funnels through myrs_cleanup() and returns
+ * NULL.
+ */
+static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
+               const struct pci_device_id *entry)
+{
+       struct myrs_privdata *privdata =
+               (struct myrs_privdata *)entry->driver_data;
+       irq_handler_t irq_handler = privdata->irq_handler;
+       unsigned int mmio_size = privdata->mmio_size;
+       struct myrs_hba *cs = NULL;
+
+       cs = myrs_alloc_host(pdev, entry);
+       if (!cs) {
+               dev_err(&pdev->dev, "Unable to allocate Controller\n");
+               return NULL;
+       }
+       cs->pdev = pdev;
+
+       if (pci_enable_device(pdev))
+               goto Failure;
+
+       cs->pci_addr = pci_resource_start(pdev, 0);
+
+       pci_set_drvdata(pdev, cs);
+       spin_lock_init(&cs->queue_lock);
+       /* Map the Controller Register Window. */
+       if (mmio_size < PAGE_SIZE)
+               mmio_size = PAGE_SIZE;
+       cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+       if (cs->mmio_base == NULL) {
+               dev_err(&pdev->dev,
+                       "Unable to map Controller Register Window\n");
+               goto Failure;
+       }
+
+       /* Registers may not be page-aligned; offset into the mapping */
+       cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
+       if (privdata->hw_init(pdev, cs, cs->io_base))
+               goto Failure;
+
+       /* Acquire shared access to the IRQ Channel. */
+       if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
+               dev_err(&pdev->dev,
+                       "Unable to acquire IRQ Channel %d\n", pdev->irq);
+               goto Failure;
+       }
+       cs->irq = pdev->irq;
+       return cs;
+
+Failure:
+       dev_err(&pdev->dev,
+               "Failed to initialize Controller\n");
+       myrs_cleanup(cs);
+       return NULL;
+}
+
+/*
+ * myrs_err_status - report Controller BIOS Messages passed through the
+ * Error Status Register when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+
+static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
+               unsigned char parm0, unsigned char parm1)
+{
+       struct pci_dev *pdev = cs->pdev;
+
+       switch (status) {
+       case 0x00:
+               dev_info(&pdev->dev,
+                        "Physical Device %d:%d Not Responding\n",
+                        parm1, parm0);
+               break;
+       case 0x08:
+               dev_notice(&pdev->dev, "Spinning Up Drives\n");
+               break;
+       case 0x30:
+               dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+               break;
+       case 0x60:
+               dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+               break;
+       case 0x70:
+               dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+               break;
+       case 0x90:
+               dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+                          parm1, parm0);
+               break;
+       case 0xA0:
+               dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+               break;
+       case 0xB0:
+               dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+               break;
+       case 0xD0:
+               dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+               break;
+       case 0xF0:
+               dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+               return true;
+       default:
+               dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+                       status);
+               return true;
+       }
+       return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 GEM Series Controllers.
+ *
+ * The GEM doorbell/status bits live in the top byte of 32-bit
+ * little-endian registers, hence the "<< 24" on every bit value below.
+ */
+
+/* Ring the inbound doorbell: new command in the hardware mailbox */
+static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+       writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
+
+       writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
+}
+
+/* Set the GEN_IRQ bit in the inbound doorbell register */
+static inline void DAC960_GEM_gen_intr(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
+
+       writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+/* Set the controller-reset bit in the inbound doorbell register */
+static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
+
+       writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+/* Signal a new memory-mailbox command; on GEM this uses the same
+ * doorbell bit as the hardware mailbox (HWMBOX_NEW_CMD) */
+static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+       writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
+{
+       __le32 val;
+
+       val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+       return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
+{
+       __le32 val;
+
+       val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+       return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
+}
+
+/*
+ * GEM outbound doorbell and interrupt-mask helpers.  Status bits are in
+ * bits 31:24; the outbound doorbell and the IRQ mask each have separate
+ * set/read and clear register offsets.
+ */
+
+/* Acknowledge a hardware-mailbox completion interrupt. */
+static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
+
+       writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+/* Acknowledge a memory-mailbox completion interrupt. */
+static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
+{
+       __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
+
+       writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+/* Acknowledge both hardware- and memory-mailbox interrupts in one write. */
+static inline void DAC960_GEM_ack_intr(void __iomem *base)
+{
+       __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
+                                 DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
+
+       writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+/* True when a hardware-mailbox command status is waiting to be read. */
+static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
+{
+       __le32 val;
+
+       val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+       return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
+}
+
+/* True when a memory-mailbox command status is waiting to be read. */
+static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
+{
+       __le32 val;
+
+       val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+       return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
+}
+
+/* Enable mailbox interrupts by clearing their mask bits. */
+static inline void DAC960_GEM_enable_intr(void __iomem *base)
+{
+       __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+                                 DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
+       writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
+}
+
+/* Disable all controller interrupts (write zero to the mask set register). */
+static inline void DAC960_GEM_disable_intr(void __iomem *base)
+{
+       __le32 val = 0;
+
+       writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
+}
+
+/* True when neither mailbox interrupt is masked off. */
+static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
+{
+       __le32 val;
+
+       val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
+       return !((le32_to_cpu(val) >> 24) &
+                (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+                 DAC960_GEM_IRQMASK_MMBOX_IRQ));
+}
+
+/*
+ * Copy a command into the next in-memory mailbox slot.  words[0] (which
+ * holds the command identifier the controller polls on) must become
+ * visible last, hence the barriers around it.
+ */
+static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+               union myrs_cmd_mbox *mbox)
+{
+       memcpy(&mem_mbox->words[1], &mbox->words[1],
+              sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+       /* Barrier to avoid reordering */
+       wmb();
+       mem_mbox->words[0] = mbox->words[0];
+       /* Barrier to force PCI access */
+       mb();
+}
+
+/* Post the DMA address of a command mailbox to the hardware mailbox. */
+static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
+               dma_addr_t cmd_mbox_addr)
+{
+       dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
+}
+
+/* Read the identifier of the command whose status is available. */
+static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
+{
+       return readw(base + DAC960_GEM_CMDSTS_OFFSET);
+}
+
+/* Read the completion status byte (offset +2 past the identifier). */
+static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
+{
+       return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
+}
+
+/*
+ * Fetch a pending initialization error from the controller.
+ * Returns false when no error is pending; otherwise fills in the error
+ * code and its two parameter bytes and clears the error-status register.
+ *
+ * NOTE(review): *error is assigned from the raw __le32 with only the
+ * PENDING bit (shifted into bits 31:24) masked off, then truncated to a
+ * byte — unlike the BA/LP variants, which shift the status down first.
+ * This matches the legacy DAC960 GEM code, but confirm against the GEM
+ * register layout that the error code really is in the low byte.
+ */
+static inline bool
+DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
+               unsigned char *param0, unsigned char *param1)
+{
+       __le32 val;
+
+       val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
+       if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
+               return false;
+       *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
+       *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
+       *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
+       writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
+       return true;
+}
+
+/*
+ * Issue one command through the hardware mailbox by polling: wait for
+ * the mailbox to drain, post the command address, ring the doorbell,
+ * busy-wait for status, then acknowledge both interrupt and status.
+ * Used only during memory-mailbox bring-up, before interrupts are on.
+ */
+static inline unsigned char
+DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+       unsigned char status;
+
+       while (DAC960_GEM_hw_mbox_is_full(base))
+               udelay(1);
+       DAC960_GEM_write_hw_mbox(base, mbox_addr);
+       DAC960_GEM_hw_mbox_new_cmd(base);
+       while (!DAC960_GEM_hw_mbox_status_available(base))
+               udelay(1);
+       status = DAC960_GEM_read_cmd_status(base);
+       DAC960_GEM_ack_hw_mbox_intr(base);
+       DAC960_GEM_ack_hw_mbox_status(base);
+
+       return status;
+}
+
+/*
+ * Bring a GEM-series controller to the operational state: wait (with
+ * timeout) for firmware init to finish while reporting any init errors,
+ * switch to the memory-mailbox interface, enable interrupts, and install
+ * the GEM-specific method pointers on the HBA.
+ *
+ * Returns 0 on success, -EIO on a fatal init error, -ETIMEDOUT if init
+ * never completes, or -EAGAIN if the memory mailbox cannot be enabled.
+ */
+static int DAC960_GEM_hw_init(struct pci_dev *pdev,
+               struct myrs_hba *cs, void __iomem *base)
+{
+       int timeout = 0;
+       unsigned char status, parm0, parm1;
+
+       DAC960_GEM_disable_intr(base);
+       DAC960_GEM_ack_hw_mbox_status(base);
+       udelay(1000);
+       while (DAC960_GEM_init_in_progress(base) &&
+              timeout < MYRS_MAILBOX_TIMEOUT) {
+               if (DAC960_GEM_read_error_status(base, &status,
+                                                &parm0, &parm1) &&
+                   myrs_err_status(cs, status, parm0, parm1))
+                       return -EIO;
+               udelay(10);
+               timeout++;
+       }
+       if (timeout == MYRS_MAILBOX_TIMEOUT) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for Controller Initialisation\n");
+               return -ETIMEDOUT;
+       }
+       if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
+               dev_err(&pdev->dev,
+                       "Unable to Enable Memory Mailbox Interface\n");
+               DAC960_GEM_reset_ctrl(base);
+               return -EAGAIN;
+       }
+       DAC960_GEM_enable_intr(base);
+       cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
+       cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
+       cs->disable_intr = DAC960_GEM_disable_intr;
+       cs->reset = DAC960_GEM_reset_ctrl;
+       return 0;
+}
+
+/*
+ * GEM interrupt handler: acknowledge the doorbell, then drain the ring
+ * of status mailboxes.  A mailbox with id == 0 terminates the scan.
+ * Ids 1 (MYRS_DCMD_TAG) and 2 (MYRS_MCMD_TAG) are the driver's internal
+ * command blocks; any other id is a SCSI tag offset by 3.  Each consumed
+ * mailbox is zeroed so the next pass sees it as free.
+ */
+static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
+{
+       struct myrs_hba *cs = arg;
+       void __iomem *base = cs->io_base;
+       struct myrs_stat_mbox *next_stat_mbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cs->queue_lock, flags);
+       DAC960_GEM_ack_intr(base);
+       next_stat_mbox = cs->next_stat_mbox;
+       while (next_stat_mbox->id > 0) {
+               unsigned short id = next_stat_mbox->id;
+               struct scsi_cmnd *scmd = NULL;
+               struct myrs_cmdblk *cmd_blk = NULL;
+
+               if (id == MYRS_DCMD_TAG)
+                       cmd_blk = &cs->dcmd_blk;
+               else if (id == MYRS_MCMD_TAG)
+                       cmd_blk = &cs->mcmd_blk;
+               else {
+                       scmd = scsi_host_find_tag(cs->host, id - 3);
+                       if (scmd)
+                               cmd_blk = scsi_cmd_priv(scmd);
+               }
+               if (cmd_blk) {
+                       cmd_blk->status = next_stat_mbox->status;
+                       cmd_blk->sense_len = next_stat_mbox->sense_len;
+                       cmd_blk->residual = next_stat_mbox->residual;
+               } else
+                       dev_err(&cs->pdev->dev,
+                               "Unhandled command completion %d\n", id);
+
+               /* Free the slot, then advance (wrapping at the ring end). */
+               memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+               if (++next_stat_mbox > cs->last_stat_mbox)
+                       next_stat_mbox = cs->first_stat_mbox;
+
+               if (cmd_blk) {
+                       if (id < 3)
+                               myrs_handle_cmdblk(cs, cmd_blk);
+                       else
+                               myrs_handle_scsi(cs, cmd_blk, scmd);
+               }
+       }
+       cs->next_stat_mbox = next_stat_mbox;
+       spin_unlock_irqrestore(&cs->queue_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/* Per-family hooks for GEM-series controllers (bound via pci_device_id). */
+struct myrs_privdata DAC960_GEM_privdata = {
+       .hw_init =              DAC960_GEM_hw_init,
+       .irq_handler =          DAC960_GEM_intr_handler,
+       .mmio_size =            DAC960_GEM_mmio_size,
+};
+
+/*
+ * DAC960 BA Series Controllers.
+ */
+
+/*
+ * BA inbound doorbell helpers: single byte-wide doorbell register,
+ * no separate set/clear addresses (unlike GEM).
+ */
+
+/* Notify the controller that a new command is in the hardware mailbox. */
+static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+/* Acknowledge the hardware-mailbox status. */
+static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
+{
+       writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
+}
+
+/* Raise a controller interrupt from the host side. */
+static inline void DAC960_BA_gen_intr(void __iomem *base)
+{
+       writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
+}
+
+/* Request a controller soft reset. */
+static inline void DAC960_BA_reset_ctrl(void __iomem *base)
+{
+       writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
+}
+
+/* Notify the controller of a new memory-mailbox command. */
+static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+/* Full while the EMPTY bit is clear (BA reports emptiness, not fullness). */
+static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_BA_IDB_OFFSET);
+       return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
+}
+
+/* In progress while the INIT_DONE bit is still clear. */
+static inline bool DAC960_BA_init_in_progress(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_BA_IDB_OFFSET);
+       return !(val & DAC960_BA_IDB_INIT_DONE);
+}
+
+/* BA outbound doorbell and interrupt-mask helpers (byte-wide registers). */
+
+/* Acknowledge a hardware-mailbox completion interrupt. */
+static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
+{
+       writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+/* Acknowledge a memory-mailbox completion interrupt. */
+static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
+{
+       writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+/* Acknowledge both mailbox interrupts in one write. */
+static inline void DAC960_BA_ack_intr(void __iomem *base)
+{
+       writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
+              base + DAC960_BA_ODB_OFFSET);
+}
+
+/* True when a hardware-mailbox command status is waiting to be read. */
+static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_BA_ODB_OFFSET);
+       return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
+}
+
+/* True when a memory-mailbox command status is waiting to be read. */
+static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_BA_ODB_OFFSET);
+       return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
+}
+
+/* Enable interrupts: write the mask with the DISABLE bit cleared. */
+static inline void DAC960_BA_enable_intr(void __iomem *base)
+{
+       writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+/* Disable interrupts: set every mask bit. */
+static inline void DAC960_BA_disable_intr(void __iomem *base)
+{
+       writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+/* True when the DISABLE bit is clear in the interrupt mask. */
+static inline bool DAC960_BA_intr_enabled(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_BA_IRQMASK_OFFSET);
+       return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
+}
+
+/*
+ * Copy a command into the next in-memory mailbox slot; words[0] (the
+ * identifier the controller polls on) is made visible last.
+ */
+static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+               union myrs_cmd_mbox *mbox)
+{
+       memcpy(&mem_mbox->words[1], &mbox->words[1],
+              sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+       /* Barrier to avoid reordering */
+       wmb();
+       mem_mbox->words[0] = mbox->words[0];
+       /* Barrier to force PCI access */
+       mb();
+}
+
+
+/* Post the DMA address of a command mailbox to the hardware mailbox. */
+static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
+               dma_addr_t cmd_mbox_addr)
+{
+       dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
+}
+
+/* Read the identifier of the command whose status is available. */
+static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
+{
+       return readw(base + DAC960_BA_CMDSTS_OFFSET);
+}
+
+/* Read the completion status byte (offset +2 past the identifier). */
+static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
+{
+       return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
+}
+
+/*
+ * Fetch a pending initialization error from the controller.
+ * Returns false when no error is pending; otherwise fills in the error
+ * code (PENDING bit masked off) and the two parameter bytes from the
+ * command mailbox, then clears the error-status register.
+ */
+static inline bool
+DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
+               unsigned char *param0, unsigned char *param1)
+{
+       u8 val;
+
+       val = readb(base + DAC960_BA_ERRSTS_OFFSET);
+       if (!(val & DAC960_BA_ERRSTS_PENDING))
+               return false;
+       val &= ~DAC960_BA_ERRSTS_PENDING;
+       *error = val;
+       *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
+       *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
+       writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
+       return true;
+}
+
+/*
+ * Issue one command through the hardware mailbox by polling (used only
+ * during memory-mailbox bring-up, before interrupts are enabled):
+ * wait for the mailbox to drain, post, ring the doorbell, wait for
+ * status, then acknowledge interrupt and status.
+ */
+static inline unsigned char
+DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+       unsigned char status;
+
+       while (DAC960_BA_hw_mbox_is_full(base))
+               udelay(1);
+       DAC960_BA_write_hw_mbox(base, mbox_addr);
+       DAC960_BA_hw_mbox_new_cmd(base);
+       while (!DAC960_BA_hw_mbox_status_available(base))
+               udelay(1);
+       status = DAC960_BA_read_cmd_status(base);
+       DAC960_BA_ack_hw_mbox_intr(base);
+       DAC960_BA_ack_hw_mbox_status(base);
+
+       return status;
+}
+
+/*
+ * Bring a BA-series controller to the operational state: wait (with
+ * timeout) for firmware init while reporting init errors, enable the
+ * memory-mailbox interface, enable interrupts, and install the
+ * BA-specific method pointers on the HBA.
+ *
+ * Returns 0 on success, -EIO on a fatal init error, -ETIMEDOUT if init
+ * never completes, or -EAGAIN if the memory mailbox cannot be enabled.
+ */
+static int DAC960_BA_hw_init(struct pci_dev *pdev,
+               struct myrs_hba *cs, void __iomem *base)
+{
+       int timeout = 0;
+       unsigned char status, parm0, parm1;
+
+       DAC960_BA_disable_intr(base);
+       DAC960_BA_ack_hw_mbox_status(base);
+       udelay(1000);
+       while (DAC960_BA_init_in_progress(base) &&
+              timeout < MYRS_MAILBOX_TIMEOUT) {
+               if (DAC960_BA_read_error_status(base, &status,
+                                             &parm0, &parm1) &&
+                   myrs_err_status(cs, status, parm0, parm1))
+                       return -EIO;
+               udelay(10);
+               timeout++;
+       }
+       if (timeout == MYRS_MAILBOX_TIMEOUT) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for Controller Initialisation\n");
+               return -ETIMEDOUT;
+       }
+       if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
+               dev_err(&pdev->dev,
+                       "Unable to Enable Memory Mailbox Interface\n");
+               DAC960_BA_reset_ctrl(base);
+               return -EAGAIN;
+       }
+       DAC960_BA_enable_intr(base);
+       cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
+       cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
+       cs->disable_intr = DAC960_BA_disable_intr;
+       cs->reset = DAC960_BA_reset_ctrl;
+       return 0;
+}
+
+/*
+ * BA interrupt handler: identical structure to the GEM handler — ack
+ * the doorbell, then walk the status-mailbox ring until an empty slot
+ * (id == 0).  Ids 1/2 are the driver's internal command blocks; other
+ * ids are SCSI tags offset by 3.  Consumed slots are zeroed.
+ */
+static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
+{
+       struct myrs_hba *cs = arg;
+       void __iomem *base = cs->io_base;
+       struct myrs_stat_mbox *next_stat_mbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cs->queue_lock, flags);
+       DAC960_BA_ack_intr(base);
+       next_stat_mbox = cs->next_stat_mbox;
+       while (next_stat_mbox->id > 0) {
+               unsigned short id = next_stat_mbox->id;
+               struct scsi_cmnd *scmd = NULL;
+               struct myrs_cmdblk *cmd_blk = NULL;
+
+               if (id == MYRS_DCMD_TAG)
+                       cmd_blk = &cs->dcmd_blk;
+               else if (id == MYRS_MCMD_TAG)
+                       cmd_blk = &cs->mcmd_blk;
+               else {
+                       scmd = scsi_host_find_tag(cs->host, id - 3);
+                       if (scmd)
+                               cmd_blk = scsi_cmd_priv(scmd);
+               }
+               if (cmd_blk) {
+                       cmd_blk->status = next_stat_mbox->status;
+                       cmd_blk->sense_len = next_stat_mbox->sense_len;
+                       cmd_blk->residual = next_stat_mbox->residual;
+               } else
+                       dev_err(&cs->pdev->dev,
+                               "Unhandled command completion %d\n", id);
+
+               /* Free the slot, then advance (wrapping at the ring end). */
+               memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+               if (++next_stat_mbox > cs->last_stat_mbox)
+                       next_stat_mbox = cs->first_stat_mbox;
+
+               if (cmd_blk) {
+                       if (id < 3)
+                               myrs_handle_cmdblk(cs, cmd_blk);
+                       else
+                               myrs_handle_scsi(cs, cmd_blk, scmd);
+               }
+       }
+       cs->next_stat_mbox = next_stat_mbox;
+       spin_unlock_irqrestore(&cs->queue_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/* Per-family hooks for BA-series controllers (bound via pci_device_id). */
+struct myrs_privdata DAC960_BA_privdata = {
+       .hw_init =              DAC960_BA_hw_init,
+       .irq_handler =          DAC960_BA_intr_handler,
+       .mmio_size =            DAC960_BA_mmio_size,
+};
+
+/*
+ * DAC960 LP Series Controllers.
+ */
+
+/*
+ * LP inbound doorbell helpers: byte-wide doorbell register like BA,
+ * but LP reports mailbox-full and init-in-progress directly (no
+ * inverted EMPTY/DONE bits).
+ */
+
+/* Notify the controller that a new command is in the hardware mailbox. */
+static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+/* Acknowledge the hardware-mailbox status. */
+static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
+{
+       writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
+}
+
+/* Raise a controller interrupt from the host side. */
+static inline void DAC960_LP_gen_intr(void __iomem *base)
+{
+       writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
+}
+
+/* Request a controller soft reset. */
+static inline void DAC960_LP_reset_ctrl(void __iomem *base)
+{
+       writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
+}
+
+/* Notify the controller of a new memory-mailbox command. */
+static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
+{
+       writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+/* True while the controller has not consumed the hardware mailbox. */
+static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_LP_IDB_OFFSET);
+       return val & DAC960_LP_IDB_HWMBOX_FULL;
+}
+
+/* True while controller firmware initialization is still running. */
+static inline bool DAC960_LP_init_in_progress(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_LP_IDB_OFFSET);
+       return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
+}
+
+/* LP outbound doorbell and interrupt-mask helpers (byte-wide registers). */
+
+/* Acknowledge a hardware-mailbox completion interrupt. */
+static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
+{
+       writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+/* Acknowledge a memory-mailbox completion interrupt. */
+static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
+{
+       writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+/* Acknowledge both mailbox interrupts in one write. */
+static inline void DAC960_LP_ack_intr(void __iomem *base)
+{
+       writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
+              base + DAC960_LP_ODB_OFFSET);
+}
+
+/* True when a hardware-mailbox command status is waiting to be read. */
+static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_LP_ODB_OFFSET);
+       return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
+}
+
+/* True when a memory-mailbox command status is waiting to be read. */
+static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_LP_ODB_OFFSET);
+       return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
+}
+
+/* Enable interrupts: write the mask with the DISABLE bit cleared. */
+static inline void DAC960_LP_enable_intr(void __iomem *base)
+{
+       writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+/* Disable interrupts: set every mask bit. */
+static inline void DAC960_LP_disable_intr(void __iomem *base)
+{
+       writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+/* True when the DISABLE bit is clear in the interrupt mask. */
+static inline bool DAC960_LP_intr_enabled(void __iomem *base)
+{
+       u8 val;
+
+       val = readb(base + DAC960_LP_IRQMASK_OFFSET);
+       return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
+}
+
+/*
+ * Copy a command into the next in-memory mailbox slot; words[0] (the
+ * identifier the controller polls on) is made visible last.
+ */
+static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+               union myrs_cmd_mbox *mbox)
+{
+       memcpy(&mem_mbox->words[1], &mbox->words[1],
+              sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+       /* Barrier to avoid reordering */
+       wmb();
+       mem_mbox->words[0] = mbox->words[0];
+       /* Barrier to force PCI access */
+       mb();
+}
+
+/* Post the DMA address of a command mailbox to the hardware mailbox. */
+static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
+               dma_addr_t cmd_mbox_addr)
+{
+       dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
+}
+
+/* Read the identifier of the command whose status is available. */
+static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
+{
+       return readw(base + DAC960_LP_CMDSTS_OFFSET);
+}
+
+/* Read the completion status byte (offset +2 past the identifier). */
+static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
+{
+       return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
+}
+
+/*
+ * Fetch a pending initialization error from the controller.
+ * Returns false when no error is pending; otherwise fills in the error
+ * code (PENDING bit masked off) and the two parameter bytes from the
+ * command mailbox, then clears the error-status register.
+ */
+static inline bool
+DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
+               unsigned char *param0, unsigned char *param1)
+{
+       u8 val;
+
+       val = readb(base + DAC960_LP_ERRSTS_OFFSET);
+       if (!(val & DAC960_LP_ERRSTS_PENDING))
+               return false;
+       val &= ~DAC960_LP_ERRSTS_PENDING;
+       *error = val;
+       *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
+       *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
+       writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
+       return true;
+}
+
+/*
+ * Issue one command through the hardware mailbox by polling (used only
+ * during memory-mailbox bring-up, before interrupts are enabled):
+ * wait for the mailbox to drain, post, ring the doorbell, wait for
+ * status, then acknowledge interrupt and status.
+ */
+static inline unsigned char
+DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+       unsigned char status;
+
+       while (DAC960_LP_hw_mbox_is_full(base))
+               udelay(1);
+       DAC960_LP_write_hw_mbox(base, mbox_addr);
+       DAC960_LP_hw_mbox_new_cmd(base);
+       while (!DAC960_LP_hw_mbox_status_available(base))
+               udelay(1);
+       status = DAC960_LP_read_cmd_status(base);
+       DAC960_LP_ack_hw_mbox_intr(base);
+       DAC960_LP_ack_hw_mbox_status(base);
+
+       return status;
+}
+
+/*
+ * Bring an LP-series controller to the operational state: wait (with
+ * timeout) for firmware init while reporting init errors, enable the
+ * memory-mailbox interface, enable interrupts, and install the
+ * LP-specific method pointers on the HBA.
+ *
+ * Returns 0 on success, -EIO on a fatal init error, -ETIMEDOUT if init
+ * never completes, or -EAGAIN if the memory mailbox cannot be enabled
+ * (-EAGAIN for consistency with the GEM and BA variants; this path
+ * previously returned -ENODEV).
+ */
+static int DAC960_LP_hw_init(struct pci_dev *pdev,
+               struct myrs_hba *cs, void __iomem *base)
+{
+       int timeout = 0;
+       unsigned char status, parm0, parm1;
+
+       DAC960_LP_disable_intr(base);
+       DAC960_LP_ack_hw_mbox_status(base);
+       udelay(1000);
+       while (DAC960_LP_init_in_progress(base) &&
+              timeout < MYRS_MAILBOX_TIMEOUT) {
+               if (DAC960_LP_read_error_status(base, &status,
+                                             &parm0, &parm1) &&
+                   myrs_err_status(cs, status, parm0, parm1))
+                       return -EIO;
+               udelay(10);
+               timeout++;
+       }
+       if (timeout == MYRS_MAILBOX_TIMEOUT) {
+               dev_err(&pdev->dev,
+                       "Timeout waiting for Controller Initialisation\n");
+               return -ETIMEDOUT;
+       }
+       if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
+               dev_err(&pdev->dev,
+                       "Unable to Enable Memory Mailbox Interface\n");
+               DAC960_LP_reset_ctrl(base);
+               return -EAGAIN;
+       }
+       DAC960_LP_enable_intr(base);
+       cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
+       cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
+       cs->disable_intr = DAC960_LP_disable_intr;
+       cs->reset = DAC960_LP_reset_ctrl;
+
+       return 0;
+}
+
+/*
+ * LP interrupt handler: identical structure to the GEM/BA handlers —
+ * ack the doorbell, then walk the status-mailbox ring until an empty
+ * slot (id == 0).  Ids 1/2 are the driver's internal command blocks;
+ * other ids are SCSI tags offset by 3.  Consumed slots are zeroed.
+ */
+static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
+{
+       struct myrs_hba *cs = arg;
+       void __iomem *base = cs->io_base;
+       struct myrs_stat_mbox *next_stat_mbox;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cs->queue_lock, flags);
+       DAC960_LP_ack_intr(base);
+       next_stat_mbox = cs->next_stat_mbox;
+       while (next_stat_mbox->id > 0) {
+               unsigned short id = next_stat_mbox->id;
+               struct scsi_cmnd *scmd = NULL;
+               struct myrs_cmdblk *cmd_blk = NULL;
+
+               if (id == MYRS_DCMD_TAG)
+                       cmd_blk = &cs->dcmd_blk;
+               else if (id == MYRS_MCMD_TAG)
+                       cmd_blk = &cs->mcmd_blk;
+               else {
+                       scmd = scsi_host_find_tag(cs->host, id - 3);
+                       if (scmd)
+                               cmd_blk = scsi_cmd_priv(scmd);
+               }
+               if (cmd_blk) {
+                       cmd_blk->status = next_stat_mbox->status;
+                       cmd_blk->sense_len = next_stat_mbox->sense_len;
+                       cmd_blk->residual = next_stat_mbox->residual;
+               } else
+                       dev_err(&cs->pdev->dev,
+                               "Unhandled command completion %d\n", id);
+
+               /* Free the slot, then advance (wrapping at the ring end). */
+               memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+               if (++next_stat_mbox > cs->last_stat_mbox)
+                       next_stat_mbox = cs->first_stat_mbox;
+
+               if (cmd_blk) {
+                       if (id < 3)
+                               myrs_handle_cmdblk(cs, cmd_blk);
+                       else
+                               myrs_handle_scsi(cs, cmd_blk, scmd);
+               }
+       }
+       cs->next_stat_mbox = next_stat_mbox;
+       spin_unlock_irqrestore(&cs->queue_lock, flags);
+       return IRQ_HANDLED;
+}
+
+/* Per-family hooks for LP-series controllers (bound via pci_device_id). */
+struct myrs_privdata DAC960_LP_privdata = {
+       .hw_init =              DAC960_LP_hw_init,
+       .irq_handler =          DAC960_LP_intr_handler,
+       .mmio_size =            DAC960_LP_mmio_size,
+};
+
+/*
+ * Module functions
+ */
+/*
+ * PCI probe: detect the controller, read its configuration, set up the
+ * DMA memory pools, and register/scan the SCSI host.  On any failure
+ * after detection the HBA is torn down via myrs_cleanup().
+ */
+static int
+myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+       struct myrs_hba *cs;
+       int ret;
+
+       cs = myrs_detect(dev, entry);
+       if (!cs)
+               return -ENODEV;
+
+       ret = myrs_get_config(cs);
+       if (ret < 0) {
+               myrs_cleanup(cs);
+               return ret;
+       }
+
+       if (!myrs_create_mempools(dev, cs)) {
+               ret = -ENOMEM;
+               goto failed;
+       }
+
+       ret = scsi_add_host(cs->host, &dev->dev);
+       if (ret) {
+               dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+               myrs_destroy_mempools(cs);
+               goto failed;
+       }
+       scsi_scan_host(cs->host);
+       return 0;
+failed:
+       myrs_cleanup(cs);
+       return ret;
+}
+
+
+/*
+ * PCI remove: flush the controller cache, then release the memory
+ * pools and the HBA.  Tolerates a NULL drvdata (probe failed early).
+ */
+static void myrs_remove(struct pci_dev *pdev)
+{
+       struct myrs_hba *cs = pci_get_drvdata(pdev);
+
+       if (cs == NULL)
+               return;
+
+       shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
+       myrs_flush_cache(cs);
+       myrs_destroy_mempools(cs);
+       myrs_cleanup(cs);
+}
+
+
+/*
+ * Supported controllers.  The GEM entry matches on subvendor as well
+ * (hence the explicit PCI_DEVICE_SUB); each entry carries its
+ * myrs_privdata with the family-specific hooks.
+ */
+static const struct pci_device_id myrs_id_table[] = {
+       {
+               PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
+                              PCI_DEVICE_ID_MYLEX_DAC960_GEM,
+                              PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
+               .driver_data    = (unsigned long) &DAC960_GEM_privdata,
+       },
+       {
+               PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
+       },
+       {
+               PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
+       },
+       {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrs_id_table);
+
+/* PCI driver glue binding the id table to probe/remove. */
+static struct pci_driver myrs_pci_driver = {
+       .name           = "myrs",
+       .id_table       = myrs_id_table,
+       .probe          = myrs_probe,
+       .remove         = myrs_remove,
+};
+
+/*
+ * Module init: attach the RAID transport class first, then register
+ * the PCI driver; the class is released again if registration fails.
+ */
+static int __init myrs_init_module(void)
+{
+       int ret;
+
+       myrs_raid_template = raid_class_attach(&myrs_raid_functions);
+       if (!myrs_raid_template)
+               return -ENODEV;
+
+       ret = pci_register_driver(&myrs_pci_driver);
+       if (ret)
+               raid_class_release(myrs_raid_template);
+
+       return ret;
+}
+
+/* Module exit: tear down in reverse order of myrs_init_module(). */
+static void __exit myrs_cleanup_module(void)
+{
+       pci_unregister_driver(&myrs_pci_driver);
+       raid_class_release(myrs_raid_template);
+}
+
+module_init(myrs_init_module);
+module_exit(myrs_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
new file mode 100644 (file)
index 0000000..e6702ee
--- /dev/null
@@ -0,0 +1,1134 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#ifndef _MYRS_H
+#define _MYRS_H
+
+#define MYRS_MAILBOX_TIMEOUT 1000000
+
+#define MYRS_DCMD_TAG 1
+#define MYRS_MCMD_TAG 2
+
+#define MYRS_LINE_BUFFER_SIZE 128
+
+#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/* Maximum number of Scatter/Gather Segments supported */
+#define MYRS_SG_LIMIT          128
+
+/*
+ * Number of Command and Status Mailboxes used by the
+ * DAC960 V2 Firmware Memory Mailbox Interface.
+ */
+#define MYRS_MAX_CMD_MBOX              512
+#define MYRS_MAX_STAT_MBOX             512
+
+#define MYRS_DCDB_SIZE                 16
+#define MYRS_SENSE_SIZE                        14
+
+/*
+ * DAC960 V2 Firmware Command Opcodes.
+ */
+enum myrs_cmd_opcode {
+       MYRS_CMD_OP_MEMCOPY             = 0x01,
+       MYRS_CMD_OP_SCSI_10_PASSTHRU    = 0x02,
+       MYRS_CMD_OP_SCSI_255_PASSTHRU   = 0x03,
+       MYRS_CMD_OP_SCSI_10             = 0x04,
+       MYRS_CMD_OP_SCSI_256            = 0x05,
+       MYRS_CMD_OP_IOCTL               = 0x20,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware IOCTL Opcodes.
+ */
+enum myrs_ioctl_opcode {
+       MYRS_IOCTL_GET_CTLR_INFO        = 0x01,
+       MYRS_IOCTL_GET_LDEV_INFO_VALID  = 0x03,
+       MYRS_IOCTL_GET_PDEV_INFO_VALID  = 0x05,
+       MYRS_IOCTL_GET_HEALTH_STATUS    = 0x11,
+       MYRS_IOCTL_GET_EVENT            = 0x15,
+       MYRS_IOCTL_START_DISCOVERY      = 0x81,
+       MYRS_IOCTL_SET_DEVICE_STATE     = 0x82,
+       MYRS_IOCTL_INIT_PDEV_START      = 0x84,
+       MYRS_IOCTL_INIT_PDEV_STOP       = 0x85,
+       MYRS_IOCTL_INIT_LDEV_START      = 0x86,
+       MYRS_IOCTL_INIT_LDEV_STOP       = 0x87,
+       MYRS_IOCTL_RBLD_DEVICE_START    = 0x88,
+       MYRS_IOCTL_RBLD_DEVICE_STOP     = 0x89,
+       MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A,
+       MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B,
+       MYRS_IOCTL_CC_START             = 0x8C,
+       MYRS_IOCTL_CC_STOP              = 0x8D,
+       MYRS_IOCTL_SET_MEM_MBOX         = 0x8E,
+       MYRS_IOCTL_RESET_DEVICE         = 0x90,
+       MYRS_IOCTL_FLUSH_DEVICE_DATA    = 0x91,
+       MYRS_IOCTL_PAUSE_DEVICE         = 0x92,
+       MYRS_IOCTL_UNPAUS_EDEVICE       = 0x93,
+       MYRS_IOCTL_LOCATE_DEVICE        = 0x94,
+       MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0,
+       MYRS_IOCTL_DELETE_LDEV          = 0xC1,
+       MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2,
+       MYRS_IOCTL_RENAME_LDEV          = 0xC3,
+       MYRS_IOCTL_ADD_CONFIGURATION    = 0xC4,
+       MYRS_IOCTL_XLATE_PDEV_TO_LDEV   = 0xC5,
+       MYRS_IOCTL_CLEAR_CONFIGURATION  = 0xCA,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Command Status Codes.
+ */
+#define MYRS_STATUS_SUCCESS                    0x00
+#define MYRS_STATUS_FAILED                     0x02
+#define MYRS_STATUS_DEVICE_BUSY                        0x08
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE      0x0E
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2     0x0F
+#define MYRS_STATUS_RESERVATION_CONFLICT       0x18
+
+/*
+ * DAC960 V2 Firmware Memory Type structure.
+ */
+struct myrs_mem_type {
+       enum {
+               MYRS_MEMTYPE_RESERVED   = 0x00,
+               MYRS_MEMTYPE_DRAM       = 0x01,
+               MYRS_MEMTYPE_EDRAM      = 0x02,
+               MYRS_MEMTYPE_EDO        = 0x03,
+               MYRS_MEMTYPE_SDRAM      = 0x04,
+               MYRS_MEMTYPE_LAST       = 0x1F,
+       } __packed mem_type:5;  /* Byte 0 Bits 0-4 */
+       unsigned rsvd:1;                        /* Byte 0 Bit 5 */
+       unsigned mem_parity:1;                  /* Byte 0 Bit 6 */
+       unsigned mem_ecc:1;                     /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Processor Type structure.
+ */
+enum myrs_cpu_type {
+       MYRS_CPUTYPE_i960CA     = 0x01,
+       MYRS_CPUTYPE_i960RD     = 0x02,
+       MYRS_CPUTYPE_i960RN     = 0x03,
+       MYRS_CPUTYPE_i960RP     = 0x04,
+       MYRS_CPUTYPE_NorthBay   = 0x05,
+       MYRS_CPUTYPE_StrongArm  = 0x06,
+       MYRS_CPUTYPE_i960RM     = 0x07,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Controller Info reply structure.
+ */
+struct myrs_ctlr_info {
+       unsigned char rsvd1;                            /* Byte 0 */
+       enum {
+               MYRS_SCSI_BUS   = 0x00,
+               MYRS_Fibre_BUS  = 0x01,
+               MYRS_PCI_BUS    = 0x03
+       } __packed bus; /* Byte 1 */
+       enum {
+               MYRS_CTLR_DAC960E       = 0x01,
+               MYRS_CTLR_DAC960M       = 0x08,
+               MYRS_CTLR_DAC960PD      = 0x10,
+               MYRS_CTLR_DAC960PL      = 0x11,
+               MYRS_CTLR_DAC960PU      = 0x12,
+               MYRS_CTLR_DAC960PE      = 0x13,
+               MYRS_CTLR_DAC960PG      = 0x14,
+               MYRS_CTLR_DAC960PJ      = 0x15,
+               MYRS_CTLR_DAC960PTL0    = 0x16,
+               MYRS_CTLR_DAC960PR      = 0x17,
+               MYRS_CTLR_DAC960PRL     = 0x18,
+               MYRS_CTLR_DAC960PT      = 0x19,
+               MYRS_CTLR_DAC1164P      = 0x1A,
+               MYRS_CTLR_DAC960PTL1    = 0x1B,
+               MYRS_CTLR_EXR2000P      = 0x1C,
+               MYRS_CTLR_EXR3000P      = 0x1D,
+               MYRS_CTLR_ACCELERAID352 = 0x1E,
+               MYRS_CTLR_ACCELERAID170 = 0x1F,
+               MYRS_CTLR_ACCELERAID160 = 0x20,
+               MYRS_CTLR_DAC960S       = 0x60,
+               MYRS_CTLR_DAC960SU      = 0x61,
+               MYRS_CTLR_DAC960SX      = 0x62,
+               MYRS_CTLR_DAC960SF      = 0x63,
+               MYRS_CTLR_DAC960SS      = 0x64,
+               MYRS_CTLR_DAC960FL      = 0x65,
+               MYRS_CTLR_DAC960LL      = 0x66,
+               MYRS_CTLR_DAC960FF      = 0x67,
+               MYRS_CTLR_DAC960HP      = 0x68,
+               MYRS_CTLR_RAIDBRICK     = 0x69,
+               MYRS_CTLR_METEOR_FL     = 0x6A,
+               MYRS_CTLR_METEOR_FF     = 0x6B
+       } __packed ctlr_type;   /* Byte 2 */
+       unsigned char rsvd2;                    /* Byte 3 */
+       unsigned short bus_speed_mhz;           /* Bytes 4-5 */
+       unsigned char bus_width;                /* Byte 6 */
+       unsigned char flash_code;               /* Byte 7 */
+       unsigned char ports_present;            /* Byte 8 */
+       unsigned char rsvd3[7];                 /* Bytes 9-15 */
+       unsigned char bus_name[16];             /* Bytes 16-31 */
+       unsigned char ctlr_name[16];            /* Bytes 32-47 */
+       unsigned char rsvd4[16];                /* Bytes 48-63 */
+       /* Firmware Release Information */
+       unsigned char fw_major_version;         /* Byte 64 */
+       unsigned char fw_minor_version;         /* Byte 65 */
+       unsigned char fw_turn_number;           /* Byte 66 */
+       unsigned char fw_build_number;          /* Byte 67 */
+       unsigned char fw_release_day;           /* Byte 68 */
+       unsigned char fw_release_month;         /* Byte 69 */
+       unsigned char fw_release_year_hi;       /* Byte 70 */
+       unsigned char fw_release_year_lo;       /* Byte 71 */
+       /* Hardware Release Information */
+       unsigned char hw_rev;                   /* Byte 72 */
+       unsigned char rsvd5[3];                 /* Bytes 73-75 */
+       unsigned char hw_release_day;           /* Byte 76 */
+       unsigned char hw_release_month;         /* Byte 77 */
+       unsigned char hw_release_year_hi;       /* Byte 78 */
+       unsigned char hw_release_year_lo;       /* Byte 79 */
+       /* Hardware Manufacturing Information */
+       unsigned char manuf_batch_num;          /* Byte 80 */
+       unsigned char rsvd6;                    /* Byte 81 */
+       unsigned char manuf_plant_num;          /* Byte 82 */
+       unsigned char rsvd7;                    /* Byte 83 */
+       unsigned char hw_manuf_day;             /* Byte 84 */
+       unsigned char hw_manuf_month;           /* Byte 85 */
+       unsigned char hw_manuf_year_hi;         /* Byte 86 */
+       unsigned char hw_manuf_year_lo;         /* Byte 87 */
+       unsigned char max_pd_per_xld;           /* Byte 88 */
+       unsigned char max_ild_per_xld;          /* Byte 89 */
+       unsigned short nvram_size_kb;           /* Bytes 90-91 */
+       unsigned char max_xld;                  /* Byte 92 */
+       unsigned char rsvd8[3];                 /* Bytes 93-95 */
+       /* Unique Information per Controller */
+       unsigned char serial_number[16];        /* Bytes 96-111 */
+       unsigned char rsvd9[16];                /* Bytes 112-127 */
+       /* Vendor Information */
+       unsigned char rsvd10[3];                /* Bytes 128-130 */
+       unsigned char oem_code;                 /* Byte 131 */
+       unsigned char vendor[16];               /* Bytes 132-147 */
+       /* Other Physical/Controller/Operation Information */
+       unsigned char bbu_present:1;            /* Byte 148 Bit 0 */
+       unsigned char cluster_mode:1;           /* Byte 148 Bit 1 */
+       unsigned char rsvd11:6;                 /* Byte 148 Bits 2-7 */
+       unsigned char rsvd12[3];                /* Bytes 149-151 */
+       /* Physical Device Scan Information */
+       unsigned char pscan_active:1;           /* Byte 152 Bit 0 */
+       unsigned char rsvd13:7;                 /* Byte 152 Bits 1-7 */
+       unsigned char pscan_chan;               /* Byte 153 */
+       unsigned char pscan_target;             /* Byte 154 */
+       unsigned char pscan_lun;                /* Byte 155 */
+       /* Maximum Command Data Transfer Sizes */
+       unsigned short max_transfer_size;       /* Bytes 156-157 */
+       unsigned short max_sge;                 /* Bytes 158-159 */
+       /* Logical/Physical Device Counts */
+       unsigned short ldev_present;            /* Bytes 160-161 */
+       unsigned short ldev_critical;           /* Bytes 162-163 */
+       unsigned short ldev_offline;            /* Bytes 164-165 */
+       unsigned short pdev_present;            /* Bytes 166-167 */
+       unsigned short pdisk_present;           /* Bytes 168-169 */
+       unsigned short pdisk_critical;          /* Bytes 170-171 */
+       unsigned short pdisk_offline;           /* Bytes 172-173 */
+       unsigned short max_tcq;                 /* Bytes 174-175 */
+       /* Channel and Target ID Information */
+       unsigned char physchan_present;         /* Byte 176 */
+       unsigned char virtchan_present;         /* Byte 177 */
+       unsigned char physchan_max;             /* Byte 178 */
+       unsigned char virtchan_max;             /* Byte 179 */
+       unsigned char max_targets[16];          /* Bytes 180-195 */
+       unsigned char rsvd14[12];               /* Bytes 196-207 */
+       /* Memory/Cache Information */
+       unsigned short mem_size_mb;             /* Bytes 208-209 */
+       unsigned short cache_size_mb;           /* Bytes 210-211 */
+       unsigned int valid_cache_bytes;         /* Bytes 212-215 */
+       unsigned int dirty_cache_bytes;         /* Bytes 216-219 */
+       unsigned short mem_speed_mhz;           /* Bytes 220-221 */
+       unsigned char mem_data_width;           /* Byte 222 */
+       struct myrs_mem_type mem_type;          /* Byte 223 */
+       unsigned char cache_mem_type_name[16];  /* Bytes 224-239 */
+       /* Execution Memory Information */
+       unsigned short exec_mem_size_mb;        /* Bytes 240-241 */
+       unsigned short exec_l2_cache_size_mb;   /* Bytes 242-243 */
+       unsigned char rsvd15[8];                /* Bytes 244-251 */
+       unsigned short exec_mem_speed_mhz;      /* Bytes 252-253 */
+       unsigned char exec_mem_data_width;      /* Byte 254 */
+       struct myrs_mem_type exec_mem_type;     /* Byte 255 */
+       unsigned char exec_mem_type_name[16];   /* Bytes 256-271 */
+       /* CPU Type Information */
+       struct {                                /* Bytes 272-335 */
+               unsigned short cpu_speed_mhz;
+               enum myrs_cpu_type cpu_type;
+               unsigned char cpu_count;
+               unsigned char rsvd16[12];
+               unsigned char cpu_name[16];
+       } __packed cpu[2];
+       /* Debugging/Profiling/Command Time Tracing Information */
+       unsigned short cur_prof_page_num;       /* Bytes 336-337 */
+       unsigned short num_prof_waiters;        /* Bytes 338-339 */
+       unsigned short cur_trace_page_num;      /* Bytes 340-341 */
+       unsigned short num_trace_waiters;       /* Bytes 342-343 */
+       unsigned char rsvd18[8];                /* Bytes 344-351 */
+       /* Error Counters on Physical Devices */
+       unsigned short pdev_bus_resets;         /* Bytes 352-353 */
+       unsigned short pdev_parity_errors;      /* Bytes 354-355 */
+       unsigned short pdev_soft_errors;        /* Bytes 356-357 */
+       unsigned short pdev_cmds_failed;        /* Bytes 358-359 */
+       unsigned short pdev_misc_errors;        /* Bytes 360-361 */
+       unsigned short pdev_cmd_timeouts;       /* Bytes 362-363 */
+       unsigned short pdev_sel_timeouts;       /* Bytes 364-365 */
+       unsigned short pdev_retries_done;       /* Bytes 366-367 */
+       unsigned short pdev_aborts_done;        /* Bytes 368-369 */
+       unsigned short pdev_host_aborts_done;   /* Bytes 370-371 */
+       unsigned short pdev_predicted_failures; /* Bytes 372-373 */
+       unsigned short pdev_host_cmds_failed;   /* Bytes 374-375 */
+       unsigned short pdev_hard_errors;        /* Bytes 376-377 */
+       unsigned char rsvd19[6];                /* Bytes 378-383 */
+       /* Error Counters on Logical Devices */
+       unsigned short ldev_soft_errors;        /* Bytes 384-385 */
+       unsigned short ldev_cmds_failed;        /* Bytes 386-387 */
+       unsigned short ldev_host_aborts_done;   /* Bytes 388-389 */
+       unsigned char rsvd20[2];                /* Bytes 390-391 */
+       /* Error Counters on Controller */
+       unsigned short ctlr_mem_errors;         /* Bytes 392-393 */
+       unsigned short ctlr_host_aborts_done;   /* Bytes 394-395 */
+       unsigned char rsvd21[4];                /* Bytes 396-399 */
+       /* Long Duration Activity Information */
+       unsigned short bg_init_active;          /* Bytes 400-401 */
+       unsigned short ldev_init_active;        /* Bytes 402-403 */
+       unsigned short pdev_init_active;        /* Bytes 404-405 */
+       unsigned short cc_active;               /* Bytes 406-407 */
+       unsigned short rbld_active;             /* Bytes 408-409 */
+       unsigned short exp_active;              /* Bytes 410-411 */
+       unsigned short patrol_active;           /* Bytes 412-413 */
+       unsigned char rsvd22[2];                /* Bytes 414-415 */
+       /* Flash ROM Information */
+       unsigned char flash_type;               /* Byte 416 */
+       unsigned char rsvd23;                   /* Byte 417 */
+       unsigned short flash_size_MB;           /* Bytes 418-419 */
+       unsigned int flash_limit;               /* Bytes 420-423 */
+       unsigned int flash_count;               /* Bytes 424-427 */
+       unsigned char rsvd24[4];                /* Bytes 428-431 */
+       unsigned char flash_type_name[16];      /* Bytes 432-447 */
+       /* Firmware Run Time Information */
+       unsigned char rbld_rate;                /* Byte 448 */
+       unsigned char bg_init_rate;             /* Byte 449 */
+       unsigned char fg_init_rate;             /* Byte 450 */
+       unsigned char cc_rate;                  /* Byte 451 */
+       unsigned char rsvd25[4];                /* Bytes 452-455 */
+       unsigned int max_dp;                    /* Bytes 456-459 */
+       unsigned int free_dp;                   /* Bytes 460-463 */
+       unsigned int max_iop;                   /* Bytes 464-467 */
+       unsigned int free_iop;                  /* Bytes 468-471 */
+       unsigned short max_combined_len;        /* Bytes 472-473 */
+       unsigned short num_cfg_groups;          /* Bytes 474-475 */
+       unsigned installation_abort_status:1;   /* Byte 476 Bit 0 */
+       unsigned maint_mode_status:1;           /* Byte 476 Bit 1 */
+       unsigned rsvd26:6;                      /* Byte 476 Bits 2-7 */
+       unsigned char rsvd27[6];                /* Bytes 477-482; NOTE(review): [6] covers only 477-482, leaving 483-511 unaccounted before rsvd28 at 512 -- confirm intended array size */
+       unsigned char rsvd28[512];              /* Bytes 512-1023 */
+};
+
+/*
+ * DAC960 V2 Firmware Device State type.
+ */
+enum myrs_devstate {
+       MYRS_DEVICE_UNCONFIGURED        = 0x00,
+       MYRS_DEVICE_ONLINE              = 0x01,
+       MYRS_DEVICE_REBUILD             = 0x03,
+       MYRS_DEVICE_MISSING             = 0x04,
+       MYRS_DEVICE_SUSPECTED_CRITICAL  = 0x05,
+       MYRS_DEVICE_OFFLINE             = 0x08,
+       MYRS_DEVICE_CRITICAL            = 0x09,
+       MYRS_DEVICE_SUSPECTED_DEAD      = 0x0C,
+       MYRS_DEVICE_COMMANDED_OFFLINE   = 0x10,
+       MYRS_DEVICE_STANDBY             = 0x21,
+       MYRS_DEVICE_INVALID_STATE       = 0xFF,
+} __packed;
+
+/*
+ * DAC960 V2 RAID Levels
+ */
+enum myrs_raid_level {
+       MYRS_RAID_LEVEL0        = 0x0,     /* RAID 0 */
+       MYRS_RAID_LEVEL1        = 0x1,     /* RAID 1 */
+       MYRS_RAID_LEVEL3        = 0x3,     /* RAID 3 right asymmetric parity */
+       MYRS_RAID_LEVEL5        = 0x5,     /* RAID 5 right asymmetric parity */
+       MYRS_RAID_LEVEL6        = 0x6,     /* RAID 6 (Mylex RAID 6) */
+       MYRS_RAID_JBOD          = 0x7,     /* RAID 7 (JBOD) */
+       MYRS_RAID_NEWSPAN       = 0x8,     /* New Mylex SPAN */
+       MYRS_RAID_LEVEL3F       = 0x9,     /* RAID 3 fixed parity */
+       MYRS_RAID_LEVEL3L       = 0xb,     /* RAID 3 left symmetric parity */
+       MYRS_RAID_SPAN          = 0xc,     /* current spanning implementation */
+       MYRS_RAID_LEVEL5L       = 0xd,     /* RAID 5 left symmetric parity */
+       MYRS_RAID_LEVELE        = 0xe,     /* RAID E (concatenation) */
+       MYRS_RAID_PHYSICAL      = 0xf,     /* physical device */
+} __packed;
+
+enum myrs_stripe_size {
+       MYRS_STRIPE_SIZE_0      = 0x0,  /* no stripe (RAID 1, RAID 7, etc) */
+       MYRS_STRIPE_SIZE_512B   = 0x1,
+       MYRS_STRIPE_SIZE_1K     = 0x2,
+       MYRS_STRIPE_SIZE_2K     = 0x3,
+       MYRS_STRIPE_SIZE_4K     = 0x4,
+       MYRS_STRIPE_SIZE_8K     = 0x5,
+       MYRS_STRIPE_SIZE_16K    = 0x6,
+       MYRS_STRIPE_SIZE_32K    = 0x7,
+       MYRS_STRIPE_SIZE_64K    = 0x8,
+       MYRS_STRIPE_SIZE_128K   = 0x9,
+       MYRS_STRIPE_SIZE_256K   = 0xa,
+       MYRS_STRIPE_SIZE_512K   = 0xb,
+       MYRS_STRIPE_SIZE_1M     = 0xc,
+} __packed;
+
+enum myrs_cacheline_size {
+       MYRS_CACHELINE_ZERO     = 0x0,  /* caching cannot be enabled */
+       MYRS_CACHELINE_512B     = 0x1,
+       MYRS_CACHELINE_1K       = 0x2,
+       MYRS_CACHELINE_2K       = 0x3,
+       MYRS_CACHELINE_4K       = 0x4,
+       MYRS_CACHELINE_8K       = 0x5,
+       MYRS_CACHELINE_16K      = 0x6,
+       MYRS_CACHELINE_32K      = 0x7,
+       MYRS_CACHELINE_64K      = 0x8,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Logical Device Info reply structure.
+ */
+struct myrs_ldev_info {
+       unsigned char ctlr;                     /* Byte 0 */
+       unsigned char channel;                  /* Byte 1 */
+       unsigned char target;                   /* Byte 2 */
+       unsigned char lun;                      /* Byte 3 */
+       enum myrs_devstate dev_state;           /* Byte 4 */
+       unsigned char raid_level;               /* Byte 5 */
+       enum myrs_stripe_size stripe_size;      /* Byte 6 */
+       enum myrs_cacheline_size cacheline_size; /* Byte 7 */
+       struct {
+               enum {
+                       MYRS_READCACHE_DISABLED         = 0x0,
+                       MYRS_READCACHE_ENABLED          = 0x1,
+                       MYRS_READAHEAD_ENABLED          = 0x2,
+                       MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3,
+                       MYRS_READCACHE_LAST             = 0x7,
+               } __packed rce:3; /* Byte 8 Bits 0-2 */
+               enum {
+                       MYRS_WRITECACHE_DISABLED        = 0x0,
+                       MYRS_LOGICALDEVICE_RO           = 0x1,
+                       MYRS_WRITECACHE_ENABLED         = 0x2,
+                       MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3,
+                       MYRS_WRITECACHE_LAST            = 0x7,
+               } __packed wce:3; /* Byte 8 Bits 3-5 */
+               unsigned rsvd1:1;               /* Byte 8 Bit 6 */
+               unsigned ldev_init_done:1;      /* Byte 8 Bit 7 */
+       } ldev_control;                         /* Byte 8 */
+       /* Logical Device Operations Status */
+       unsigned char cc_active:1;              /* Byte 9 Bit 0 */
+       unsigned char rbld_active:1;            /* Byte 9 Bit 1 */
+       unsigned char bg_init_active:1;         /* Byte 9 Bit 2 */
+       unsigned char fg_init_active:1;         /* Byte 9 Bit 3 */
+       unsigned char migration_active:1;       /* Byte 9 Bit 4 */
+       unsigned char patrol_active:1;          /* Byte 9 Bit 5 */
+       unsigned char rsvd2:2;                  /* Byte 9 Bits 6-7 */
+       unsigned char raid5_writeupdate;        /* Byte 10 */
+       unsigned char raid5_algo;               /* Byte 11 */
+       unsigned short ldev_num;                /* Bytes 12-13 */
+       /* BIOS Info */
+       unsigned char bios_disabled:1;          /* Byte 14 Bit 0 */
+       unsigned char cdrom_boot:1;             /* Byte 14 Bit 1 */
+       unsigned char drv_coercion:1;           /* Byte 14 Bit 2 */
+       unsigned char write_same_disabled:1;    /* Byte 14 Bit 3 */
+       unsigned char hba_mode:1;               /* Byte 14 Bit 4 */
+       enum {
+               MYRS_GEOMETRY_128_32    = 0x0,
+               MYRS_GEOMETRY_255_63    = 0x1,
+               MYRS_GEOMETRY_RSVD1     = 0x2,
+               MYRS_GEOMETRY_RSVD2     = 0x3
+       } __packed drv_geom:2;  /* Byte 14 Bits 5-6 */
+       unsigned char super_ra_enabled:1;       /* Byte 14 Bit 7 */
+       unsigned char rsvd3;                    /* Byte 15 */
+       /* Error Counters */
+       unsigned short soft_errs;               /* Bytes 16-17 */
+       unsigned short cmds_failed;             /* Bytes 18-19 */
+       unsigned short cmds_aborted;            /* Bytes 20-21 */
+       unsigned short deferred_write_errs;     /* Bytes 22-23 */
+       unsigned int rsvd4;                     /* Bytes 24-27 */
+       unsigned int rsvd5;                     /* Bytes 28-31 */
+       /* Device Size Information */
+       unsigned short rsvd6;                   /* Bytes 32-33 */
+       unsigned short devsize_bytes;           /* Bytes 34-35 */
+       unsigned int orig_devsize;              /* Bytes 36-39 */
+       unsigned int cfg_devsize;               /* Bytes 40-43 */
+       unsigned int rsvd7;                     /* Bytes 44-47 */
+       unsigned char ldev_name[32];            /* Bytes 48-79 */
+       unsigned char inquiry[36];              /* Bytes 80-115 */
+       unsigned char rsvd8[12];                /* Bytes 116-127 */
+       u64 last_read_lba;                      /* Bytes 128-135 */
+       u64 last_write_lba;                     /* Bytes 136-143 */
+       u64 cc_lba;                             /* Bytes 144-151 */
+       u64 rbld_lba;                           /* Bytes 152-159 */
+       u64 bg_init_lba;                        /* Bytes 160-167 */
+       u64 fg_init_lba;                        /* Bytes 168-175 */
+       u64 migration_lba;                      /* Bytes 176-183 */
+       u64 patrol_lba;                         /* Bytes 184-191 */
+       unsigned char rsvd9[64];                /* Bytes 192-255 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Physical Device Info reply structure.
+ */
+struct myrs_pdev_info {
+       unsigned char rsvd1;                    /* Byte 0 */
+       unsigned char channel;                  /* Byte 1 */
+       unsigned char target;                   /* Byte 2 */
+       unsigned char lun;                      /* Byte 3 */
+       /* Configuration Status Bits */
+       unsigned char pdev_fault_tolerant:1;    /* Byte 4 Bit 0 */
+       unsigned char pdev_connected:1;         /* Byte 4 Bit 1 */
+       unsigned char pdev_local_to_ctlr:1;     /* Byte 4 Bit 2 */
+       unsigned char rsvd2:5;                  /* Byte 4 Bits 3-7 */
+       /* Multiple Host/Controller Status Bits */
+       unsigned char remote_host_dead:1;       /* Byte 5 Bit 0 */
+       unsigned char remove_ctlr_dead:1;       /* Byte 5 Bit 1 */
+       unsigned char rsvd3:6;                  /* Byte 5 Bits 2-7 */
+       enum myrs_devstate dev_state;           /* Byte 6 */
+       unsigned char nego_data_width;          /* Byte 7 */
+       unsigned short nego_sync_rate;          /* Bytes 8-9 */
+       /* Multiported Physical Device Information */
+       unsigned char num_ports;                /* Byte 10 */
+       unsigned char drv_access_bitmap;        /* Byte 11 */
+       unsigned int rsvd4;                     /* Bytes 12-15 */
+       unsigned char ip_address[16];           /* Bytes 16-31 */
+       unsigned short max_tags;                /* Bytes 32-33 */
+       /* Physical Device Operations Status */
+       unsigned char cc_in_progress:1;         /* Byte 34 Bit 0 */
+       unsigned char rbld_in_progress:1;       /* Byte 34 Bit 1 */
+       unsigned char makecc_in_progress:1;     /* Byte 34 Bit 2 */
+       unsigned char pdevinit_in_progress:1;   /* Byte 34 Bit 3 */
+       unsigned char migration_in_progress:1;  /* Byte 34 Bit 4 */
+       unsigned char patrol_in_progress:1;     /* Byte 34 Bit 5 */
+       unsigned char rsvd5:2;                  /* Byte 34 Bits 6-7 */
+       unsigned char long_op_status;           /* Byte 35 */
+       unsigned char parity_errs;              /* Byte 36 */
+       unsigned char soft_errs;                /* Byte 37 */
+       unsigned char hard_errs;                /* Byte 38 */
+       unsigned char misc_errs;                /* Byte 39 */
+       unsigned char cmd_timeouts;             /* Byte 40 */
+       unsigned char retries;                  /* Byte 41 */
+       unsigned char aborts;                   /* Byte 42 */
+       unsigned char pred_failures;            /* Byte 43 */
+       unsigned int rsvd6;                     /* Bytes 44-47 */
+       unsigned short rsvd7;                   /* Bytes 48-49 */
+       unsigned short devsize_bytes;           /* Bytes 50-51 */
+       unsigned int orig_devsize;              /* Bytes 52-55 */
+       unsigned int cfg_devsize;               /* Bytes 56-59 */
+       unsigned int rsvd8;                     /* Bytes 60-63 */
+       unsigned char pdev_name[16];            /* Bytes 64-79 */
+       unsigned char rsvd9[16];                /* Bytes 80-95 */
+       unsigned char rsvd10[32];               /* Bytes 96-127 */
+       unsigned char inquiry[36];              /* Bytes 128-163 */
+       unsigned char rsvd11[20];               /* Bytes 164-183 */
+       unsigned char rsvd12[8];                /* Bytes 184-191 */
+       u64 last_read_lba;                      /* Bytes 192-199 */
+       u64 last_write_lba;                     /* Bytes 200-207 */
+       u64 cc_lba;                             /* Bytes 208-215 */
+       u64 rbld_lba;                           /* Bytes 216-223 */
+       u64 makecc_lba;                         /* Bytes 224-231 */
+       u64 devinit_lba;                        /* Bytes 232-239 */
+       u64 migration_lba;                      /* Bytes 240-247 */
+       u64 patrol_lba;                         /* Bytes 248-255 */
+       unsigned char rsvd13[256];              /* Bytes 256-511 */
+};
+
+/*
+ * DAC960 V2 Firmware Health Status Buffer structure.
+ */
+struct myrs_fwstat {
+       unsigned int uptime_usecs;              /* Bytes 0-3 */
+       unsigned int uptime_msecs;              /* Bytes 4-7 */
+       unsigned int seconds;                   /* Bytes 8-11 */
+       unsigned char rsvd1[4];                 /* Bytes 12-15 */
+       unsigned int epoch;                     /* Bytes 16-19 */
+       unsigned char rsvd2[4];                 /* Bytes 20-23 */
+       unsigned int dbg_msgbuf_idx;            /* Bytes 24-27 */
+       unsigned int coded_msgbuf_idx;          /* Bytes 28-31 */
+       unsigned int cur_timetrace_page;        /* Bytes 32-35 */
+       unsigned int cur_prof_page;             /* Bytes 36-39 */
+       unsigned int next_evseq;                /* Bytes 40-43 */
+       unsigned char rsvd3[4];                 /* Bytes 44-47 */
+       unsigned char rsvd4[16];                /* Bytes 48-63 */
+       unsigned char rsvd5[64];                /* Bytes 64-127 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Event reply structure.
+ */
+struct myrs_event {
+       unsigned int ev_seq;                    /* Bytes 0-3 */
+       unsigned int ev_time;                   /* Bytes 4-7 */
+       unsigned int ev_code;                   /* Bytes 8-11 */
+       unsigned char rsvd1;                    /* Byte 12 */
+       unsigned char channel;                  /* Byte 13 */
+       unsigned char target;                   /* Byte 14 */
+       unsigned char lun;                      /* Byte 15 */
+       unsigned int rsvd2;                     /* Bytes 16-19 */
+       unsigned int ev_parm;                   /* Bytes 20-23 */
+       unsigned char sense_data[40];           /* Bytes 24-63 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Control Bits structure.
+ */
+struct myrs_cmd_ctrl {
+       unsigned char fua:1;                    /* Byte 0 Bit 0 */
+       unsigned char disable_pgout:1;          /* Byte 0 Bit 1 */
+       unsigned char rsvd1:1;                  /* Byte 0 Bit 2 */
+       unsigned char add_sge_mem:1;            /* Byte 0 Bit 3 */
+       unsigned char dma_ctrl_to_host:1;       /* Byte 0 Bit 4 */
+       unsigned char rsvd2:1;                  /* Byte 0 Bit 5 */
+       unsigned char no_autosense:1;           /* Byte 0 Bit 6 */
+       unsigned char disc_prohibited:1;        /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Timeout structure.
+ */
+struct myrs_cmd_tmo {
+       unsigned char tmo_val:6;                        /* Byte 0 Bits 0-5 */
+       enum {
+               MYRS_TMO_SCALE_SECONDS  = 0,
+               MYRS_TMO_SCALE_MINUTES  = 1,
+               MYRS_TMO_SCALE_HOURS    = 2,
+               MYRS_TMO_SCALE_RESERVED = 3
+       } __packed tmo_scale:2;         /* Byte 0 Bits 6-7 */
+};
+
+/*
+ * DAC960 V2 Firmware Physical Device structure.
+ */
+struct myrs_pdev {
+       unsigned char lun;                      /* Byte 0 */
+       unsigned char target;                   /* Byte 1 */
+       unsigned char channel:3;                /* Byte 2 Bits 0-2 */
+       unsigned char ctlr:5;                   /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Logical Device structure.
+ */
+struct myrs_ldev {
+       unsigned short ldev_num;                /* Bytes 0-1 */
+       unsigned char rsvd:3;                   /* Byte 2 Bits 0-2 */
+       unsigned char ctlr:5;                   /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Operation Device type.
+ */
+enum myrs_opdev {
+       MYRS_PHYSICAL_DEVICE    = 0x00,
+       MYRS_RAID_DEVICE        = 0x01,
+       MYRS_PHYSICAL_CHANNEL   = 0x02,
+       MYRS_RAID_CHANNEL       = 0x03,
+       MYRS_PHYSICAL_CONTROLLER = 0x04,
+       MYRS_RAID_CONTROLLER    = 0x05,
+       MYRS_CONFIGURATION_GROUP = 0x10,
+       MYRS_ENCLOSURE          = 0x11,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Translate Physical To Logical Device structure.
+ */
+struct myrs_devmap {
+       unsigned short ldev_num;                /* Bytes 0-1 */
+       unsigned short rsvd;                    /* Bytes 2-3 */
+       unsigned char prev_boot_ctlr;           /* Byte 4 */
+       unsigned char prev_boot_channel;        /* Byte 5 */
+       unsigned char prev_boot_target;         /* Byte 6 */
+       unsigned char prev_boot_lun;            /* Byte 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Scatter/Gather List Entry structure.
+ */
+struct myrs_sge {
+       u64 sge_addr;                   /* Bytes 0-7 */
+       u64 sge_count;                  /* Bytes 8-15 */
+};
+
+/*
+ * DAC960 V2 Firmware Data Transfer Memory Address structure.
+ */
+union myrs_sgl {
+       struct myrs_sge sge[2]; /* Bytes 0-31 */
+       struct {
+               unsigned short sge0_len;        /* Bytes 0-1 */
+               unsigned short sge1_len;        /* Bytes 2-3 */
+               unsigned short sge2_len;        /* Bytes 4-5 */
+               unsigned short rsvd;            /* Bytes 6-7 */
+               u64 sge0_addr;                  /* Bytes 8-15 */
+               u64 sge1_addr;                  /* Bytes 16-23 */
+               u64 sge2_addr;                  /* Bytes 24-31 */
+       } ext;
+};
+
+/*
+ * 64 Byte DAC960 V2 Firmware Command Mailbox structure.
+ */
+union myrs_cmd_mbox {
+       unsigned int words[16];                         /* Words 0-15 */
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               unsigned int rsvd1:24;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned char rsvd2[10];                /* Bytes 22-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } common;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size;                           /* Bytes 4-7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               struct myrs_pdev pdev;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               unsigned char cdb_len;                  /* Byte 21 */
+               unsigned char cdb[10];                  /* Bytes 22-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } SCSI_10;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size;                           /* Bytes 4-7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               struct myrs_pdev pdev;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               unsigned char cdb_len;                  /* Byte 21 */
+               unsigned short rsvd;                    /* Bytes 22-23 */
+               u64 cdb_addr;                           /* Bytes 24-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } SCSI_255;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               unsigned short rsvd1;                   /* Bytes 16-17 */
+               unsigned char ctlr_num;                 /* Byte 18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned char rsvd2[10];                /* Bytes 22-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } ctlr_info;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               struct myrs_ldev ldev;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned char rsvd[10];                 /* Bytes 22-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } ldev_info;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               struct myrs_pdev pdev;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned char rsvd[10];                 /* Bytes 22-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } pdev_info;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               unsigned short evnum_upper;             /* Bytes 16-17 */
+               unsigned char ctlr_num;                 /* Byte 18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned short evnum_lower;             /* Bytes 22-23 */
+               unsigned char rsvd[8];                  /* Bytes 24-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } get_event;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               union {
+                       struct myrs_ldev ldev;          /* Bytes 16-18 */
+                       struct myrs_pdev pdev;          /* Bytes 16-18 */
+               };
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               enum myrs_devstate state;               /* Byte 22 */
+               unsigned char rsvd[9];                  /* Bytes 23-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } set_devstate;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               struct myrs_ldev ldev;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned char restore_consistency:1;    /* Byte 22 Bit 0 */
+               unsigned char initialized_area_only:1;  /* Byte 22 Bit 1 */
+               unsigned char rsvd1:6;                  /* Byte 22 Bits 2-7 */
+               unsigned char rsvd2[9];                 /* Bytes 23-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } cc;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               unsigned char first_cmd_mbox_size_kb;   /* Byte 4 */
+               unsigned char first_stat_mbox_size_kb;  /* Byte 5 */
+               unsigned char second_cmd_mbox_size_kb;  /* Byte 6 */
+               unsigned char second_stat_mbox_size_kb; /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               unsigned int rsvd1:24;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               unsigned char fwstat_buf_size_kb;       /* Byte 22 */
+               unsigned char rsvd2;                    /* Byte 23 */
+               u64 fwstat_buf_addr;                    /* Bytes 24-31 */
+               u64 first_cmd_mbox_addr;                /* Bytes 32-39 */
+               u64 first_stat_mbox_addr;               /* Bytes 40-47 */
+               u64 second_cmd_mbox_addr;               /* Bytes 48-55 */
+               u64 second_stat_mbox_addr;              /* Bytes 56-63 */
+       } set_mbox;
+       struct {
+               unsigned short id;                      /* Bytes 0-1 */
+               enum myrs_cmd_opcode opcode;            /* Byte 2 */
+               struct myrs_cmd_ctrl control;           /* Byte 3 */
+               u32 dma_size:24;                        /* Bytes 4-6 */
+               unsigned char dma_num;                  /* Byte 7 */
+               u64 sense_addr;                         /* Bytes 8-15 */
+               struct myrs_pdev pdev;                  /* Bytes 16-18 */
+               struct myrs_cmd_tmo tmo;                /* Byte 19 */
+               unsigned char sense_len;                /* Byte 20 */
+               enum myrs_ioctl_opcode ioctl_opcode;    /* Byte 21 */
+               enum myrs_opdev opdev;                  /* Byte 22 */
+               unsigned char rsvd[9];                  /* Bytes 23-31 */
+               union myrs_sgl dma_addr;                /* Bytes 32-63 */
+       } dev_op;
+};
+
+/*
+ * DAC960 V2 Firmware Controller Status Mailbox structure.
+ */
+struct myrs_stat_mbox {
+       unsigned short id;              /* Bytes 0-1 */
+       unsigned char status;           /* Byte 2 */
+       unsigned char sense_len;        /* Byte 3 */
+       int residual;                   /* Bytes 4-7 */
+};
+
+struct myrs_cmdblk {
+       union myrs_cmd_mbox mbox;
+       unsigned char status;
+       unsigned char sense_len;
+       int residual;
+       struct completion *complete;
+       struct myrs_sge *sgl;
+       dma_addr_t sgl_addr;
+       unsigned char *dcdb;
+       dma_addr_t dcdb_dma;
+       unsigned char *sense;
+       dma_addr_t sense_addr;
+};
+
+/*
+ * DAC960 Driver Controller structure.
+ */
+struct myrs_hba {
+       void __iomem *io_base;
+       void __iomem *mmio_base;
+       phys_addr_t io_addr;
+       phys_addr_t pci_addr;
+       unsigned int irq;
+
+       unsigned char model_name[28];
+       unsigned char fw_version[12];
+
+       struct Scsi_Host *host;
+       struct pci_dev *pdev;
+
+       unsigned int epoch;
+       unsigned int next_evseq;
+       /* Monitor flags */
+       bool needs_update;
+       bool disable_enc_msg;
+
+       struct workqueue_struct *work_q;
+       char work_q_name[20];
+       struct delayed_work monitor_work;
+       unsigned long primary_monitor_time;
+       unsigned long secondary_monitor_time;
+
+       spinlock_t queue_lock;
+
+       struct dma_pool *sg_pool;
+       struct dma_pool *sense_pool;
+       struct dma_pool *dcdb_pool;
+
+       void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox,
+                              union myrs_cmd_mbox *cmd_mbox);
+       void (*get_cmd_mbox)(void __iomem *base);
+       void (*disable_intr)(void __iomem *base);
+       void (*reset)(void __iomem *base);
+
+       dma_addr_t cmd_mbox_addr;
+       size_t cmd_mbox_size;
+       union myrs_cmd_mbox *first_cmd_mbox;
+       union myrs_cmd_mbox *last_cmd_mbox;
+       union myrs_cmd_mbox *next_cmd_mbox;
+       union myrs_cmd_mbox *prev_cmd_mbox1;
+       union myrs_cmd_mbox *prev_cmd_mbox2;
+
+       dma_addr_t stat_mbox_addr;
+       size_t stat_mbox_size;
+       struct myrs_stat_mbox *first_stat_mbox;
+       struct myrs_stat_mbox *last_stat_mbox;
+       struct myrs_stat_mbox *next_stat_mbox;
+
+       struct myrs_cmdblk dcmd_blk;
+       struct myrs_cmdblk mcmd_blk;
+       struct mutex dcmd_mutex;
+
+       struct myrs_fwstat *fwstat_buf;
+       dma_addr_t fwstat_addr;
+
+       struct myrs_ctlr_info *ctlr_info;
+       struct mutex cinfo_mutex;
+
+       struct myrs_event *event_buf;
+};
+
+typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr);
+typedef int (*myrs_hwinit_t)(struct pci_dev *pdev,
+                            struct myrs_hba *c, void __iomem *base);
+
+struct myrs_privdata {
+       myrs_hwinit_t           hw_init;
+       irq_handler_t           irq_handler;
+       unsigned int            mmio_size;
+};
+
+/*
+ * DAC960 GEM Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_GEM_mmio_size   0x600
+
+enum DAC960_GEM_reg_offset {
+       DAC960_GEM_IDB_READ_OFFSET      = 0x214,
+       DAC960_GEM_IDB_CLEAR_OFFSET     = 0x218,
+       DAC960_GEM_ODB_READ_OFFSET      = 0x224,
+       DAC960_GEM_ODB_CLEAR_OFFSET     = 0x228,
+       DAC960_GEM_IRQSTS_OFFSET        = 0x208,
+       DAC960_GEM_IRQMASK_READ_OFFSET  = 0x22C,
+       DAC960_GEM_IRQMASK_CLEAR_OFFSET = 0x230,
+       DAC960_GEM_CMDMBX_OFFSET        = 0x510,
+       DAC960_GEM_CMDSTS_OFFSET        = 0x518,
+       DAC960_GEM_ERRSTS_READ_OFFSET   = 0x224,
+       DAC960_GEM_ERRSTS_CLEAR_OFFSET  = 0x228,
+};
+
+/*
+ * DAC960 GEM Series Inbound Door Bell Register.
+ */
+#define DAC960_GEM_IDB_HWMBOX_NEW_CMD  0x01
+#define DAC960_GEM_IDB_HWMBOX_ACK_STS  0x02
+#define DAC960_GEM_IDB_GEN_IRQ         0x04
+#define DAC960_GEM_IDB_CTRL_RESET      0x08
+#define DAC960_GEM_IDB_MMBOX_NEW_CMD   0x10
+
+#define DAC960_GEM_IDB_HWMBOX_FULL     0x01
+#define DAC960_GEM_IDB_INIT_IN_PROGRESS        0x02
+
+/*
+ * DAC960 GEM Series Outbound Door Bell Register.
+ */
+#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ  0x01
+#define DAC960_GEM_ODB_MMBOX_ACK_IRQ   0x02
+#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_GEM_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 GEM Series Interrupt Mask Register.
+ */
+#define DAC960_GEM_IRQMASK_HWMBOX_IRQ  0x01
+#define DAC960_GEM_IRQMASK_MMBOX_IRQ   0x02
+
+/*
+ * DAC960 GEM Series Error Status Register.
+ */
+#define DAC960_GEM_ERRSTS_PENDING      0x20
+
+/*
+ * dma_addr_writeql is provided to write dma_addr_t types
+ * to a 64-bit pci address space register.  The controller
+ * will accept having the register written as two 32-bit
+ * values.
+ *
+ * In HIGHMEM kernels, dma_addr_t is a 64-bit value.
+ * without HIGHMEM,  dma_addr_t is a 32-bit value.
+ *
+ * The compiler should always fix up the assignment
+ * to u.wq appropriately, depending upon the size of
+ * dma_addr_t.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+       union {
+               u64 wq;
+               uint wl[2];
+       } u;
+
+       u.wq = addr;
+
+       writel(u.wl[0], write_address);
+       writel(u.wl[1], write_address + 4);
+}
+
+/*
+ * DAC960 BA Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_BA_mmio_size            0x80
+
+enum DAC960_BA_reg_offset {
+       DAC960_BA_IRQSTS_OFFSET = 0x30,
+       DAC960_BA_IRQMASK_OFFSET = 0x34,
+       DAC960_BA_CMDMBX_OFFSET = 0x50,
+       DAC960_BA_CMDSTS_OFFSET = 0x58,
+       DAC960_BA_IDB_OFFSET    = 0x60,
+       DAC960_BA_ODB_OFFSET    = 0x61,
+       DAC960_BA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 BA Series Inbound Door Bell Register.
+ */
+#define DAC960_BA_IDB_HWMBOX_NEW_CMD   0x01
+#define DAC960_BA_IDB_HWMBOX_ACK_STS   0x02
+#define DAC960_BA_IDB_GEN_IRQ          0x04
+#define DAC960_BA_IDB_CTRL_RESET       0x08
+#define DAC960_BA_IDB_MMBOX_NEW_CMD    0x10
+
+#define DAC960_BA_IDB_HWMBOX_EMPTY     0x01
+#define DAC960_BA_IDB_INIT_DONE                0x02
+
+/*
+ * DAC960 BA Series Outbound Door Bell Register.
+ */
+#define DAC960_BA_ODB_HWMBOX_ACK_IRQ   0x01
+#define DAC960_BA_ODB_MMBOX_ACK_IRQ    0x02
+
+#define DAC960_BA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_BA_ODB_MMBOX_STS_AVAIL  0x02
+
+/*
+ * DAC960 BA Series Interrupt Mask Register.
+ */
+#define DAC960_BA_IRQMASK_DISABLE_IRQ  0x04
+#define DAC960_BA_IRQMASK_DISABLEW_I2O 0x08
+
+/*
+ * DAC960 BA Series Error Status Register.
+ */
+#define DAC960_BA_ERRSTS_PENDING       0x04
+
+/*
+ * DAC960 LP Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_LP_mmio_size            0x80
+
+enum DAC960_LP_reg_offset {
+       DAC960_LP_CMDMBX_OFFSET = 0x10,
+       DAC960_LP_CMDSTS_OFFSET = 0x18,
+       DAC960_LP_IDB_OFFSET    = 0x20,
+       DAC960_LP_ODB_OFFSET    = 0x2C,
+       DAC960_LP_ERRSTS_OFFSET = 0x2E,
+       DAC960_LP_IRQSTS_OFFSET = 0x30,
+       DAC960_LP_IRQMASK_OFFSET = 0x34,
+};
+
+/*
+ * DAC960 LP Series Inbound Door Bell Register.
+ */
+#define DAC960_LP_IDB_HWMBOX_NEW_CMD   0x01
+#define DAC960_LP_IDB_HWMBOX_ACK_STS   0x02
+#define DAC960_LP_IDB_GEN_IRQ          0x04
+#define DAC960_LP_IDB_CTRL_RESET       0x08
+#define DAC960_LP_IDB_MMBOX_NEW_CMD    0x10
+
+#define DAC960_LP_IDB_HWMBOX_FULL      0x01
+#define DAC960_LP_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 LP Series Outbound Door Bell Register.
+ */
+#define DAC960_LP_ODB_HWMBOX_ACK_IRQ   0x01
+#define DAC960_LP_ODB_MMBOX_ACK_IRQ    0x02
+
+#define DAC960_LP_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LP_ODB_MMBOX_STS_AVAIL  0x02
+
+/*
+ * DAC960 LP Series Interrupt Mask Register.
+ */
+#define DAC960_LP_IRQMASK_DISABLE_IRQ  0x04
+
+/*
+ * DAC960 LP Series Error Status Register.
+ */
+#define DAC960_LP_ERRSTS_PENDING       0x04
+
+#endif /* _MYRS_H */
index 8620ac5..5aac3e8 100644 (file)
@@ -2638,7 +2638,7 @@ static int nsp32_detect(struct pci_dev *pdev)
        /*
         * setup DMA 
         */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
                nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
                goto scsi_unregister;
        }
@@ -2646,7 +2646,9 @@ static int nsp32_detect(struct pci_dev *pdev)
        /*
         * allocate autoparam DMA resource.
         */
-       data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+       data->autoparam = dma_alloc_coherent(&pdev->dev,
+                       sizeof(nsp32_autoparam), &(data->auto_paddr),
+                       GFP_KERNEL);
        if (data->autoparam == NULL) {
                nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
                goto scsi_unregister;
@@ -2655,8 +2657,8 @@ static int nsp32_detect(struct pci_dev *pdev)
        /*
         * allocate scatter-gather DMA resource.
         */
-       data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
-                                            &(data->sg_paddr));
+       data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+                       &data->sg_paddr, GFP_KERNEL);
        if (data->sg_list == NULL) {
                nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
                goto free_autoparam;
@@ -2761,11 +2763,11 @@ static int nsp32_detect(struct pci_dev *pdev)
        free_irq(host->irq, data);
 
  free_sg_list:
-       pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
+       dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
                            data->sg_list, data->sg_paddr);
 
  free_autoparam:
-       pci_free_consistent(pdev, sizeof(nsp32_autoparam),
+       dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
                            data->autoparam, data->auto_paddr);
        
  scsi_unregister:
@@ -2780,12 +2782,12 @@ static int nsp32_release(struct Scsi_Host *host)
        nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
 
        if (data->autoparam) {
-               pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
+               dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam),
                                    data->autoparam, data->auto_paddr);
        }
 
        if (data->sg_list) {
-               pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
+               dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE,
                                    data->sg_list, data->sg_paddr);
        }
 
index 67b1457..e19fa88 100644 (file)
@@ -445,7 +445,7 @@ static void _put_request(struct request *rq)
         *       code paths.
         */
        if (unlikely(rq->bio))
-               blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
+               blk_mq_end_request(rq, BLK_STS_IOERR);
        else
                blk_put_request(rq);
 }
index 199527d..48e0624 100644 (file)
@@ -132,4 +132,12 @@ enum pm8001_hba_info_flags {
        PM8001F_RUN_TIME        = (1U << 1),
 };
 
+/**
+ * Phy Status
+ */
+#define PHY_LINK_DISABLE       0x00
+#define PHY_LINK_DOWN          0x01
+#define PHY_STATE_LINK_UP_SPCV 0x2
+#define PHY_STATE_LINK_UP_SPC  0x1
+
 #endif
index 4dd6cad..d0bb357 100644 (file)
@@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
                } else {
                        u32 producer_index;
                        void *pi_virt = circularQ->pi_virt;
+                       /* spurious interrupt during setup if
+                        * kexec-ing and driver doing a doorbell access
+                        * with the pre-kexec oq interrupt setup
+                        */
+                       if (!pi_virt)
+                               break;
                        /* Update the producer index from SPC */
                        producer_index = pm8001_read_32(pi_virt);
                        circularQ->producer_index = cpu_to_le32(producer_index);
@@ -2414,7 +2420,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                        sata_resp = &psataPayload->sata_resp[0];
                        resp = (struct ata_task_resp *)ts->buf;
                        if (t->ata_task.dma_xfer == 0 &&
-                       t->data_dir == PCI_DMA_FROMDEVICE) {
+                           t->data_dir == DMA_FROM_DEVICE) {
                                len = sizeof(struct pio_setup_fis);
                                PM8001_IO_DBG(pm8001_ha,
                                pm8001_printk("PIO read len = %d\n", len));
@@ -3810,7 +3816,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
                        " status = %x\n", status));
                if (status == 0) {
                        phy->phy_state = 1;
-                       if (pm8001_ha->flags == PM8001F_RUN_TIME)
+                       if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+                                       phy->enable_completion != NULL)
                                complete(phy->enable_completion);
                }
                break;
@@ -4196,12 +4203,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
        return ret;
 }
 
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
 static const u8 data_dir_flags[] = {
-       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
-       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT,/* OUTBOUND */
-       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
-       [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
+       [DMA_BIDIRECTIONAL]     = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+       [DMA_TO_DEVICE]         = DATA_DIR_OUT,         /* OUTBOUND */
+       [DMA_FROM_DEVICE]       = DATA_DIR_IN,          /* INBOUND */
+       [DMA_NONE]              = DATA_DIR_NONE,        /* NO TRANSFER */
 };
 void
 pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
@@ -4248,13 +4255,13 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
         * DMA-map SMP request, response buffers
         */
        sg_req = &task->smp_task.smp_req;
-       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
        if (!elem)
                return -ENOMEM;
        req_len = sg_dma_len(sg_req);
 
        sg_resp = &task->smp_task.smp_resp;
-       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
        if (!elem) {
                rc = -ENOMEM;
                goto err_out;
@@ -4287,10 +4294,10 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 
 err_out_2:
        dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
-                       PCI_DMA_FROMDEVICE);
+                       DMA_FROM_DEVICE);
 err_out:
        dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
-                       PCI_DMA_TODEVICE);
+                       DMA_TO_DEVICE);
        return rc;
 }
 
@@ -4369,7 +4376,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        u32  opc = OPC_INB_SATA_HOST_OPSTART;
        memset(&sata_cmd, 0, sizeof(sata_cmd));
        circularQ = &pm8001_ha->inbnd_q_tbl[0];
-       if (task->data_dir == PCI_DMA_NONE) {
+       if (task->data_dir == DMA_NONE) {
                ATAP = 0x04;  /* no data*/
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
        } else if (likely(!task->ata_task.device_control_reg_update)) {
index e4867e6..6d91e24 100644 (file)
 #define LINKRATE_30                    (0x02 << 8)
 #define LINKRATE_60                    (0x04 << 8)
 
-/* for phy state */
-
-#define PHY_STATE_LINK_UP_SPC          0x1
-
 /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
 #define GSM_SM_BASE                    0x4F0000
 struct mpi_msg_hdr{
index 7a697ca..d71e7e4 100644 (file)
@@ -121,7 +121,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
 {
        struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
-       phy->phy_state = 0;
+       phy->phy_state = PHY_LINK_DISABLE;
        phy->pm8001_ha = pm8001_ha;
        sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
        sas_phy->class = SAS;
@@ -152,7 +152,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
 
        for (i = 0; i < USI_MAX_MEMCNT; i++) {
                if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
-                       pci_free_consistent(pm8001_ha->pdev,
+                       dma_free_coherent(&pm8001_ha->pdev->dev,
                                (pm8001_ha->memoryMap.region[i].total_len +
                                pm8001_ha->memoryMap.region[i].alignment),
                                pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -501,30 +501,12 @@ static int pci_go_44(struct pci_dev *pdev)
 {
        int rc;
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
-               if (rc) {
-                       rc = pci_set_consistent_dma_mask(pdev,
-                               DMA_BIT_MASK(32));
-                       if (rc) {
-                               dev_printk(KERN_ERR, &pdev->dev,
-                                       "44-bit DMA enable failed\n");
-                               return rc;
-                       }
-               }
-       } else {
-               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+       if (rc) {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (rc)
                        dev_printk(KERN_ERR, &pdev->dev,
                                "32-bit DMA enable failed\n");
-                       return rc;
-               }
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_printk(KERN_ERR, &pdev->dev,
-                               "32-bit consistent DMA enable failed\n");
-                       return rc;
-               }
        }
        return rc;
 }
@@ -1067,6 +1049,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
        if (rc)
                goto err_out_shost;
        scsi_scan_host(pm8001_ha->shost);
+       pm8001_ha->flags = PM8001F_RUN_TIME;
        return 0;
 
 err_out_shost:
index 947d601..b3be49d 100644 (file)
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
        u64 align_offset = 0;
        if (align)
                align_offset = (dma_addr_t)align - 1;
-       mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
-                                              &mem_dma_handle);
+       mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
+                       &mem_dma_handle, GFP_KERNEL);
        if (!mem_virt_alloc) {
                pm8001_printk("memory allocation error\n");
                return -1;
@@ -157,9 +157,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
        int rc = 0, phy_id = sas_phy->id;
        struct pm8001_hba_info *pm8001_ha = NULL;
        struct sas_phy_linkrates *rates;
+       struct sas_ha_struct *sas_ha;
+       struct pm8001_phy *phy;
        DECLARE_COMPLETION_ONSTACK(completion);
        unsigned long flags;
        pm8001_ha = sas_phy->ha->lldd_ha;
+       phy = &pm8001_ha->phy[phy_id];
        pm8001_ha->phy[phy_id].enable_completion = &completion;
        switch (func) {
        case PHY_FUNC_SET_LINK_RATE:
@@ -172,7 +175,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                        pm8001_ha->phy[phy_id].maximum_linkrate =
                                rates->maximum_linkrate;
                }
-               if (pm8001_ha->phy[phy_id].phy_state == 0) {
+               if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
                        PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
                        wait_for_completion(&completion);
                }
@@ -180,7 +183,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                                              PHY_LINK_RESET);
                break;
        case PHY_FUNC_HARD_RESET:
-               if (pm8001_ha->phy[phy_id].phy_state == 0) {
+               if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
                        PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
                        wait_for_completion(&completion);
                }
@@ -188,7 +191,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                                              PHY_HARD_RESET);
                break;
        case PHY_FUNC_LINK_RESET:
-               if (pm8001_ha->phy[phy_id].phy_state == 0) {
+               if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
                        PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
                        wait_for_completion(&completion);
                }
@@ -200,6 +203,25 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
                                              PHY_LINK_RESET);
                break;
        case PHY_FUNC_DISABLE:
+               if (pm8001_ha->chip_id != chip_8001) {
+                       if (pm8001_ha->phy[phy_id].phy_state ==
+                               PHY_STATE_LINK_UP_SPCV) {
+                               sas_ha = pm8001_ha->sas;
+                               sas_phy_disconnected(&phy->sas_phy);
+                               sas_ha->notify_phy_event(&phy->sas_phy,
+                                       PHYE_LOSS_OF_SIGNAL);
+                               phy->phy_attached = 0;
+                       }
+               } else {
+                       if (pm8001_ha->phy[phy_id].phy_state ==
+                               PHY_STATE_LINK_UP_SPC) {
+                               sas_ha = pm8001_ha->sas;
+                               sas_phy_disconnected(&phy->sas_phy);
+                               sas_ha->notify_phy_event(&phy->sas_phy,
+                                       PHYE_LOSS_OF_SIGNAL);
+                               phy->phy_attached = 0;
+                       }
+               }
                PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
                break;
        case PHY_FUNC_GET_EVENTS:
@@ -374,6 +396,13 @@ static int pm8001_task_exec(struct sas_task *task,
                return 0;
        }
        pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+       if (pm8001_ha->controller_fatal_error) {
+               struct task_status_struct *ts = &t->task_status;
+
+               ts->resp = SAS_TASK_UNDELIVERED;
+               t->task_done(t);
+               return 0;
+       }
        PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
        spin_lock_irqsave(&pm8001_ha->lock, flags);
        do {
@@ -466,7 +495,7 @@ err_out:
        dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
        if (!sas_protocol_ata(t->task_proto))
                if (n_elem)
-                       dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+                       dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
                                t->data_dir);
 out_done:
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -504,9 +533,9 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
        switch (task->task_proto) {
        case SAS_PROTOCOL_SMP:
                dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
-                       PCI_DMA_FROMDEVICE);
+                       DMA_FROM_DEVICE);
                dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
-                       PCI_DMA_TODEVICE);
+                       DMA_TO_DEVICE);
                break;
 
        case SAS_PROTOCOL_SATA:
@@ -1020,13 +1049,11 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
        struct pm8001_device *pm8001_dev;
        struct pm8001_hba_info *pm8001_ha;
        struct sas_phy *phy;
-       u32 device_id = 0;
 
        if (!dev || !dev->lldd_dev)
                return -1;
 
        pm8001_dev = dev->lldd_dev;
-       device_id = pm8001_dev->device_id;
        pm8001_ha = pm8001_find_ha_by_dev(dev);
 
        PM8001_EH_DBG(pm8001_ha,
@@ -1159,7 +1186,6 @@ int pm8001_abort_task(struct sas_task *task)
 {
        unsigned long flags;
        u32 tag;
-       u32 device_id;
        struct domain_device *dev ;
        struct pm8001_hba_info *pm8001_ha;
        struct scsi_lun lun;
@@ -1173,7 +1199,6 @@ int pm8001_abort_task(struct sas_task *task)
        dev = task->dev;
        pm8001_dev = dev->lldd_dev;
        pm8001_ha = pm8001_find_ha_by_dev(dev);
-       device_id = pm8001_dev->device_id;
        phy_id = pm8001_dev->attached_phy;
        rc = pm8001_find_tag(task, &tag);
        if (rc == 0) {
index 80b4dd6..f88b0d3 100644 (file)
@@ -58,7 +58,7 @@
 #include "pm8001_defs.h"
 
 #define DRV_NAME               "pm80xx"
-#define DRV_VERSION            "0.1.38"
+#define DRV_VERSION            "0.1.39"
 #define PM8001_FAIL_LOGGING    0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING    0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING    0x04 /* discovery layer logging */
@@ -538,6 +538,7 @@ struct pm8001_hba_info {
        u32                     logging_level;
        u32                     fw_status;
        u32                     smp_exp_mode;
+       bool                    controller_fatal_error;
        const struct firmware   *fw_image;
        struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
        u32                     reset_in_progress;
index 42f0405..63e4f7d 100644 (file)
@@ -577,6 +577,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
        pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
                pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+       /* Update Fatal error interrupt vector */
+       pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+                                       ((pm8001_ha->number_of_intr - 1) << 8);
        pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
                pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
        pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -1110,6 +1113,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
                return -EBUSY;
        }
 
+       /* Initialize the controller fatal error flag */
+       pm8001_ha->controller_fatal_error = false;
+
        /* Initialize pci space address eg: mpi offset */
        init_pci_device_addresses(pm8001_ha);
        init_default_table_values(pm8001_ha);
@@ -1218,13 +1224,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
        u32 bootloader_state;
        u32 ibutton0, ibutton1;
 
-       /* Check if MPI is in ready state to reset */
-       if (mpi_uninit_check(pm8001_ha) != 0) {
-               PM8001_FAIL_DBG(pm8001_ha,
-                       pm8001_printk("MPI state is not ready\n"));
-               return -1;
+       /* Process MPI table uninitialization only if FW is ready */
+       if (!pm8001_ha->controller_fatal_error) {
+               /* Check if MPI is in ready state to reset */
+               if (mpi_uninit_check(pm8001_ha) != 0) {
+                       regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+                       PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                               "MPI state is not ready scratch1 :0x%x\n",
+                               regval));
+                       return -1;
+               }
        }
-
        /* checked for reset register normal state; 0x0 */
        regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
        PM8001_INIT_DBG(pm8001_ha,
@@ -2123,7 +2133,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
                        sata_resp = &psataPayload->sata_resp[0];
                        resp = (struct ata_task_resp *)ts->buf;
                        if (t->ata_task.dma_xfer == 0 &&
-                       t->data_dir == PCI_DMA_FROMDEVICE) {
+                           t->data_dir == DMA_FROM_DEVICE) {
                                len = sizeof(struct pio_setup_fis);
                                PM8001_IO_DBG(pm8001_ha,
                                pm8001_printk("PIO read len = %d\n", len));
@@ -3118,8 +3128,9 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
                pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
                                status, phy_id));
        if (status == 0) {
-               phy->phy_state = 1;
-               if (pm8001_ha->flags == PM8001F_RUN_TIME)
+               phy->phy_state = PHY_LINK_DOWN;
+               if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+                               phy->enable_completion != NULL)
                        complete(phy->enable_completion);
        }
        return 0;
@@ -3211,7 +3222,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
                        return 0;
                }
                phy->phy_attached = 0;
-               phy->phy_state = 0;
+               phy->phy_state = PHY_LINK_DISABLE;
                break;
        case HW_EVENT_PORT_INVALID:
                PM8001_MSG_DBG(pm8001_ha,
@@ -3384,13 +3395,14 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
        u32 status =
                le32_to_cpu(pPayload->status);
        u32 phyid =
-               le32_to_cpu(pPayload->phyid);
+               le32_to_cpu(pPayload->phyid) & 0xFF;
        struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
        PM8001_MSG_DBG(pm8001_ha,
                        pm8001_printk("phy:0x%x status:0x%x\n",
                                        phyid, status));
-       if (status == 0)
-               phy->phy_state = 0;
+       if (status == PHY_STOP_SUCCESS ||
+               status == PHY_STOP_ERR_DEVICE_ATTACHED)
+               phy->phy_state = PHY_LINK_DISABLE;
        return 0;
 }
 
@@ -3752,6 +3764,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
        }
 }
 
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+       PM8001_FAIL_DBG(pm8001_ha,
+               pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+                       pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
 static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
        struct outbound_queue_table *circularQ;
@@ -3759,10 +3811,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
        u8 uninitialized_var(bc);
        u32 ret = MPI_IO_STATUS_FAIL;
        unsigned long flags;
+       u32 regval;
 
+       if (vec == (pm8001_ha->number_of_intr - 1)) {
+               regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+               if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+                                       SCRATCH_PAD_MIPSALL_READY) {
+                       pm8001_ha->controller_fatal_error = true;
+                       PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+                               "Firmware Fatal error! Regval:0x%x\n", regval));
+                       print_scratchpad_registers(pm8001_ha);
+                       return ret;
+               }
+       }
        spin_lock_irqsave(&pm8001_ha->lock, flags);
        circularQ = &pm8001_ha->outbnd_q_tbl[vec];
        do {
+               /* A spurious interrupt can occur during setup when
+                * kexec-ing, if the driver performs a doorbell access
+                * while the pre-kexec outbound queue interrupt setup
+                * is still in effect.
+                */
+               if (!circularQ->pi_virt)
+                       break;
                ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
                if (MPI_IO_STATUS_SUCCESS == ret) {
                        /* process the outbound message */
@@ -3785,12 +3855,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
        return ret;
 }
 
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
 static const u8 data_dir_flags[] = {
-       [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
-       [PCI_DMA_TODEVICE]      = DATA_DIR_OUT,/* OUTBOUND */
-       [PCI_DMA_FROMDEVICE]    = DATA_DIR_IN,/* INBOUND */
-       [PCI_DMA_NONE]          = DATA_DIR_NONE,/* NO TRANSFER */
+       [DMA_BIDIRECTIONAL]     = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+       [DMA_TO_DEVICE]         = DATA_DIR_OUT,         /* OUTBOUND */
+       [DMA_FROM_DEVICE]       = DATA_DIR_IN,          /* INBOUND */
+       [DMA_NONE]              = DATA_DIR_NONE,        /* NO TRANSFER */
 };
 
 static void build_smp_cmd(u32 deviceID, __le32 hTag,
@@ -3832,13 +3902,13 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
         * DMA-map SMP request, response buffers
         */
        sg_req = &task->smp_task.smp_req;
-       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
        if (!elem)
                return -ENOMEM;
        req_len = sg_dma_len(sg_req);
 
        sg_resp = &task->smp_task.smp_resp;
-       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
        if (!elem) {
                rc = -ENOMEM;
                goto err_out;
@@ -3929,10 +3999,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
 
 err_out_2:
        dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
-                       PCI_DMA_FROMDEVICE);
+                       DMA_FROM_DEVICE);
 err_out:
        dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
-                       PCI_DMA_TODEVICE);
+                       DMA_TO_DEVICE);
        return rc;
 }
 
@@ -4156,7 +4226,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
        q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
        circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
 
-       if (task->data_dir == PCI_DMA_NONE) {
+       if (task->data_dir == DMA_NONE) {
                ATAP = 0x04; /* no data*/
                PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
        } else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4606,9 +4676,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
 void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
        u32 length, u8 *buf)
 {
-       u32 page_code, i;
+       u32 i;
 
-       page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
        for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
                mpi_set_phy_profile_req(pm8001_ha,
                        SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
index 889e69c..84d7426 100644 (file)
 #define LINKRATE_60                    (0x04 << 8)
 #define LINKRATE_120                   (0x08 << 8)
 
+/*phy_stop*/
+#define PHY_STOP_SUCCESS               0x00
+#define PHY_STOP_ERR_DEVICE_ATTACHED   0x1046
+
 /* phy_profile */
 #define SAS_PHY_ANALOG_SETTINGS_PAGE   0x04
 #define PHY_DWORD_LENGTH               0xC
 #define SAS_DOPNRJT_RTRY_TMO            128
 #define SAS_COPNRJT_RTRY_TMO            128
 
-/* for phy state */
-#define PHY_STATE_LINK_UP_SPCV         0x2
 /*
   Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
   Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
@@ -1384,6 +1386,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 #define SCRATCH_PAD_BOOT_LOAD_SUCCESS  0x0
 #define SCRATCH_PAD_IOP0_READY         0xC00
 #define SCRATCH_PAD_IOP1_READY         0x3000
+#define SCRATCH_PAD_MIPSALL_READY      (SCRATCH_PAD_IOP1_READY | \
+                                       SCRATCH_PAD_IOP0_READY | \
+                                       SCRATCH_PAD_RAAE_READY)
 
 /* boot loader state */
 #define SCRATCH_PAD1_BOOTSTATE_MASK            0x70    /* Bit 4-6 */
index 0a5dd55..d5a4f17 100644 (file)
@@ -2855,12 +2855,12 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
                   qedf->num_queues);
 
-       qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+       qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
            qedf->num_queues * sizeof(struct qedf_glbl_q_params),
-           &qedf->hw_p_cpuq);
+           &qedf->hw_p_cpuq, GFP_KERNEL);
 
        if (!qedf->p_cpuq) {
-               QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+               QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
                return 1;
        }
 
@@ -2929,7 +2929,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
 
        if (qedf->p_cpuq) {
                size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
-               pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+               dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
                    qedf->hw_p_cpuq);
        }
 
index e5bd035..105b0e4 100644 (file)
@@ -806,11 +806,11 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
        memset(&qedi->pf_params.iscsi_pf_params, 0,
               sizeof(qedi->pf_params.iscsi_pf_params));
 
-       qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+       qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
                        qedi->num_queues * sizeof(struct qedi_glbl_q_params),
-                       &qedi->hw_p_cpuq);
+                       &qedi->hw_p_cpuq, GFP_KERNEL);
        if (!qedi->p_cpuq) {
-               QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+               QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
                rval = -1;
                goto err_alloc_mem;
        }
@@ -871,7 +871,7 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
 
        if (qedi->p_cpuq) {
                size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
-               pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+               dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
                                    qedi->hw_p_cpuq);
        }
 
index 390775d..15a50cc 100644 (file)
@@ -1750,7 +1750,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
        uint8_t *sp, *tbuf;
        dma_addr_t p_tbuf;
 
-       tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+       tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
        if (!tbuf)
                return -ENOMEM;
 #endif
@@ -1841,7 +1841,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
 
  out:
 #if DUMP_IT_BACK
-       pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+       dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
 #endif
        return err;
 }
@@ -4259,8 +4259,8 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        ha->devnum = devnum;    /* specifies microcode load address */
 
 #ifdef QLA_64BIT_PTR
-       if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
-               if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+               if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
                        printk(KERN_WARNING "scsi(%li): Unable to set a "
                               "suitable DMA mask - aborting\n", ha->host_no);
                        error = -ENODEV;
@@ -4270,7 +4270,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
                        ha->host_no);
 #else
-       if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
                printk(KERN_WARNING "scsi(%li): Unable to set a "
                       "suitable DMA mask - aborting\n", ha->host_no);
                error = -ENODEV;
@@ -4278,17 +4278,17 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 #endif
 
-       ha->request_ring = pci_alloc_consistent(ha->pdev,
+       ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
                        ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
-                       &ha->request_dma);
+                       &ha->request_dma, GFP_KERNEL);
        if (!ha->request_ring) {
                printk(KERN_INFO "qla1280: Failed to get request memory\n");
                goto error_put_host;
        }
 
-       ha->response_ring = pci_alloc_consistent(ha->pdev,
+       ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
                        ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
-                       &ha->response_dma);
+                       &ha->response_dma, GFP_KERNEL);
        if (!ha->response_ring) {
                printk(KERN_INFO "qla1280: Failed to get response memory\n");
                goto error_free_request_ring;
@@ -4370,11 +4370,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        release_region(host->io_port, 0xff);
 #endif
  error_free_response_ring:
-       pci_free_consistent(ha->pdev,
+       dma_free_coherent(&ha->pdev->dev,
                        ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
                        ha->response_ring, ha->response_dma);
  error_free_request_ring:
-       pci_free_consistent(ha->pdev,
+       dma_free_coherent(&ha->pdev->dev,
                        ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
                        ha->request_ring, ha->request_dma);
  error_put_host:
@@ -4404,10 +4404,10 @@ qla1280_remove_one(struct pci_dev *pdev)
        release_region(host->io_port, 0xff);
 #endif
 
-       pci_free_consistent(ha->pdev,
+       dma_free_coherent(&ha->pdev->dev,
                        ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
                        ha->request_ring, ha->request_dma);
-       pci_free_consistent(ha->pdev,
+       dma_free_coherent(&ha->pdev->dev,
                        ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
                        ha->response_ring, ha->response_dma);
 
index 4888b99..b28f159 100644 (file)
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return 0;
 
+       mutex_lock(&ha->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&ha->optrom_mutex);
+               return -EAGAIN;
+       }
+
        if (IS_NOCACHE_VPD_TYPE(ha))
                ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
                    ha->nvram_size);
+       mutex_unlock(&ha->optrom_mutex);
+
        return memory_read_from_buffer(buf, count, &off, ha->nvram,
                                        ha->nvram_size);
 }
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
                return -EAGAIN;
        }
 
+       mutex_lock(&ha->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
+               return -EAGAIN;
+       }
+
        /* Write NVRAM. */
        ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
        ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
-           count);
+           count);
+       mutex_unlock(&ha->optrom_mutex);
 
        ql_dbg(ql_dbg_user, vha, 0x7060,
            "Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                size = ha->optrom_size - start;
 
        mutex_lock(&ha->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&ha->optrom_mutex);
+               return -EAGAIN;
+       }
        switch (val) {
        case 0:
                if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
                    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
                        faddr = ha->flt_region_vpd_sec << 2;
 
+               mutex_lock(&ha->optrom_mutex);
+               if (qla2x00_chip_is_down(vha)) {
+                       mutex_unlock(&ha->optrom_mutex);
+                       return -EAGAIN;
+               }
                ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
                    ha->vpd_size);
+               mutex_unlock(&ha->optrom_mutex);
        }
        return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
 }
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
        if (unlikely(pci_channel_offline(ha->pdev)))
                return 0;
 
-       if (qla2x00_chip_is_down(vha))
-               return 0;
-
        if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
            !ha->isp_ops->write_nvram)
                return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
                return -EAGAIN;
        }
 
+       mutex_lock(&ha->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&ha->optrom_mutex);
+               return -EAGAIN;
+       }
+
        /* Write NVRAM. */
        ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
        ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
 
        /* Update flash version information for 4Gb & above. */
-       if (!IS_FWI2_CAPABLE(ha))
+       if (!IS_FWI2_CAPABLE(ha)) {
+               mutex_unlock(&ha->optrom_mutex);
                return -EINVAL;
+       }
 
        tmp_data = vmalloc(256);
        if (!tmp_data) {
+               mutex_unlock(&ha->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x706b,
                    "Unable to allocate memory for VPD information update.\n");
                return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
        ha->isp_ops->get_flash_version(vha, tmp_data);
        vfree(tmp_data);
 
+       mutex_unlock(&ha->optrom_mutex);
+
        return count;
 }
 
@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
                return 0;
 
-       if (qla2x00_chip_is_down(vha))
+       mutex_lock(&vha->hw->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                return 0;
+       }
 
        rval = qla2x00_read_sfp_dev(vha, buf, count);
+       mutex_unlock(&vha->hw->optrom_mutex);
+
        if (rval)
                return -EIO;
 
@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
 
        if (unlikely(pci_channel_offline(ha->pdev)))
                return 0;
-
-       if (qla2x00_chip_is_down(vha))
+       mutex_lock(&vha->hw->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                return 0;
+       }
 
        if (ha->xgmac_data)
                goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
        ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
            &ha->xgmac_data_dma, GFP_KERNEL);
        if (!ha->xgmac_data) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x7076,
                    "Unable to allocate memory for XGMAC read-data.\n");
                return 0;
@@ -806,6 +847,8 @@ do_read:
 
        rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
            XGMAC_DATA_SIZE, &actual_size);
+
+       mutex_unlock(&vha->hw->optrom_mutex);
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7077,
                    "Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
 
        if (ha->dcbx_tlv)
                goto do_read;
-
-       if (qla2x00_chip_is_down(vha))
+       mutex_lock(&vha->hw->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                return 0;
+       }
 
        ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
            &ha->dcbx_tlv_dma, GFP_KERNEL);
        if (!ha->dcbx_tlv) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x7078,
                    "Unable to allocate memory for DCBX TLV read-data.\n");
                return -ENOMEM;
@@ -859,6 +905,9 @@ do_read:
 
        rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
            DCBX_TLV_DATA_SIZE);
+
+       mutex_unlock(&vha->hw->optrom_mutex);
+
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0x7079,
                    "Unable to read DCBX TLV (%x).\n", rval);
@@ -1158,6 +1207,34 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
        return strlen(buf);
 }
 
+static ssize_t
+qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
+                      char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
+           vha->hw->last_zio_threshold);
+}
+
+static ssize_t
+qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int val = 0;
+
+       if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
+               return -EINVAL;
+       if (sscanf(buf, "%d", &val) != 1)
+               return -EINVAL;
+       if (val < 0 || val > 256)
+               return -ERANGE;
+
+       atomic_set(&vha->hw->zio_threshold, val);
+       return strlen(buf);
+}
+
 static ssize_t
 qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
                    char *buf)
@@ -1184,15 +1261,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
        if (IS_QLA2100(ha) || IS_QLA2200(ha))
                return -EPERM;
 
+       if (sscanf(buf, "%d", &val) != 1)
+               return -EINVAL;
+
+       mutex_lock(&vha->hw->optrom_mutex);
        if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x707a,
                    "Abort ISP active -- ignoring beacon request.\n");
                return -EBUSY;
        }
 
-       if (sscanf(buf, "%d", &val) != 1)
-               return -EINVAL;
-
        if (val)
                rval = ha->isp_ops->beacon_on(vha);
        else
@@ -1201,6 +1280,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
        if (rval != QLA_SUCCESS)
                count = 0;
 
+       mutex_unlock(&vha->hw->optrom_mutex);
+
        return count;
 }
 
@@ -1370,18 +1451,24 @@ qla2x00_thermal_temp_show(struct device *dev,
 {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        uint16_t temp = 0;
+       int rc;
 
+       mutex_lock(&vha->hw->optrom_mutex);
        if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
                goto done;
        }
 
        if (vha->hw->flags.eeh_busy) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
                goto done;
        }
 
-       if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+       rc = qla2x00_get_thermal_temp(vha, &temp);
+       mutex_unlock(&vha->hw->optrom_mutex);
+       if (rc == QLA_SUCCESS)
                return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
 
 done:
@@ -1402,13 +1489,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
                return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
        }
 
-       if (qla2x00_chip_is_down(vha))
+       mutex_lock(&vha->hw->optrom_mutex);
+       if (qla2x00_chip_is_down(vha)) {
+               mutex_unlock(&vha->hw->optrom_mutex);
                ql_log(ql_log_warn, vha, 0x707c,
                    "ISP reset active.\n");
-       else if (!vha->hw->flags.eeh_busy)
-               rval = qla2x00_get_firmware_state(vha, state);
-       if (rval != QLA_SUCCESS)
+               goto out;
+       } else if (vha->hw->flags.eeh_busy) {
+               mutex_unlock(&vha->hw->optrom_mutex);
+               goto out;
+       }
+
+       rval = qla2x00_get_firmware_state(vha, state);
+       mutex_unlock(&vha->hw->optrom_mutex);
+out:
+       if (rval != QLA_SUCCESS) {
                memset(state, -1, sizeof(state));
+               rval = qla2x00_get_firmware_state(vha, state);
+       }
 
        return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
            state[0], state[1], state[2], state[3], state[4], state[5]);
@@ -1534,6 +1632,433 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
            ha->max_speed_sup ? "32Gps" : "16Gps");
 }
 
+/* ----- */
+
+static ssize_t
+qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int len = 0;
+
+       len += scnprintf(buf + len, PAGE_SIZE-len,
+           "Supported options: enabled | disabled | dual | exclusive\n");
+
+       /* --- */
+       len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
+
+       switch (vha->qlini_mode) {
+       case QLA2XXX_INI_MODE_EXCLUSIVE:
+               len += scnprintf(buf + len, PAGE_SIZE-len,
+                   QLA2XXX_INI_MODE_STR_EXCLUSIVE);
+               break;
+       case QLA2XXX_INI_MODE_DISABLED:
+               len += scnprintf(buf + len, PAGE_SIZE-len,
+                   QLA2XXX_INI_MODE_STR_DISABLED);
+               break;
+       case QLA2XXX_INI_MODE_ENABLED:
+               len += scnprintf(buf + len, PAGE_SIZE-len,
+                   QLA2XXX_INI_MODE_STR_ENABLED);
+               break;
+       case QLA2XXX_INI_MODE_DUAL:
+               len += scnprintf(buf + len, PAGE_SIZE-len,
+                   QLA2XXX_INI_MODE_STR_DUAL);
+               break;
+       }
+       len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
+
+       return len;
+}
+
+static char *mode_to_str[] = {
+       "exclusive",
+       "disabled",
+       "enabled",
+       "dual",
+};
+
+#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
+static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+{
+       int rc = 0;
+       enum {
+               NO_ACTION,
+               MODE_CHANGE_ACCEPT,
+               MODE_CHANGE_NO_ACTION,
+               TARGET_STILL_ACTIVE,
+       };
+       int action = NO_ACTION;
+       int set_mode = 0;
+       u8  eo_toggle = 0;      /* exchange offload flipped */
+
+       switch (vha->qlini_mode) {
+       case QLA2XXX_INI_MODE_DISABLED:
+               switch (op) {
+               case QLA2XXX_INI_MODE_DISABLED:
+                       if (qla_tgt_mode_enabled(vha)) {
+                               if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+                                   vha->hw->flags.exchoffld_enabled)
+                                       eo_toggle = 1;
+                               if (((vha->ql2xexchoffld !=
+                                   vha->u_ql2xexchoffld) &&
+                                   NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+                                   eo_toggle) {
+                                       /*
+                                        * The number of exchange to be offload
+                                        * was tweaked or offload option was
+                                        * flipped
+                                        */
+                                       action = MODE_CHANGE_ACCEPT;
+                               } else {
+                                       action = MODE_CHANGE_NO_ACTION;
+                               }
+                       } else {
+                               action = MODE_CHANGE_NO_ACTION;
+                       }
+                       break;
+               case QLA2XXX_INI_MODE_EXCLUSIVE:
+                       if (qla_tgt_mode_enabled(vha)) {
+                               if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+                                   vha->hw->flags.exchoffld_enabled)
+                                       eo_toggle = 1;
+                               if (((vha->ql2xexchoffld !=
+                                   vha->u_ql2xexchoffld) &&
+                                   NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+                                   eo_toggle) {
+                                       /*
+                                        * The number of exchange to be offload
+                                        * was tweaked or offload option was
+                                        * flipped
+                                        */
+                                       action = MODE_CHANGE_ACCEPT;
+                               } else {
+                                       action = MODE_CHANGE_NO_ACTION;
+                               }
+                       } else {
+                               action = MODE_CHANGE_ACCEPT;
+                       }
+                       break;
+               case QLA2XXX_INI_MODE_DUAL:
+                       action = MODE_CHANGE_ACCEPT;
+                       /* active_mode is target only, reset it to dual */
+                       if (qla_tgt_mode_enabled(vha)) {
+                               set_mode = 1;
+                               action = MODE_CHANGE_ACCEPT;
+                       } else {
+                               action = MODE_CHANGE_NO_ACTION;
+                       }
+                       break;
+
+               case QLA2XXX_INI_MODE_ENABLED:
+                       if (qla_tgt_mode_enabled(vha))
+                               action = TARGET_STILL_ACTIVE;
+                       else {
+                               action = MODE_CHANGE_ACCEPT;
+                               set_mode = 1;
+                       }
+                       break;
+               }
+               break;
+
+       case QLA2XXX_INI_MODE_EXCLUSIVE:
+               switch (op) {
+               case QLA2XXX_INI_MODE_EXCLUSIVE:
+                       if (qla_tgt_mode_enabled(vha)) {
+                               if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+                                   vha->hw->flags.exchoffld_enabled)
+                                       eo_toggle = 1;
+                               if (((vha->ql2xexchoffld !=
+                                   vha->u_ql2xexchoffld) &&
+                                   NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+                                   eo_toggle)
+                                       /*
+                                        * The number of exchange to be offload
+                                        * was tweaked or offload option was
+                                        * flipped
+                                        */
+                                       action = MODE_CHANGE_ACCEPT;
+                               else
+                                       action = NO_ACTION;
+                       } else
+                               action = NO_ACTION;
+
+                       break;
+
+               case QLA2XXX_INI_MODE_DISABLED:
+                       if (qla_tgt_mode_enabled(vha)) {
+                               if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+                                   vha->hw->flags.exchoffld_enabled)
+                                       eo_toggle = 1;
+                               if (((vha->ql2xexchoffld !=
+                                     vha->u_ql2xexchoffld) &&
+                                   NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+                                   eo_toggle)
+                                       action = MODE_CHANGE_ACCEPT;
+                               else
+                                       action = MODE_CHANGE_NO_ACTION;
+                       } else
+                               action = MODE_CHANGE_NO_ACTION;
+                       break;
+
+               case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
+                       if (qla_tgt_mode_enabled(vha)) {
+                               action = MODE_CHANGE_ACCEPT;
+                               set_mode = 1;
+                       } else
+                               action = MODE_CHANGE_ACCEPT;
+                       break;
+
+               case QLA2XXX_INI_MODE_ENABLED:
+                       if (qla_tgt_mode_enabled(vha))
+                               action = TARGET_STILL_ACTIVE;
+                       else {
+                               if (vha->hw->flags.fw_started)
+                                       action = MODE_CHANGE_NO_ACTION;
+                               else
+                                       action = MODE_CHANGE_ACCEPT;
+                       }
+                       break;
+               }
+               break;
+
+       case QLA2XXX_INI_MODE_ENABLED:
+               switch (op) {
+               case QLA2XXX_INI_MODE_ENABLED:
+                       if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
+                           vha->hw->flags.exchoffld_enabled)
+                               eo_toggle = 1;
+                       if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
+                               NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
+                           eo_toggle)
+                               action = MODE_CHANGE_ACCEPT;
+                       else
+                               action = NO_ACTION;
+                       break;
+               case QLA2XXX_INI_MODE_DUAL:
+               case QLA2XXX_INI_MODE_DISABLED:
+                       action = MODE_CHANGE_ACCEPT;
+                       break;
+               default:
+                       action = MODE_CHANGE_NO_ACTION;
+                       break;
+               }
+               break;
+
+       case QLA2XXX_INI_MODE_DUAL:
+               switch (op) {
+               case QLA2XXX_INI_MODE_DUAL:
+                       if (qla_tgt_mode_enabled(vha) ||
+                           qla_dual_mode_enabled(vha)) {
+                               if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+                                       vha->u_ql2xiniexchg) !=
+                                   vha->hw->flags.exchoffld_enabled)
+                                       eo_toggle = 1;
+
+                               if ((((vha->ql2xexchoffld +
+                                      vha->ql2xiniexchg) !=
+                                   (vha->u_ql2xiniexchg +
+                                    vha->u_ql2xexchoffld)) &&
+                                   NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+                                       vha->u_ql2xexchoffld)) || eo_toggle)
+                                       action = MODE_CHANGE_ACCEPT;
+                               else
+                                       action = NO_ACTION;
+                       } else {
+                               if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+                                       vha->u_ql2xiniexchg) !=
+                                   vha->hw->flags.exchoffld_enabled)
+                                       eo_toggle = 1;
+
+                               if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
+                                   != (vha->u_ql2xiniexchg +
+                                       vha->u_ql2xexchoffld)) &&
+                                   NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+                                       vha->u_ql2xexchoffld)) || eo_toggle)
+                                       action = MODE_CHANGE_NO_ACTION;
+                               else
+                                       action = NO_ACTION;
+                       }
+                       break;
+
+               case QLA2XXX_INI_MODE_DISABLED:
+                       if (qla_tgt_mode_enabled(vha) ||
+                           qla_dual_mode_enabled(vha)) {
+                               /* turning off initiator mode */
+                               set_mode = 1;
+                               action = MODE_CHANGE_ACCEPT;
+                       } else {
+                               action = MODE_CHANGE_NO_ACTION;
+                       }
+                       break;
+
+               case QLA2XXX_INI_MODE_EXCLUSIVE:
+                       if (qla_tgt_mode_enabled(vha) ||
+                           qla_dual_mode_enabled(vha)) {
+                               set_mode = 1;
+                               action = MODE_CHANGE_ACCEPT;
+                       } else {
+                               action = MODE_CHANGE_ACCEPT;
+                       }
+                       break;
+
+               case QLA2XXX_INI_MODE_ENABLED:
+                       if (qla_tgt_mode_enabled(vha) ||
+                           qla_dual_mode_enabled(vha)) {
+                               action = TARGET_STILL_ACTIVE;
+                       } else {
+                               action = MODE_CHANGE_ACCEPT;
+                       }
+               }
+               break;
+       }
+
+       switch (action) {
+       case MODE_CHANGE_ACCEPT:
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+                   mode_to_str[vha->qlini_mode], mode_to_str[op],
+                   vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+                   vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+
+               vha->qlini_mode = op;
+               vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+               vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+               if (set_mode)
+                       qlt_set_mode(vha);
+               vha->flags.online = 1;
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               break;
+
+       case MODE_CHANGE_NO_ACTION:
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+                   mode_to_str[vha->qlini_mode], mode_to_str[op],
+                   vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+                   vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+               vha->qlini_mode = op;
+               vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+               vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+               break;
+
+       case TARGET_STILL_ACTIVE:
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Target Mode is active. Unable to change Mode.\n");
+               break;
+
+       case NO_ACTION:
+       default:
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
+                   vha->qlini_mode, op,
+                   vha->ql2xexchoffld, vha->u_ql2xexchoffld);
+               break;
+       }
+
+       return rc;
+}
+
+static ssize_t
+qlini_mode_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int ini;
+
+       if (!buf)
+               return -EINVAL;
+
+       if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
+               strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
+               ini = QLA2XXX_INI_MODE_EXCLUSIVE;
+       else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
+               strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
+               ini = QLA2XXX_INI_MODE_DISABLED;
+       else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
+                 strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
+               ini = QLA2XXX_INI_MODE_ENABLED;
+       else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
+               strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
+               ini = QLA2XXX_INI_MODE_DUAL;
+       else
+               return -EINVAL;
+
+       qla_set_ini_mode(vha, ini);
+       return strlen(buf);
+}
+
+static ssize_t
+ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int len = 0;
+
+       len += scnprintf(buf + len, PAGE_SIZE-len,
+               "target exchange: new %d : current: %d\n\n",
+               vha->u_ql2xexchoffld, vha->ql2xexchoffld);
+
+       len += scnprintf(buf + len, PAGE_SIZE-len,
+           "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+           vha->host_no);
+
+       return len;
+}
+
+static ssize_t
+ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int val = 0;
+
+       if (sscanf(buf, "%d", &val) != 1)
+               return -EINVAL;
+
+       if (val > FW_MAX_EXCHANGES_CNT)
+               val = FW_MAX_EXCHANGES_CNT;
+       else if (val < 0)
+               val = 0;
+
+       vha->u_ql2xexchoffld = val;
+       return strlen(buf);
+}
+
+static ssize_t
+ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int len = 0;
+
+       len += scnprintf(buf + len, PAGE_SIZE-len,
+               "target exchange: new %d : current: %d\n\n",
+               vha->u_ql2xiniexchg, vha->ql2xiniexchg);
+
+       len += scnprintf(buf + len, PAGE_SIZE-len,
+           "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+           vha->host_no);
+
+       return len;
+}
+
+static ssize_t
+ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int val = 0;
+
+       if (sscanf(buf, "%d", &val) != 1)
+               return -EINVAL;
+
+       if (val > FW_MAX_EXCHANGES_CNT)
+               val = FW_MAX_EXCHANGES_CNT;
+       else if (val < 0)
+               val = 0;
+
+       vha->u_ql2xiniexchg = val;
+       return strlen(buf);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +2106,13 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
 static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
 static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(zio_threshold, 0644,
+    qla_zio_threshold_show,
+    qla_zio_threshold_store);
+static DEVICE_ATTR_RW(qlini_mode);
+static DEVICE_ATTR_RW(ql2xexchoffld);
+static DEVICE_ATTR_RW(ql2xiniexchg);
+
 
 struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_driver_version,
@@ -1617,9 +2149,28 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_pep_version,
        &dev_attr_min_link_speed,
        &dev_attr_max_speed_sup,
+       &dev_attr_zio_threshold,
+       NULL, /* reserve for qlini_mode */
+       NULL, /* reserve for ql2xiniexchg */
+       NULL, /* reserve for ql2xexchoffld */
        NULL,
 };
 
+void qla_insert_tgt_attrs(void)
+{
+       struct device_attribute **attr;
+
+       /* advance to empty slot */
+       for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
+               continue;
+
+       *attr = &dev_attr_qlini_mode;
+       attr++;
+       *attr = &dev_attr_ql2xiniexchg;
+       attr++;
+       *attr = &dev_attr_ql2xexchoffld;
+}
+
 /* Host attributes. */
 
 static void
index c11a89b..4a9fd8d 100644 (file)
@@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
                vha = shost_priv(host);
        }
 
-       if (qla2x00_reset_active(vha)) {
+       if (qla2x00_chip_is_down(vha)) {
                ql_dbg(ql_dbg_user, vha, 0x709f,
                    "BSG: ISP abort active/needed -- cmd=%d.\n",
                    bsg_request->msgcode);
index a9dc9c4..26b93c5 100644 (file)
@@ -262,8 +262,8 @@ struct name_list_extended {
        struct get_name_list_extended *l;
        dma_addr_t              ldma;
        struct list_head        fcports;
-       spinlock_t              fcports_lock;
        u32                     size;
+       u8                      sent;
 };
 /*
  * Timeout timer counts in seconds
@@ -519,6 +519,7 @@ struct srb_iocb {
 enum {
        TYPE_SRB,
        TYPE_TGT_CMD,
+       TYPE_TGT_TMCMD,         /* task management */
 };
 
 typedef struct srb {
@@ -2280,7 +2281,6 @@ struct ct_sns_desc {
 enum discovery_state {
        DSC_DELETED,
        DSC_GNN_ID,
-       DSC_GID_PN,
        DSC_GNL,
        DSC_LOGIN_PEND,
        DSC_LOGIN_FAILED,
@@ -2305,7 +2305,6 @@ enum login_state {        /* FW control Target side */
 enum fcport_mgt_event {
        FCME_RELOGIN = 1,
        FCME_RSCN,
-       FCME_GIDPN_DONE,
        FCME_PLOGI_DONE,        /* Initiator side sent LLIOCB */
        FCME_PRLI_DONE,
        FCME_GNL_DONE,
@@ -2351,7 +2350,7 @@ typedef struct fc_port {
        unsigned int login_succ:1;
        unsigned int query:1;
        unsigned int id_changed:1;
-       unsigned int rscn_rcvd:1;
+       unsigned int scan_needed:1;
 
        struct work_struct nvme_del_work;
        struct completion nvme_del_done;
@@ -2375,11 +2374,13 @@ typedef struct fc_port {
        unsigned long expires;
        struct list_head del_list_entry;
        struct work_struct free_work;
-
+       struct work_struct reg_work;
+       uint64_t jiffies_at_registration;
        struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
 
        uint16_t tgt_id;
        uint16_t old_tgt_id;
+       uint16_t sec_since_registration;
 
        uint8_t fcp_prio;
 
@@ -2412,6 +2413,7 @@ typedef struct fc_port {
        struct qla_tgt_sess *tgt_session;
        struct ct_sns_desc ct_desc;
        enum discovery_state disc_state;
+       enum discovery_state next_disc_state;
        enum login_state fw_login_state;
        unsigned long dm_login_expire;
        unsigned long plogi_nack_done_deadline;
@@ -3212,17 +3214,14 @@ enum qla_work_type {
        QLA_EVT_ASYNC_LOGOUT,
        QLA_EVT_ASYNC_LOGOUT_DONE,
        QLA_EVT_ASYNC_ADISC,
-       QLA_EVT_ASYNC_ADISC_DONE,
        QLA_EVT_UEVENT,
        QLA_EVT_AENFX,
-       QLA_EVT_GIDPN,
        QLA_EVT_GPNID,
        QLA_EVT_UNMAP,
        QLA_EVT_NEW_SESS,
        QLA_EVT_GPDB,
        QLA_EVT_PRLI,
        QLA_EVT_GPSC,
-       QLA_EVT_UPD_FCPORT,
        QLA_EVT_GNL,
        QLA_EVT_NACK,
        QLA_EVT_RELOGIN,
@@ -3483,6 +3482,9 @@ struct qla_qpair {
        struct list_head qp_list_elem; /* vha->qp_list */
        struct list_head hints_list;
        uint16_t cpuid;
+       uint16_t retry_term_cnt;
+       uint32_t retry_term_exchg_addr;
+       uint64_t retry_term_jiff;
        struct qla_tgt_counters tgt_counters;
 };
 
@@ -4184,6 +4186,10 @@ struct qla_hw_data {
 
        atomic_t        nvme_active_aen_cnt;
        uint16_t        nvme_last_rptd_aen;             /* Last recorded aen count */
+
+       atomic_t zio_threshold;
+       uint16_t last_zio_threshold;
+#define DEFAULT_ZIO_THRESHOLD 64
 };
 
 #define FW_ABILITY_MAX_SPEED_MASK      0xFUL
@@ -4263,10 +4269,11 @@ typedef struct scsi_qla_host {
 #define FX00_CRITEMP_RECOVERY  25
 #define FX00_HOST_INFO_RESEND  26
 #define QPAIR_ONLINE_CHECK_NEEDED      27
-#define SET_ZIO_THRESHOLD_NEEDED       28
+#define SET_NVME_ZIO_THRESHOLD_NEEDED  28
 #define DETECT_SFP_CHANGE      29
 #define N2N_LOGIN_NEEDED       30
 #define IOCB_WORK_ACTIVE       31
+#define SET_ZIO_THRESHOLD_NEEDED 32
 
        unsigned long   pci_flags;
 #define PFLG_DISCONNECTED      0       /* PCI device removed */
@@ -4369,6 +4376,13 @@ typedef struct scsi_qla_host {
        atomic_t        vref_count;
        struct qla8044_reset_template reset_tmplt;
        uint16_t        bbcr;
+
+       uint16_t u_ql2xexchoffld;
+       uint16_t u_ql2xiniexchg;
+       uint16_t qlini_mode;
+       uint16_t ql2xexchoffld;
+       uint16_t ql2xiniexchg;
+
        struct name_list_extended gnl;
        /* Count of active session/fcport */
        int fcport_count;
index 1789748..3673fcd 100644 (file)
@@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
 extern void qla2x00_quiesce_io(scsi_qla_host_t *);
 
 extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-
+void qla_register_fcport_fn(struct work_struct *);
 extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
 extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
 
@@ -73,8 +73,6 @@ extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
-extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
-    uint16_t *);
 struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
     enum qla_work_type);
 extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -109,6 +107,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
 int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_detect_sfp(scsi_qla_host_t *vha);
 int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
 void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +117,8 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
 int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
+void qla_rscn_replay(fc_port_t *fcport);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -158,6 +159,7 @@ extern int ql2xnvmeenable;
 extern int ql2xautodetectsfp;
 extern int ql2xenablemsix;
 extern int qla2xuseresexchforels;
+extern int ql2xexlogins;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -208,7 +210,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
 extern void qla2x00_sp_compl(void *, int);
 extern void qla2xxx_qpair_sp_free_dma(void *);
 extern void qla2xxx_qpair_sp_compl(void *, int);
-extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+extern void qla24xx_sched_upd_fcport(fc_port_t *);
 void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
        uint16_t *);
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
@@ -644,9 +646,6 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
 extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
        struct ct_sns_rsp *, const char *);
 extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
-int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
 
 extern void qla2x00_free_fcport(fc_port_t *);
 
@@ -677,6 +676,7 @@ void qla_scan_work_fn(struct work_struct *);
  */
 struct device_attribute;
 extern struct device_attribute *qla2x00_host_attrs[];
+extern struct device_attribute *qla2x00_host_attrs_dm[];
 struct fc_function_template;
 extern struct fc_function_template qla2xxx_transport_functions;
 extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -690,7 +690,7 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
 extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
 extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
        struct qla_fcp_prio_cfg *, uint8_t);
-
+void qla_insert_tgt_attrs(void);
 /*
  * Global Function Prototypes in qla_dfs.c source file.
  */
@@ -897,5 +897,6 @@ void qlt_unknown_atio_work_fn(struct work_struct *);
 void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
 void qlt_remove_target_resources(struct qla_hw_data *);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
+void qlt_set_mode(struct scsi_qla_host *);
 
 #endif /* _QLA_GBL_H */
index a0038d8..90cfa39 100644 (file)
@@ -2973,237 +2973,6 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
        }
 }
 
-/* GID_PN completion processing. */
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
-       fc_port_t *fcport = ea->fcport;
-
-       ql_dbg(ql_dbg_disc, vha, 0x201d,
-           "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
-           __func__, fcport->port_name, fcport->disc_state,
-           fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
-           fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
-
-       if (fcport->disc_state == DSC_DELETE_PEND)
-               return;
-
-       if (ea->sp->gen2 != fcport->login_gen) {
-               /* PLOGI/PRLI/LOGO came in while cmd was out.*/
-               ql_dbg(ql_dbg_disc, vha, 0x201e,
-                   "%s %8phC generation changed rscn %d|%d n",
-                   __func__, fcport->port_name, fcport->last_rscn_gen,
-                   fcport->rscn_gen);
-               return;
-       }
-
-       if (!ea->rc) {
-               if (ea->sp->gen1 == fcport->rscn_gen) {
-                       fcport->scan_state = QLA_FCPORT_FOUND;
-                       fcport->flags |= FCF_FABRIC_DEVICE;
-
-                       if (fcport->d_id.b24 == ea->id.b24) {
-                               /* cable plugged into the same place */
-                               switch (vha->host->active_mode) {
-                               case MODE_TARGET:
-                                       if (fcport->fw_login_state ==
-                                           DSC_LS_PRLI_COMP) {
-                                               u16 data[2];
-                                               /*
-                                                * Late RSCN was delivered.
-                                                * Remote port already login'ed.
-                                                */
-                                               ql_dbg(ql_dbg_disc, vha, 0x201f,
-                                                   "%s %d %8phC post adisc\n",
-                                                   __func__, __LINE__,
-                                                   fcport->port_name);
-                                               data[0] = data[1] = 0;
-                                               qla2x00_post_async_adisc_work(
-                                                   vha, fcport, data);
-                                       }
-                                       break;
-                               case MODE_INITIATOR:
-                               case MODE_DUAL:
-                               default:
-                                       ql_dbg(ql_dbg_disc, vha, 0x201f,
-                                           "%s %d %8phC post %s\n", __func__,
-                                           __LINE__, fcport->port_name,
-                                           (atomic_read(&fcport->state) ==
-                                           FCS_ONLINE) ? "adisc" : "gnl");
-
-                                       if (atomic_read(&fcport->state) ==
-                                           FCS_ONLINE) {
-                                               u16 data[2];
-
-                                               data[0] = data[1] = 0;
-                                               qla2x00_post_async_adisc_work(
-                                                   vha, fcport, data);
-                                       } else {
-                                               qla24xx_post_gnl_work(vha,
-                                                   fcport);
-                                       }
-                                       break;
-                               }
-                       } else { /* fcport->d_id.b24 != ea->id.b24 */
-                               fcport->d_id.b24 = ea->id.b24;
-                               fcport->id_changed = 1;
-                               if (fcport->deleted != QLA_SESS_DELETED) {
-                                       ql_dbg(ql_dbg_disc, vha, 0x2021,
-                                           "%s %d %8phC post del sess\n",
-                                           __func__, __LINE__, fcport->port_name);
-                                       qlt_schedule_sess_for_deletion(fcport);
-                               }
-                       }
-               } else { /* ea->sp->gen1 != fcport->rscn_gen */
-                       ql_dbg(ql_dbg_disc, vha, 0x2022,
-                           "%s %d %8phC post gidpn\n",
-                           __func__, __LINE__, fcport->port_name);
-                       /* rscn came in while cmd was out */
-                       qla24xx_post_gidpn_work(vha, fcport);
-               }
-       } else { /* ea->rc */
-               /* cable pulled */
-               if (ea->sp->gen1 == fcport->rscn_gen) {
-                       if (ea->sp->gen2 == fcport->login_gen) {
-                               ql_dbg(ql_dbg_disc, vha, 0x2042,
-                                   "%s %d %8phC post del sess\n", __func__,
-                                   __LINE__, fcport->port_name);
-                               qlt_schedule_sess_for_deletion(fcport);
-                       } else {
-                               ql_dbg(ql_dbg_disc, vha, 0x2045,
-                                   "%s %d %8phC login\n", __func__, __LINE__,
-                                   fcport->port_name);
-                               qla24xx_fcport_handle_login(vha, fcport);
-                       }
-               } else {
-                       ql_dbg(ql_dbg_disc, vha, 0x2049,
-                           "%s %d %8phC post gidpn\n", __func__, __LINE__,
-                           fcport->port_name);
-                       qla24xx_post_gidpn_work(vha, fcport);
-               }
-       }
-} /* gidpn_event */
-
-static void qla2x00_async_gidpn_sp_done(void *s, int res)
-{
-       struct srb *sp = s;
-       struct scsi_qla_host *vha = sp->vha;
-       fc_port_t *fcport = sp->fcport;
-       u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
-       struct event_arg ea;
-
-       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
-       memset(&ea, 0, sizeof(ea));
-       ea.fcport = fcport;
-       ea.id.b.domain = id[0];
-       ea.id.b.area = id[1];
-       ea.id.b.al_pa = id[2];
-       ea.sp = sp;
-       ea.rc = res;
-       ea.event = FCME_GIDPN_DONE;
-
-       if (res == QLA_FUNCTION_TIMEOUT) {
-               ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-                   "Async done-%s WWPN %8phC timed out.\n",
-                   sp->name, fcport->port_name);
-               qla24xx_post_gidpn_work(sp->vha, fcport);
-               sp->free(sp);
-               return;
-       } else if (res) {
-               ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-                   "Async done-%s fail res %x, WWPN %8phC\n",
-                   sp->name, res, fcport->port_name);
-       } else {
-               ql_dbg(ql_dbg_disc, vha, 0x204f,
-                   "Async done-%s good WWPN %8phC ID %3phC\n",
-                   sp->name, fcport->port_name, id);
-       }
-
-       qla2x00_fcport_event_handler(vha, &ea);
-
-       sp->free(sp);
-}
-
-int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
-       int rval = QLA_FUNCTION_FAILED;
-       struct ct_sns_req       *ct_req;
-       srb_t *sp;
-
-       if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
-               return rval;
-
-       fcport->disc_state = DSC_GID_PN;
-       fcport->scan_state = QLA_FCPORT_SCAN;
-       sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
-       if (!sp)
-               goto done;
-
-       fcport->flags |= FCF_ASYNC_SENT;
-       sp->type = SRB_CT_PTHRU_CMD;
-       sp->name = "gidpn";
-       sp->gen1 = fcport->rscn_gen;
-       sp->gen2 = fcport->login_gen;
-
-       qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
-       /* CT_IU preamble  */
-       ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
-               GID_PN_RSP_SIZE);
-
-       /* GIDPN req */
-       memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
-               WWN_SIZE);
-
-       /* req & rsp use the same buffer */
-       sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
-       sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
-       sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
-       sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
-       sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
-       sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
-       sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
-       sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
-       sp->done = qla2x00_async_gidpn_sp_done;
-
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
-       ql_dbg(ql_dbg_disc, vha, 0x20a4,
-           "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
-           sp->name, fcport->port_name,
-           sp->handle, fcport->loop_id, fcport->d_id.b.domain,
-           fcport->d_id.b.area, fcport->d_id.b.al_pa);
-       return rval;
-
-done_free_sp:
-       sp->free(sp);
-done:
-       fcport->flags &= ~FCF_ASYNC_ACTIVE;
-       return rval;
-}
-
-int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
-       struct qla_work_evt *e;
-       int ls;
-
-       ls = atomic_read(&vha->loop_state);
-       if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
-               test_bit(UNLOADING, &vha->dpc_flags))
-               return 0;
-
-       e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
-       if (!e)
-               return QLA_FUNCTION_FAILED;
-
-       e->u.fcport.fcport = fcport;
-       fcport->flags |= FCF_ASYNC_ACTIVE;
-       return qla2x00_post_work(vha, e);
-}
-
 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
        struct qla_work_evt *e;
@@ -3237,9 +3006,6 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
                    __func__, fcport->port_name);
                return;
        } else if (ea->sp->gen1 != fcport->rscn_gen) {
-               ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-                   __func__, __LINE__, fcport->port_name);
-               qla24xx_post_gidpn_work(vha, fcport);
                return;
        }
 
@@ -3261,6 +3027,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
            "Async done-%s res %x, WWPN %8phC \n",
            sp->name, res, fcport->port_name);
 
+       if (res == QLA_FUNCTION_TIMEOUT)
+               return;
+
        if (res == (DID_ERROR << 16)) {
                /* entry status error */
                goto done;
@@ -3272,7 +3041,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
                        ql_dbg(ql_dbg_disc, vha, 0x2019,
                            "GPSC command unsupported, disabling query.\n");
                        ha->flags.gpsc_supported = 0;
-                       res = QLA_SUCCESS;
+                       goto done;
                }
        } else {
                switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
@@ -3305,7 +3074,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
                    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
                    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
        }
-done:
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_GPSC_DONE;
        ea.rc = res;
@@ -3313,6 +3081,7 @@ done:
        ea.sp = sp;
        qla2x00_fcport_event_handler(vha, &ea);
 
+done:
        sp->free(sp);
 }
 
@@ -3355,15 +3124,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        sp->done = qla24xx_async_gpsc_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_disc, vha, 0x205e,
            "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
            sp->name, fcport->port_name, sp->handle,
            fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        return rval;
 
 done_free_sp:
@@ -3442,26 +3211,10 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
        if (ea->rc) {
                /* cable is disconnected */
                list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
-                       if (fcport->d_id.b24 == ea->id.b24) {
-                               ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                   "%s %d %8phC DS %d\n",
-                                   __func__, __LINE__,
-                                   fcport->port_name,
-                                   fcport->disc_state);
+                       if (fcport->d_id.b24 == ea->id.b24)
                                fcport->scan_state = QLA_FCPORT_SCAN;
-                               switch (fcport->disc_state) {
-                               case DSC_DELETED:
-                               case DSC_DELETE_PEND:
-                                       break;
-                               default:
-                                       ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                           "%s %d %8phC post del sess\n",
-                                           __func__, __LINE__,
-                                           fcport->port_name);
-                                       qlt_schedule_sess_for_deletion(fcport);
-                                       break;
-                               }
-                       }
+
+                       qlt_schedule_sess_for_deletion(fcport);
                }
        } else {
                /* cable is connected */
@@ -3470,34 +3223,19 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
                        list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
                            list) {
                                if ((conflict->d_id.b24 == ea->id.b24) &&
-                                   (fcport != conflict)) {
-                                       /* 2 fcports with conflict Nport ID or
+                                   (fcport != conflict))
+                                       /*
+                                        * 2 fcports with conflict Nport ID or
                                         * an existing fcport is having nport ID
                                         * conflict with new fcport.
                                         */
 
-                                       ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                           "%s %d %8phC DS %d\n",
-                                           __func__, __LINE__,
-                                           conflict->port_name,
-                                           conflict->disc_state);
                                        conflict->scan_state = QLA_FCPORT_SCAN;
-                                       switch (conflict->disc_state) {
-                                       case DSC_DELETED:
-                                       case DSC_DELETE_PEND:
-                                               break;
-                                       default:
-                                               ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                                   "%s %d %8phC post del sess\n",
-                                                   __func__, __LINE__,
-                                                   conflict->port_name);
-                                               qlt_schedule_sess_for_deletion
-                                                       (conflict);
-                                               break;
-                                       }
-                               }
+
+                               qlt_schedule_sess_for_deletion(conflict);
                        }
 
+                       fcport->scan_needed = 0;
                        fcport->rscn_gen++;
                        fcport->scan_state = QLA_FCPORT_FOUND;
                        fcport->flags |= FCF_FABRIC_DEVICE;
@@ -3548,19 +3286,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
                                            conflict->disc_state);
 
                                        conflict->scan_state = QLA_FCPORT_SCAN;
-                                       switch (conflict->disc_state) {
-                                       case DSC_DELETED:
-                                       case DSC_DELETE_PEND:
-                                               break;
-                                       default:
-                                               ql_dbg(ql_dbg_disc, vha, 0xffff,
-                                                   "%s %d %8phC post del sess\n",
-                                                   __func__, __LINE__,
-                                                   conflict->port_name);
-                                               qlt_schedule_sess_for_deletion
-                                                       (conflict);
-                                               break;
-                                       }
+                                       qlt_schedule_sess_for_deletion(conflict);
                                }
                        }
 
@@ -3724,13 +3450,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        sp->done = qla2x00_async_gpnid_sp_done;
 
+       ql_dbg(ql_dbg_disc, vha, 0x2067,
+           "Async-%s hdl=%x ID %3phC.\n", sp->name,
+           sp->handle, ct_req->req.port_id.port_id);
+
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;
 
-       ql_dbg(ql_dbg_disc, vha, 0x2067,
-           "Async-%s hdl=%x ID %3phC.\n", sp->name,
-           sp->handle, ct_req->req.port_id.port_id);
        return rval;
 
 done_free_sp:
@@ -3896,9 +3623,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
        fc_port_t *fcport;
        u32 i, rc;
        bool found;
-       struct fab_scan_rp *rp;
+       struct fab_scan_rp *rp, *trp;
        unsigned long flags;
        u8 recheck = 0;
+       u16 dup = 0, dup_cnt = 0;
 
        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "%s enter\n", __func__);
@@ -3929,6 +3657,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
 
        for (i = 0; i < vha->hw->max_fibre_devices; i++) {
                u64 wwn;
+               int k;
 
                rp = &vha->scan.l[i];
                found = false;
@@ -3937,6 +3666,20 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                if (wwn == 0)
                        continue;
 
+               /* Remove duplicate NPORT ID entries from switch data base */
+               for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
+                       trp = &vha->scan.l[k];
+                       if (rp->id.b24 == trp->id.b24) {
+                               dup = 1;
+                               dup_cnt++;
+                               ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+                                   vha, 0xffff,
+                                   "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
+                                   rp->id.b24, rp->port_name, trp->port_name);
+                               memset(trp, 0, sizeof(*trp));
+                       }
+               }
+
                if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
                        continue;
 
@@ -3951,7 +3694,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
                                continue;
-                       fcport->rscn_rcvd = 0;
+                       fcport->scan_needed = 0;
                        fcport->scan_state = QLA_FCPORT_FOUND;
                        found = true;
                        /*
@@ -3976,25 +3719,30 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                }
        }
 
+       if (dup) {
+               ql_log(ql_log_warn, vha, 0xffff,
+                   "Detected %d duplicate NPORT ID(s) from switch data base\n",
+                   dup_cnt);
+       }
+
        /*
         * Logout all previous fabric dev marked lost, except FCP2 devices.
         */
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-                       fcport->rscn_rcvd = 0;
+                       fcport->scan_needed = 0;
                        continue;
                }
 
                if (fcport->scan_state != QLA_FCPORT_FOUND) {
-                       fcport->rscn_rcvd = 0;
+                       fcport->scan_needed = 0;
                        if ((qla_dual_mode_enabled(vha) ||
                                qla_ini_mode_enabled(vha)) &&
                            atomic_read(&fcport->state) == FCS_ONLINE) {
-                               qla2x00_mark_device_lost(vha, fcport,
-                                   ql2xplogiabsentdevice, 0);
+                               if (fcport->loop_id != FC_NO_LOOP_ID) {
+                                       if (fcport->flags & FCF_FCP2_DEVICE)
+                                               fcport->logout_on_delete = 0;
 
-                               if (fcport->loop_id != FC_NO_LOOP_ID &&
-                                   (fcport->flags & FCF_FCP2_DEVICE) == 0) {
                                        ql_dbg(ql_dbg_disc, vha, 0x20f0,
                                            "%s %d %8phC post del sess\n",
                                            __func__, __LINE__,
@@ -4005,7 +3753,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                                }
                        }
                } else {
-                       if (fcport->rscn_rcvd ||
+                       if (fcport->scan_needed ||
                            fcport->disc_state != DSC_LOGIN_COMPLETE) {
                                if (fcport->login_retry == 0) {
                                        fcport->login_retry =
@@ -4015,7 +3763,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                                            fcport->port_name, fcport->loop_id,
                                            fcport->login_retry);
                                }
-                               fcport->rscn_rcvd = 0;
+                               fcport->scan_needed = 0;
                                qla24xx_fcport_handle_login(vha, fcport);
                        }
                }
@@ -4030,7 +3778,7 @@ out:
 
        if (recheck) {
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
-                       if (fcport->rscn_rcvd) {
+                       if (fcport->scan_needed) {
                                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                                break;
@@ -4039,6 +3787,41 @@ out:
        }
 }
 
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+       struct qla_work_evt *e;
+
+       if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+               return QLA_PARAMETER_ERROR;
+
+       e = qla2x00_alloc_work(vha, cmd);
+       if (!e)
+               return QLA_FUNCTION_FAILED;
+
+       e->u.iosb.sp = sp;
+
+       return qla2x00_post_work(vha, e);
+}
+
+static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
+    srb_t *sp, int cmd)
+{
+       struct qla_work_evt *e;
+
+       if (cmd != QLA_EVT_GPNFT)
+               return QLA_PARAMETER_ERROR;
+
+       e = qla2x00_alloc_work(vha, cmd);
+       if (!e)
+               return QLA_FUNCTION_FAILED;
+
+       e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+       e->u.gpnft.sp = sp;
+
+       return qla2x00_post_work(vha, e);
+}
+
 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
        struct srb *sp)
 {
@@ -4139,120 +3922,85 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
 {
        struct srb *sp = s;
        struct scsi_qla_host *vha = sp->vha;
-       struct qla_work_evt *e;
        struct ct_sns_req *ct_req =
                (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
        u16 cmd = be16_to_cpu(ct_req->command);
        u8 fc4_type = sp->gen2;
        unsigned long flags;
+       int rc;
 
        /* gen2 field is holding the fc4type */
        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "Async done-%s res %x FC4Type %x\n",
            sp->name, res, sp->gen2);
 
+       del_timer(&sp->u.iocb_cmd.timer);
+       sp->rc = res;
        if (res) {
                unsigned long flags;
+               const char *name = sp->name;
 
-               sp->free(sp);
-               spin_lock_irqsave(&vha->work_lock, flags);
-               vha->scan.scan_flags &= ~SF_SCANNING;
-               vha->scan.scan_retry++;
-               spin_unlock_irqrestore(&vha->work_lock, flags);
+               /*
+                * We are in an Interrupt context, queue up this
+                * sp for GNNFT_DONE work. This will allow all
+                * the resource to get freed up.
+                */
+               rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+                   QLA_EVT_GNNFT_DONE);
+               if (rc) {
+                       /* Cleanup here to prevent memory leak */
+                       qla24xx_sp_unmap(vha, sp);
 
-               if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
-                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       qla2xxx_wake_dpc(vha);
-               } else {
-                       ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
-                           "Async done-%s rescan failed on all retries\n",
-                           sp->name);
+                       spin_lock_irqsave(&vha->work_lock, flags);
+                       vha->scan.scan_flags &= ~SF_SCANNING;
+                       vha->scan.scan_retry++;
+                       spin_unlock_irqrestore(&vha->work_lock, flags);
+
+                       if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+                               set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                               qla2xxx_wake_dpc(vha);
+                       } else {
+                               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                                   "Async done-%s rescan failed on all retries.\n",
+                                   name);
+                       }
                }
                return;
        }
 
-       if (!res)
-               qla2x00_find_free_fcp_nvme_slot(vha, sp);
+       qla2x00_find_free_fcp_nvme_slot(vha, sp);
 
        if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
            cmd == GNN_FT_CMD) {
-               del_timer(&sp->u.iocb_cmd.timer);
                spin_lock_irqsave(&vha->work_lock, flags);
                vha->scan.scan_flags &= ~SF_SCANNING;
                spin_unlock_irqrestore(&vha->work_lock, flags);
 
-               e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
-               if (!e) {
-                       /*
-                        * please ignore kernel warning. Otherwise,
-                        * we have mem leak.
-                        */
-                       if (sp->u.iocb_cmd.u.ctarg.req) {
-                               dma_free_coherent(&vha->hw->pdev->dev,
-                                   sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-                                   sp->u.iocb_cmd.u.ctarg.req,
-                                   sp->u.iocb_cmd.u.ctarg.req_dma);
-                               sp->u.iocb_cmd.u.ctarg.req = NULL;
-                       }
-                       if (sp->u.iocb_cmd.u.ctarg.rsp) {
-                               dma_free_coherent(&vha->hw->pdev->dev,
-                                   sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-                                   sp->u.iocb_cmd.u.ctarg.rsp,
-                                   sp->u.iocb_cmd.u.ctarg.rsp_dma);
-                               sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-                       }
-
-                       ql_dbg(ql_dbg_disc, vha, 0xffff,
-                           "Async done-%s unable to alloc work element\n",
-                           sp->name);
-                       sp->free(sp);
+               sp->rc = res;
+               rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
+               if (rc) {
+                       qla24xx_sp_unmap(vha, sp);
                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                       return;
                }
-               e->u.gpnft.fc4_type = FC4_TYPE_NVME;
-               sp->rc = res;
-               e->u.gpnft.sp = sp;
-
-               qla2x00_post_work(vha, e);
                return;
        }
 
-       if (cmd == GPN_FT_CMD)
-               e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
-       else
-               e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
-       if (!e) {
-               /* please ignore kernel warning. Otherwise, we have mem leak. */
-               if (sp->u.iocb_cmd.u.ctarg.req) {
-                       dma_free_coherent(&vha->hw->pdev->dev,
-                           sp->u.iocb_cmd.u.ctarg.req_allocated_size,
-                           sp->u.iocb_cmd.u.ctarg.req,
-                           sp->u.iocb_cmd.u.ctarg.req_dma);
-                       sp->u.iocb_cmd.u.ctarg.req = NULL;
-               }
-               if (sp->u.iocb_cmd.u.ctarg.rsp) {
-                       dma_free_coherent(&vha->hw->pdev->dev,
-                           sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
-                           sp->u.iocb_cmd.u.ctarg.rsp,
-                           sp->u.iocb_cmd.u.ctarg.rsp_dma);
-                       sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-               }
+       if (cmd == GPN_FT_CMD) {
+               rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+                   QLA_EVT_GPNFT_DONE);
+       } else {
+               rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+                   QLA_EVT_GNNFT_DONE);
+       }
 
-               ql_dbg(ql_dbg_disc, vha, 0xffff,
-                   "Async done-%s unable to alloc work element\n",
-                   sp->name);
-               sp->free(sp);
+       if (rc) {
+               qla24xx_sp_unmap(vha, sp);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                return;
        }
-
-       sp->rc = res;
-       e->u.iosb.sp = sp;
-
-       qla2x00_post_work(vha, e);
 }
 
 /*
@@ -4285,11 +4033,13 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
                vha->scan.scan_flags &= ~SF_SCANNING;
                spin_unlock_irqrestore(&vha->work_lock, flags);
                WARN_ON(1);
+               set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+               set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                goto done_free_sp;
        }
 
        ql_dbg(ql_dbg_disc, vha, 0xfffff,
-           "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
+           "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
            __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
             sp->u.iocb_cmd.u.ctarg.req_size);
 
@@ -4318,8 +4068,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
        sp->done = qla2x00_async_gpnft_gnnft_sp_done;
 
        rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
+       if (rval != QLA_SUCCESS) {
+               spin_lock_irqsave(&vha->work_lock, flags);
+               vha->scan.scan_flags &= ~SF_SCANNING;
+               spin_unlock_irqrestore(&vha->work_lock, flags);
                goto done_free_sp;
+       }
 
        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "Async-%s hdl=%x FC4Type %x.\n", sp->name,
@@ -4351,7 +4105,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
 {
        ql_dbg(ql_dbg_disc, vha, 0xffff,
            "%s enter\n", __func__);
-       del_timer(&sp->u.iocb_cmd.timer);
        qla24xx_async_gnnft(vha, sp, sp->gen2);
 }
 
@@ -4444,9 +4197,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
        sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
-       rspsz = sizeof(struct ct_sns_gpnft_rsp) +
-               ((vha->hw->max_fibre_devices - 1) *
-                   sizeof(struct ct_sns_gpn_ft_data));
+       rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
+       memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+       memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
 
        ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
        /* CT_IU preamble  */
@@ -4644,9 +4397,6 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
                    __func__, fcport->port_name);
                return;
        } else if (ea->sp->gen1 != fcport->rscn_gen) {
-               ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-                   __func__, __LINE__, fcport->port_name);
-               qla24xx_post_gidpn_work(vha, fcport);
                return;
        }
 
index b934977..c72d801 100644 (file)
@@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t)
        struct srb_iocb *iocb;
        struct req_que *req;
        unsigned long flags;
+       struct qla_hw_data *ha = sp->vha->hw;
 
-       spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+       WARN_ON_ONCE(irqs_disabled());
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        req = sp->qpair->req;
        req->outstanding_cmds[sp->handle] = NULL;
        iocb = &sp->u.iocb_cmd;
-       spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        iocb->timeout(sp);
 }
 
@@ -245,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
 
        }
 
+       ql_dbg(ql_dbg_disc, vha, 0x2072,
+           "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+               "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+           fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+           fcport->login_retry);
+
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
                fcport->flags |= FCF_LOGIN_NEEDED;
@@ -252,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
                goto done_free_sp;
        }
 
-       ql_dbg(ql_dbg_disc, vha, 0x2072,
-           "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
-               "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
-           fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
-           fcport->login_retry);
        return rval;
 
 done_free_sp:
@@ -301,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
        qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
 
        sp->done = qla2x00_async_logout_sp_done;
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x2070,
            "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
                fcport->d_id.b.area, fcport->d_id.b.al_pa,
                fcport->port_name);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        return rval;
 
 done_free_sp:
@@ -396,6 +400,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
                ql_dbg(ql_dbg_disc, vha, 0x2066,
                    "%s %8phC: adisc fail: post delete\n",
                    __func__, ea->fcport->port_name);
+               /* deleted = 0 & logout_on_delete = force fw cleanup */
+               fcport->deleted = 0;
+               fcport->logout_on_delete = 1;
                qlt_schedule_sess_for_deletion(ea->fcport);
                return;
        }
@@ -410,9 +417,8 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
                    __func__, ea->fcport->port_name);
                return;
        } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
-               ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-                   __func__, __LINE__, ea->fcport->port_name);
-               qla24xx_post_gidpn_work(vha, ea->fcport);
+               qla_rscn_replay(fcport);
+               qlt_schedule_sess_for_deletion(fcport);
                return;
        }
 
@@ -487,13 +493,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
        sp->done = qla2x00_async_adisc_sp_done;
        if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
                lio->u.logio.flags |= SRB_LOGIN_RETRIED;
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
 
        ql_dbg(ql_dbg_disc, vha, 0x206f,
            "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
+
        return rval;
 
 done_free_sp:
@@ -536,11 +544,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
        }
 
        if (fcport->last_rscn_gen != fcport->rscn_gen) {
-               ql_dbg(ql_dbg_disc, vha, 0x20df,
-                   "%s %8phC rscn gen changed rscn %d|%d \n",
-                   __func__, fcport->port_name,
-                   fcport->last_rscn_gen, fcport->rscn_gen);
-               qla24xx_post_gidpn_work(vha, fcport);
+               qla_rscn_replay(fcport);
+               qlt_schedule_sess_for_deletion(fcport);
                return;
        } else if (fcport->last_login_gen != fcport->login_gen) {
                ql_dbg(ql_dbg_disc, vha, 0x20e0,
@@ -787,6 +792,10 @@ qla24xx_async_gnl_sp_done(void *s, int res)
            sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
            sp->u.iocb_cmd.u.mbx.in_mb[2]);
 
+       if (res == QLA_FUNCTION_TIMEOUT)
+               return;
+
+       sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
        memset(&ea, 0, sizeof(ea));
        ea.sp = sp;
        ea.rc = res;
@@ -814,25 +823,24 @@ qla24xx_async_gnl_sp_done(void *s, int res)
                    (loop_id & 0x7fff));
        }
 
-       spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 
        INIT_LIST_HEAD(&h);
        fcport = tf = NULL;
        if (!list_empty(&vha->gnl.fcports))
                list_splice_init(&vha->gnl.fcports, &h);
+       spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
        list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
                list_del_init(&fcport->gnl_entry);
-               spin_lock(&vha->hw->tgt.sess_lock);
+               spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-               spin_unlock(&vha->hw->tgt.sess_lock);
+               spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
                ea.fcport = fcport;
 
                qla2x00_fcport_event_handler(vha, &ea);
        }
-       spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
 
-       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        /* create new fcport if fw has knowledge of new sessions */
        for (i = 0; i < n; i++) {
                port_id_t id;
@@ -865,6 +873,8 @@ qla24xx_async_gnl_sp_done(void *s, int res)
                }
        }
 
+       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+       vha->gnl.sent = 0;
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
        sp->free(sp);
@@ -884,27 +894,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
        ql_dbg(ql_dbg_disc, vha, 0x20d9,
            "Async-gnlist WWPN %8phC \n", fcport->port_name);
 
-       spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
-       if (!list_empty(&fcport->gnl_entry)) {
-               spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
-               rval = QLA_SUCCESS;
-               goto done;
-       }
-
-       spin_lock(&vha->hw->tgt.sess_lock);
+       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+       fcport->flags |= FCF_ASYNC_SENT;
        fcport->disc_state = DSC_GNL;
        fcport->last_rscn_gen = fcport->rscn_gen;
        fcport->last_login_gen = fcport->login_gen;
-       spin_unlock(&vha->hw->tgt.sess_lock);
 
        list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
-       spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+       if (vha->gnl.sent) {
+               spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+               return QLA_SUCCESS;
+       }
+       vha->gnl.sent = 1;
+       spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
        if (!sp)
                goto done;
 
-       fcport->flags |= FCF_ASYNC_SENT;
        sp->type = SRB_MB_IOCB;
        sp->name = "gnlist";
        sp->gen1 = fcport->rscn_gen;
@@ -970,8 +977,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
            "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
            sp->name, res, fcport->port_name, mb[1], mb[2]);
 
-       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+       if (res == QLA_FUNCTION_TIMEOUT) {
+               dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+                       sp->u.iocb_cmd.u.mbx.in_dma);
+               return;
+       }
 
+       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
        memset(&ea, 0, sizeof(ea));
        ea.event = FCME_GPDB_DONE;
        ea.fcport = fcport;
@@ -1147,14 +1159,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
 
        sp->done = qla24xx_async_gpdb_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_disc, vha, 0x20dc,
            "Async-%s %8phC hndl %x opt %x\n",
            sp->name, fcport->port_name, sp->handle, opt);
 
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        return rval;
 
 done_free_sp:
@@ -1182,11 +1193,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
                vha->fcport_count++;
                ea->fcport->login_succ = 1;
 
-               ql_dbg(ql_dbg_disc, vha, 0x20d6,
-                   "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-                   __func__, __LINE__,  ea->fcport->port_name,
-                   vha->fcport_count);
-               qla24xx_post_upd_fcport_work(vha, ea->fcport);
+               spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+               qla24xx_sched_upd_fcport(ea->fcport);
+               spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        } else if (ea->fcport->login_succ) {
                /*
                 * We have an existing session. A late RSCN delivery
@@ -1226,6 +1235,19 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
        else
                ls = pd->current_login_state & 0xf;
 
+       if (ea->sp->gen2 != fcport->login_gen) {
+               /* target side must have changed it. */
+
+               ql_dbg(ql_dbg_disc, vha, 0x20d3,
+                   "%s %8phC generation changed\n",
+                   __func__, fcport->port_name);
+               return;
+       } else if (ea->sp->gen1 != fcport->rscn_gen) {
+               qla_rscn_replay(fcport);
+               qlt_schedule_sess_for_deletion(fcport);
+               return;
+       }
+
        switch (ls) {
        case PDS_PRLI_COMPLETE:
                __qla24xx_parse_gpdb(vha, fcport, pd);
@@ -1280,7 +1302,8 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                login = 1;
        }
 
-       if (login) {
+       if (login && fcport->login_retry) {
+               fcport->login_retry--;
                if (fcport->loop_id == FC_NO_LOOP_ID) {
                        fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
                        rc = qla2x00_find_new_loop_id(vha, fcport);
@@ -1304,14 +1327,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
        u16 data[2];
        u64 wwn;
+       u16 sec;
 
-       ql_dbg(ql_dbg_disc, vha, 0x20d8,
-           "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
+       ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
+           "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
            __func__, fcport->port_name, fcport->disc_state,
            fcport->fw_login_state, fcport->login_pause, fcport->flags,
            fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
-           fcport->login_gen, fcport->login_retry,
-           fcport->loop_id, fcport->scan_state);
+           fcport->login_gen, fcport->loop_id, fcport->scan_state);
 
        if (fcport->scan_state != QLA_FCPORT_FOUND)
                return 0;
@@ -1410,22 +1433,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                break;
 
        case DSC_LOGIN_FAILED:
-               fcport->login_retry--;
-               ql_dbg(ql_dbg_disc, vha, 0x20d0,
-                   "%s %d %8phC post gidpn\n",
-                   __func__, __LINE__, fcport->port_name);
                if (N2N_TOPO(vha->hw))
                        qla_chk_n2n_b4_login(vha, fcport);
                else
-                       qla24xx_post_gidpn_work(vha, fcport);
+                       qlt_schedule_sess_for_deletion(fcport);
                break;
 
        case DSC_LOGIN_COMPLETE:
                /* recheck login state */
-               ql_dbg(ql_dbg_disc, vha, 0x20d1,
-                   "%s %d %8phC post adisc\n",
-                   __func__, __LINE__, fcport->port_name);
-               fcport->login_retry--;
                data[0] = data[1] = 0;
                qla2x00_post_async_adisc_work(vha, fcport, data);
                break;
@@ -1435,6 +1450,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                        qla24xx_post_prli_work(vha, fcport);
                break;
 
+       case DSC_UPD_FCPORT:
+               sec =  jiffies_to_msecs(jiffies -
+                   fcport->jiffies_at_registration)/1000;
+               if (fcport->sec_since_registration < sec && sec &&
+                   !(sec % 60)) {
+                       fcport->sec_since_registration = sec;
+                       ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+                           "%s %8phC - Slow Rport registration(%d Sec)\n",
+                           __func__, fcport->port_name, sec);
+               }
+
+               if (fcport->next_disc_state != DSC_DELETE_PEND)
+                       fcport->next_disc_state = DSC_ADISC;
+               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+               break;
+
        default:
                break;
        }
@@ -1513,7 +1544,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
                ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
                    __func__, __LINE__, fcport->port_name);
 
-               qla24xx_post_gidpn_work(vha, fcport);
                return;
        }
 
@@ -1533,7 +1563,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
 {
        fc_port_t *f, *tf;
        uint32_t id = 0, mask, rid;
-       unsigned long flags;
        fc_port_t *fcport;
 
        switch (ea->event) {
@@ -1548,10 +1577,16 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
                        return;
                switch (ea->id.b.rsvd_1) {
                case RSCN_PORT_ADDR:
+#define BIGSCAN 1
+#if defined BIGSCAN & BIGSCAN > 0
+               {
+                       unsigned long flags;
                        fcport = qla2x00_find_fcport_by_nportid
                                (vha, &ea->id, 1);
-                       if (fcport)
-                               fcport->rscn_rcvd = 1;
+                       if (fcport) {
+                               fcport->scan_needed = 1;
+                               fcport->rscn_gen++;
+                       }
 
                        spin_lock_irqsave(&vha->work_lock, flags);
                        if (vha->scan.scan_flags == 0) {
@@ -1561,7 +1596,26 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
                                schedule_delayed_work(&vha->scan.scan_work, 5);
                        }
                        spin_unlock_irqrestore(&vha->work_lock, flags);
-
+               }
+#else
+               {
+                       int rc;
+                       fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+                       if (!fcport) {
+                               /* cable moved */
+                                rc = qla24xx_post_gpnid_work(vha, &ea->id);
+                                if (rc) {
+                                        ql_log(ql_log_warn, vha, 0xd044,
+                                            "RSCN GPNID work failed %06x\n",
+                                            ea->id.b24);
+                                }
+                       } else {
+                               ea->fcport = fcport;
+                               fcport->scan_needed = 1;
+                               qla24xx_handle_rscn_event(fcport, ea);
+                       }
+               }
+#endif
                        break;
                case RSCN_AREA_ADDR:
                case RSCN_DOM_ADDR:
@@ -1597,9 +1651,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                }
                break;
-       case FCME_GIDPN_DONE:
-               qla24xx_handle_gidpn_event(vha, ea);
-               break;
        case FCME_GNL_DONE:
                qla24xx_handle_gnl_done_event(vha, ea);
                break;
@@ -1639,6 +1690,34 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
        }
 }
 
+/*
+ * RSCN(s) came in for this fcport, but the RSCN(s) was not able
+ * to be consumed by the fcport
+ */
+void qla_rscn_replay(fc_port_t *fcport)
+{
+       struct event_arg ea;
+
+       switch (fcport->disc_state) {
+       case DSC_DELETE_PEND:
+               return;
+       default:
+               break;
+       }
+
+       if (fcport->scan_needed) {
+               memset(&ea, 0, sizeof(ea));
+               ea.event = FCME_RSCN;
+               ea.id = fcport->d_id;
+               ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+#if defined BIGSCAN & BIGSCAN > 0
+               qla2x00_fcport_event_handler(fcport->vha, &ea);
+#else
+               qla24xx_post_gpnid_work(fcport->vha, &ea.id);
+#endif
+       }
+}
+
 static void
 qla2x00_tmf_iocb_timeout(void *data)
 {
@@ -1684,15 +1763,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
        tm_iocb->u.tmf.data = tag;
        sp->done = qla2x00_tmf_sp_done;
 
-       rval = qla2x00_start_sp(sp);
-       if (rval != QLA_SUCCESS)
-               goto done_free_sp;
-
        ql_dbg(ql_dbg_taskm, vha, 0x802f,
            "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
            sp->handle, fcport->loop_id, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa);
 
+       rval = qla2x00_start_sp(sp);
+       if (rval != QLA_SUCCESS)
+               goto done_free_sp;
        wait_for_completion(&tm_iocb->u.tmf.comp);
 
        rval = tm_iocb->u.tmf.data;
@@ -1747,47 +1825,46 @@ int
 qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
 {
        scsi_qla_host_t *vha = cmd_sp->vha;
-       fc_port_t *fcport = cmd_sp->fcport;
        struct srb_iocb *abt_iocb;
        srb_t *sp;
        int rval = QLA_FUNCTION_FAILED;
 
-       sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+       sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+           GFP_KERNEL);
        if (!sp)
                goto done;
 
        abt_iocb = &sp->u.iocb_cmd;
        sp->type = SRB_ABT_CMD;
        sp->name = "abort";
+       sp->qpair = cmd_sp->qpair;
        if (wait)
                sp->flags = SRB_WAKEUP_ON_COMP;
 
        abt_iocb->timeout = qla24xx_abort_iocb_timeout;
        init_completion(&abt_iocb->u.abt.comp);
-       qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+       /* FW can send 2 x ABTS's timeout/20s */
+       qla2x00_init_timer(sp, 42);
 
        abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-
-       if (vha->flags.qpairs_available && cmd_sp->qpair)
-               abt_iocb->u.abt.req_que_no =
-                   cpu_to_le16(cmd_sp->qpair->req->id);
-       else
-               abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+       abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
 
        sp->done = qla24xx_abort_sp_done;
 
+       ql_dbg(ql_dbg_async, vha, 0x507c,
+           "Abort command issued - hdl=%x, type=%x\n",
+           cmd_sp->handle, cmd_sp->type);
+
        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS)
                goto done_free_sp;
 
-       ql_dbg(ql_dbg_async, vha, 0x507c,
-           "Abort command issued - hdl=%x, target_id=%x\n",
-           cmd_sp->handle, fcport->tgt_id);
-
        if (wait) {
                wait_for_completion(&abt_iocb->u.abt.comp);
                rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
                        QLA_SUCCESS : QLA_FUNCTION_FAILED;
+       } else {
+               goto done;
        }
 
 done_free_sp:
@@ -1803,19 +1880,17 @@ qla24xx_async_abort_command(srb_t *sp)
 
        uint32_t        handle;
        fc_port_t       *fcport = sp->fcport;
+       struct qla_qpair *qpair = sp->qpair;
        struct scsi_qla_host *vha = fcport->vha;
-       struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = vha->req;
-
-       if (vha->flags.qpairs_available && sp->qpair)
-               req = sp->qpair->req;
+       struct req_que *req = qpair->req;
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
+       spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
                        break;
        }
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
        if (handle == req->num_outstanding_cmds) {
                /* Command not found. */
                return QLA_FUNCTION_FAILED;
@@ -1876,7 +1951,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
            "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
            __func__, fcport->port_name, fcport->disc_state,
            fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
-           ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+           ea->sp->gen1, fcport->rscn_gen,
            ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
 
        if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
@@ -1898,9 +1973,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                return;
        } else if (ea->sp->gen1 != fcport->rscn_gen) {
-               ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
-                   __func__, __LINE__, fcport->port_name);
-               qla24xx_post_gidpn_work(vha, fcport);
+               ql_dbg(ql_dbg_disc, vha, 0x20d3,
+                   "%s %8phC RSCN generation changed\n",
+                   __func__, fcport->port_name);
+               qla_rscn_replay(fcport);
+               qlt_schedule_sess_for_deletion(fcport);
                return;
        }
 
@@ -1952,25 +2029,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                cid.b.rsvd_1 = 0;
 
                ql_dbg(ql_dbg_disc, vha, 0x20ec,
-                   "%s %d %8phC LoopID 0x%x in use post gnl\n",
+                   "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
                    __func__, __LINE__, ea->fcport->port_name,
-                   ea->fcport->loop_id);
+                   ea->fcport->loop_id, cid.b24);
 
-               if (IS_SW_RESV_ADDR(cid)) {
-                       set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
-                       ea->fcport->loop_id = FC_NO_LOOP_ID;
-               } else {
-                       qla2x00_clear_loop_id(ea->fcport);
-               }
+               set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+               ea->fcport->loop_id = FC_NO_LOOP_ID;
                qla24xx_post_gnl_work(vha, ea->fcport);
                break;
        case MBS_PORT_ID_USED:
-               ql_dbg(ql_dbg_disc, vha, 0x20ed,
-                   "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
-                   __func__, __LINE__, ea->fcport->port_name,
-                   ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
-                   ea->fcport->d_id.b.al_pa);
-
                lid = ea->iop[1] & 0xffff;
                qlt_find_sess_invalidate_other(vha,
                    wwn_to_u64(ea->fcport->port_name),
@@ -1989,8 +2056,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                            "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
                            __func__, __LINE__, ea->fcport->port_name,
                            ea->fcport->d_id.b24, lid);
-                       qla2x00_clear_loop_id(ea->fcport);
-                       qla24xx_post_gidpn_work(vha, ea->fcport);
                } else {
                        ql_dbg(ql_dbg_disc, vha, 0x20ed,
                            "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
@@ -2018,26 +2083,6 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
        return;
 }
 
-void
-qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
-    uint16_t *data)
-{
-       fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-       if (data[0] == MBS_COMMAND_COMPLETE) {
-               qla2x00_update_fcport(vha, fcport);
-
-               return;
-       }
-
-       /* Retry login. */
-       if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
-               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-       else
-               qla2x00_mark_device_lost(vha, fcport, 1, 0);
-
-       return;
-}
-
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -3527,6 +3572,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
                        if (rval == QLA_SUCCESS) {
                                qla24xx_detect_sfp(vha);
 
+                               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+                                   (ha->zio_mode == QLA_ZIO_MODE_6))
+                                       qla27xx_set_zio_threshold(vha,
+                                           ha->last_zio_threshold);
+
                                rval = qla2x00_set_exlogins_buffer(vha);
                                if (rval != QLA_SUCCESS)
                                        goto failed;
@@ -4015,6 +4065,7 @@ next_check:
                ql_dbg(ql_dbg_init, vha, 0x00d3,
                    "Init Firmware -- success.\n");
                QLA_FW_STARTED(ha);
+               vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
        }
 
        return (rval);
@@ -4728,6 +4779,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
                fcport = NULL;
        }
        INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+       INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
        INIT_LIST_HEAD(&fcport->gnl_entry);
        INIT_LIST_HEAD(&fcport->list);
 
@@ -4853,19 +4905,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
                         */
                        if (qla_tgt_mode_enabled(vha) ||
                            qla_dual_mode_enabled(vha)) {
-                               if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
-                                       spin_lock_irqsave(&ha->tgt.atio_lock,
-                                           flags);
-                                       qlt_24xx_process_atio_queue(vha, 0);
-                                       spin_unlock_irqrestore(
-                                           &ha->tgt.atio_lock, flags);
-                               } else {
-                                       spin_lock_irqsave(&ha->hardware_lock,
-                                           flags);
-                                       qlt_24xx_process_atio_queue(vha, 1);
-                                       spin_unlock_irqrestore(
-                                           &ha->hardware_lock, flags);
-                               }
+                               spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+                               qlt_24xx_process_atio_queue(vha, 0);
+                               spin_unlock_irqrestore(&ha->tgt.atio_lock,
+                                   flags);
                        }
                }
        }
@@ -4958,6 +5001,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
            (uint8_t *)ha->gid_list,
            entries * sizeof(struct gid_list_info));
 
+       if (entries == 0) {
+               spin_lock_irqsave(&vha->work_lock, flags);
+               vha->scan.scan_retry++;
+               spin_unlock_irqrestore(&vha->work_lock, flags);
+
+               if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+               }
+       } else {
+               vha->scan.scan_retry = 0;
+       }
+
        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                fcport->scan_state = QLA_FCPORT_SCAN;
        }
@@ -5223,20 +5279,20 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 void
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-       fcport->vha = vha;
-
        if (IS_SW_RESV_ADDR(fcport->d_id))
                return;
 
+       ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+           __func__, fcport->port_name);
+
+       fcport->disc_state = DSC_UPD_FCPORT;
+       fcport->login_retry = vha->hw->login_retry_count;
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
-       fcport->disc_state = DSC_LOGIN_COMPLETE;
        fcport->deleted = 0;
        fcport->logout_on_delete = 1;
        fcport->login_retry = vha->hw->login_retry_count;
        fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
 
-       qla2x00_iidma_fcport(vha, fcport);
-
        switch (vha->hw->current_topology) {
        case ISP_CFG_N:
        case ISP_CFG_NL:
@@ -5246,6 +5302,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                break;
        }
 
+       qla2x00_iidma_fcport(vha, fcport);
+
        if (fcport->fc4f_nvme) {
                qla_nvme_register_remote(vha, fcport);
                fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -5274,6 +5332,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                break;
        }
 
+       qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
        if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
                if (fcport->id_changed) {
                        fcport->id_changed = 0;
@@ -5290,7 +5350,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                        qla24xx_post_gpsc_work(vha, fcport);
                }
        }
-       qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
+       fcport->disc_state = DSC_LOGIN_COMPLETE;
+}
+
+void qla_register_fcport_fn(struct work_struct *work)
+{
+       fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
+       u32 rscn_gen = fcport->rscn_gen;
+       u16 data[2];
+
+       if (IS_SW_RESV_ADDR(fcport->d_id))
+               return;
+
+       qla2x00_update_fcport(fcport->vha, fcport);
+
+       if (rscn_gen != fcport->rscn_gen) {
+                               /* RSCN(s) arrived while registration was in progress */
+               switch (fcport->next_disc_state) {
+               case DSC_DELETE_PEND:
+                       qlt_schedule_sess_for_deletion(fcport);
+                       break;
+               case DSC_ADISC:
+                       data[0] = data[1] = 0;
+                       qla2x00_post_async_adisc_work(fcport->vha, fcport,
+                           data);
+                       break;
+               default:
+                       break;
+               }
+       }
 }
 
 /*
@@ -6494,6 +6583,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
        if (!(IS_P3P_TYPE(ha)))
                ha->isp_ops->reset_chip(vha);
 
+       ha->link_data_rate = PORT_SPEED_UNKNOWN;
        SAVE_TOPO(ha);
        ha->flags.rida_fmt2 = 0;
        ha->flags.n2n_ae = 0;
@@ -6622,6 +6712,20 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                        return status;
                }
 
+               switch (vha->qlini_mode) {
+               case QLA2XXX_INI_MODE_DISABLED:
+                       if (!qla_tgt_mode_enabled(vha))
+                               return 0;
+                       break;
+               case QLA2XXX_INI_MODE_DUAL:
+                       if (!qla_dual_mode_enabled(vha))
+                               return 0;
+                       break;
+               case QLA2XXX_INI_MODE_ENABLED:
+               default:
+                       break;
+               }
+
                ha->isp_ops->get_flash_version(vha, req->ring);
 
                ha->isp_ops->nvram_config(vha);
@@ -6682,7 +6786,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
                                         * The next call disables the board
                                         * completely.
                                         */
-                                       ha->isp_ops->reset_adapter(vha);
+                                       qla2x00_abort_isp_cleanup(vha);
                                        vha->flags.online = 0;
                                        clear_bit(ISP_ABORT_RETRY,
                                            &vha->dpc_flags);
@@ -7142,7 +7246,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        }
        icb->firmware_options_2 &= cpu_to_le32(
            ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
-       vha->flags.process_response_queue = 0;
        if (ha->zio_mode != QLA_ZIO_DISABLED) {
                ha->zio_mode = QLA_ZIO_MODE_6;
 
@@ -7153,7 +7256,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
                icb->firmware_options_2 |= cpu_to_le32(
                    (uint32_t)ha->zio_mode);
                icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
-               vha->flags.process_response_queue = 1;
        }
 
        if (rval) {
index 4351736..512c3c3 100644 (file)
@@ -209,7 +209,8 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
 }
 
 static inline srb_t *
-qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+    fc_port_t *fcport, gfp_t flag)
 {
        srb_t *sp = NULL;
        uint8_t bail;
@@ -225,7 +226,9 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
        memset(sp, 0, sizeof(*sp));
        sp->fcport = fcport;
        sp->iocbs = 1;
-       sp->vha = qpair->vha;
+       sp->vha = vha;
+       sp->qpair = qpair;
+       sp->cmd_type = TYPE_SRB;
        INIT_LIST_HEAD(&sp->elem);
 
 done:
@@ -246,19 +249,17 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
 {
        srb_t *sp = NULL;
        uint8_t bail;
+       struct qla_qpair *qpair;
 
        QLA_VHA_MARK_BUSY(vha, bail);
        if (unlikely(bail))
                return NULL;
 
-       sp = mempool_alloc(vha->hw->srb_mempool, flag);
+       qpair = vha->hw->base_qpair;
+       sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
        if (!sp)
                goto done;
 
-       memset(sp, 0, sizeof(*sp));
-       sp->fcport = fcport;
-       sp->cmd_type = TYPE_SRB;
-       sp->iocbs = 1;
        sp->vha = vha;
 done:
        if (!sp)
@@ -270,7 +271,7 @@ static inline void
 qla2x00_rel_sp(srb_t *sp)
 {
        QLA_VHA_MARK_NOT_BUSY(sp->vha);
-       mempool_free(sp, sp->vha->hw->srb_mempool);
+       qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
 
 static inline void
@@ -317,13 +318,13 @@ static inline bool
 qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
 {
        if (qla_ini_mode_enabled(vha) &&
-           (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+           (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
                return true;
        else if (qla_tgt_mode_enabled(vha) &&
-           (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+           (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
                return true;
        else if (qla_dual_mode_enabled(vha) &&
-           ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+           ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
                return true;
        else
                return false;
index 42ac8e0..86fb8b2 100644 (file)
@@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp)
 
        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
-       RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
-       /* Manage unprocessed RIO/ZIO commands in response queue. */
-       if (vha->flags.process_response_queue &&
-               rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-               qla24xx_process_response_queue(vha, rsp);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;
@@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
 
        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
-       RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
-       /* Manage unprocessed RIO/ZIO commands in response queue. */
-       if (vha->flags.process_response_queue &&
-           rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-               qla24xx_process_response_queue(vha, rsp);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
        /* Set chip new ring index. */
        WRT_REG_DWORD(req->req_q_in, req->ring_index);
 
-       /* Manage unprocessed RIO/ZIO commands in response queue. */
-       if (vha->flags.process_response_queue &&
-               rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-               qla24xx_process_response_queue(vha, rsp);
-
        spin_unlock_irqrestore(&qpair->qp_lock, flags);
        return QLA_SUCCESS;
 
@@ -2287,8 +2270,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags =
            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
-       if (!sp->fcport->se_sess ||
-           !sp->fcport->keep_nport_handle)
+       if (!sp->fcport->keep_nport_handle)
                logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2659,7 +2641,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        struct qla_hw_data *ha = vha->hw;
        int rval = QLA_SUCCESS;
        void    *ptr, *resp_ptr;
-       dma_addr_t ptr_dma;
 
        /* Alloc SRB structure */
        sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -2691,7 +2672,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
        ptr = elsio->u.els_plogi.els_plogi_pyld =
            dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
                &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
-       ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
 
        if (!elsio->u.els_plogi.els_plogi_pyld) {
                rval = QLA_FUNCTION_FAILED;
@@ -3314,19 +3294,21 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
 {
        struct srb_iocb *aio = &sp->u.iocb_cmd;
        scsi_qla_host_t *vha = sp->vha;
-       struct req_que *req = vha->req;
+       struct req_que *req = sp->qpair->req;
 
        memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
        abt_iocb->entry_type = ABORT_IOCB_TYPE;
        abt_iocb->entry_count = 1;
        abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
-       abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+       if (sp->fcport) {
+               abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+               abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+               abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+               abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+       }
        abt_iocb->handle_to_abort =
            cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
                                    aio->u.abt.cmd_hndl));
-       abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
-       abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
-       abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
        abt_iocb->vp_index = vha->vp_idx;
        abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
        /* Send the command to the firmware */
@@ -3455,12 +3437,13 @@ qla2x00_start_sp(srb_t *sp)
        int rval;
        scsi_qla_host_t *vha = sp->vha;
        struct qla_hw_data *ha = vha->hw;
+       struct qla_qpair *qp = sp->qpair;
        void *pkt;
        unsigned long flags;
 
        rval = QLA_FUNCTION_FAILED;
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       pkt = qla2x00_alloc_iocbs(vha, sp);
+       spin_lock_irqsave(qp->qp_lock_ptr, flags);
+       pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
        if (!pkt) {
                ql_log(ql_log_warn, vha, 0x700c,
                    "qla2x00_alloc_iocbs failed.\n");
@@ -3538,9 +3521,9 @@ qla2x00_start_sp(srb_t *sp)
        }
 
        wmb();
-       qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+       qla2x00_start_iocbs(vha, qp->req);
 done:
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
        return rval;
 }
 
index 36cbb29..d73b04e 100644 (file)
@@ -1850,11 +1850,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
        struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
        uint16_t        state_flags;
        struct nvmefc_fcp_req *fd;
-       uint16_t        ret = 0;
+       uint16_t        ret = QLA_SUCCESS;
+       uint16_t        comp_status = le16_to_cpu(sts->comp_status);
 
        iocb = &sp->u.iocb_cmd;
        fcport = sp->fcport;
-       iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+       iocb->u.nvme.comp_status = comp_status;
        state_flags  = le16_to_cpu(sts->state_flags);
        fd = iocb->u.nvme.desc;
 
@@ -1892,28 +1893,35 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
        fd->transferred_length = fd->payload_length -
            le32_to_cpu(sts->residual_len);
 
-       switch (le16_to_cpu(sts->comp_status)) {
+       if (unlikely(comp_status != CS_COMPLETE))
+               ql_log(ql_log_warn, fcport->vha, 0x5060,
+                  "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x  ox_id=%x\n",
+                  sp->name, sp->handle, comp_status,
+                  fd->transferred_length, le32_to_cpu(sts->residual_len),
+                  sts->ox_id);
+
+       /*
+        * If transport error then Failure (HBA rejects request)
+        * otherwise transport will handle.
+        */
+       switch (comp_status) {
        case CS_COMPLETE:
-               ret = QLA_SUCCESS;
                break;
-       case CS_ABORTED:
+
        case CS_RESET:
        case CS_PORT_UNAVAILABLE:
        case CS_PORT_LOGGED_OUT:
+               fcport->nvme_flag |= NVME_FLAG_RESETTING;
+               /* fall through */
+       case CS_ABORTED:
        case CS_PORT_BUSY:
-               ql_log(ql_log_warn, fcport->vha, 0x5060,
-                   "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
-                   sp->name, sp->handle, sts->comp_status,
-                   le32_to_cpu(sts->residual_len), sts->ox_id);
                fd->transferred_length = 0;
                iocb->u.nvme.rsp_pyld_len = 0;
                ret = QLA_ABORTED;
                break;
+       case CS_DATA_UNDERRUN:
+               break;
        default:
-               ql_log(ql_log_warn, fcport->vha, 0x5060,
-                   "NVME-%s error - hdl=%x completion status(%x) resid=%x  ox_id=%x\n",
-                   sp->name, sp->handle, sts->comp_status,
-                   le32_to_cpu(sts->residual_len), sts->ox_id);
                ret = QLA_FUNCTION_FAILED;
                break;
        }
@@ -2837,6 +2845,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
        case ELS_IOCB_TYPE:
        case ABORT_IOCB_TYPE:
        case MBX_IOCB_TYPE:
+       default:
                sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
                if (sp) {
                        sp->done(sp, res);
@@ -2847,7 +2856,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
        case ABTS_RESP_24XX:
        case CTIO_TYPE7:
        case CTIO_CRC2:
-       default:
                return 1;
        }
 fatal:
@@ -3121,6 +3129,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
        uint16_t        mb[8];
        struct rsp_que *rsp;
        unsigned long   flags;
+       bool process_atio = false;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -3181,22 +3190,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
                        qla24xx_process_response_queue(vha, rsp);
                        break;
                case INTR_ATIO_QUE_UPDATE_27XX:
-               case INTR_ATIO_QUE_UPDATE:{
-                       unsigned long flags2;
-                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-                       qlt_24xx_process_atio_queue(vha, 1);
-                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+               case INTR_ATIO_QUE_UPDATE:
+                       process_atio = true;
                        break;
-               }
-               case INTR_ATIO_RSP_QUE_UPDATE: {
-                       unsigned long flags2;
-                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-                       qlt_24xx_process_atio_queue(vha, 1);
-                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+               case INTR_ATIO_RSP_QUE_UPDATE:
+                       process_atio = true;
                        qla24xx_process_response_queue(vha, rsp);
                        break;
-               }
                default:
                        ql_dbg(ql_dbg_async, vha, 0x504f,
                            "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3210,6 +3210,12 @@ qla24xx_intr_handler(int irq, void *dev_id)
        qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+       if (process_atio) {
+               spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+               qlt_24xx_process_atio_queue(vha, 0);
+               spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -3256,6 +3262,7 @@ qla24xx_msix_default(int irq, void *dev_id)
        uint32_t        hccr;
        uint16_t        mb[8];
        unsigned long flags;
+       bool process_atio = false;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -3312,22 +3319,13 @@ qla24xx_msix_default(int irq, void *dev_id)
                        qla24xx_process_response_queue(vha, rsp);
                        break;
                case INTR_ATIO_QUE_UPDATE_27XX:
-               case INTR_ATIO_QUE_UPDATE:{
-                       unsigned long flags2;
-                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-                       qlt_24xx_process_atio_queue(vha, 1);
-                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+               case INTR_ATIO_QUE_UPDATE:
+                       process_atio = true;
                        break;
-               }
-               case INTR_ATIO_RSP_QUE_UPDATE: {
-                       unsigned long flags2;
-                       spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
-                       qlt_24xx_process_atio_queue(vha, 1);
-                       spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+               case INTR_ATIO_RSP_QUE_UPDATE:
+                       process_atio = true;
                        qla24xx_process_response_queue(vha, rsp);
                        break;
-               }
                default:
                        ql_dbg(ql_dbg_async, vha, 0x5051,
                            "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3338,6 +3336,12 @@ qla24xx_msix_default(int irq, void *dev_id)
        qla2x00_handle_mbx_completion(ha, status);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+       if (process_atio) {
+               spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+               qlt_24xx_process_atio_queue(vha, 0);
+               spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+       }
+
        return IRQ_HANDLED;
 }
 
index 2c6c2cd..2f3e507 100644 (file)
@@ -60,6 +60,7 @@ static struct rom_cmd {
        { MBC_GET_ADAPTER_LOOP_ID },
        { MBC_READ_SFP },
        { MBC_GET_RNID_PARAMS },
+       { MBC_GET_SET_ZIO_THRESHOLD },
 };
 
 static int is_rom_cmd(uint16_t cmd)
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                goto premature_exit;
        }
 
-       ha->flags.mbox_busy = 1;
+
        /* Save mailbox command for debug */
        ha->mcp = mcp;
 
@@ -198,12 +199,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+       if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+           ha->flags.mbox_busy) {
                rval = QLA_ABORTED;
-               ha->flags.mbox_busy = 0;
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                goto premature_exit;
        }
+       ha->flags.mbox_busy = 1;
 
        /* Load mailbox registers. */
        if (IS_P3P_TYPE(ha))
@@ -254,9 +256,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                if (IS_P3P_TYPE(ha)) {
                        if (RD_REG_DWORD(&reg->isp82.hint) &
                                HINT_MBX_INT_PENDING) {
+                               ha->flags.mbox_busy = 0;
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
-                               ha->flags.mbox_busy = 0;
+
                                atomic_dec(&ha->num_pend_mbx_stage2);
                                ql_dbg(ql_dbg_mbx, vha, 0x1010,
                                    "Pending mailbox timeout, exiting.\n");
@@ -274,6 +277,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                atomic_inc(&ha->num_pend_mbx_stage3);
                if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
                    mcp->tov * HZ)) {
+                       if (chip_reset != ha->chip_reset) {
+                               spin_lock_irqsave(&ha->hardware_lock, flags);
+                               ha->flags.mbox_busy = 0;
+                               spin_unlock_irqrestore(&ha->hardware_lock,
+                                   flags);
+                               atomic_dec(&ha->num_pend_mbx_stage2);
+                               atomic_dec(&ha->num_pend_mbx_stage3);
+                               rval = QLA_ABORTED;
+                               goto premature_exit;
+                       }
                        ql_dbg(ql_dbg_mbx, vha, 0x117a,
                            "cmd=%x Timeout.\n", command);
                        spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +295,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 
                } else if (ha->flags.purge_mbox ||
                    chip_reset != ha->chip_reset) {
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
                        ha->flags.mbox_busy = 0;
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        atomic_dec(&ha->num_pend_mbx_stage2);
                        atomic_dec(&ha->num_pend_mbx_stage3);
                        rval = QLA_ABORTED;
@@ -300,9 +315,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                if (IS_P3P_TYPE(ha)) {
                        if (RD_REG_DWORD(&reg->isp82.hint) &
                                HINT_MBX_INT_PENDING) {
+                               ha->flags.mbox_busy = 0;
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
-                               ha->flags.mbox_busy = 0;
                                atomic_dec(&ha->num_pend_mbx_stage2);
                                ql_dbg(ql_dbg_mbx, vha, 0x1012,
                                    "Pending mailbox timeout, exiting.\n");
@@ -320,7 +335,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                while (!ha->flags.mbox_int) {
                        if (ha->flags.purge_mbox ||
                            chip_reset != ha->chip_reset) {
+                               spin_lock_irqsave(&ha->hardware_lock, flags);
                                ha->flags.mbox_busy = 0;
+                               spin_unlock_irqrestore(&ha->hardware_lock,
+                                   flags);
                                atomic_dec(&ha->num_pend_mbx_stage2);
                                rval = QLA_ABORTED;
                                goto premature_exit;
@@ -363,7 +381,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
                if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+                       spin_lock_irqsave(&ha->hardware_lock, flags);
                        ha->flags.mbox_busy = 0;
+                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
                        /* Setting Link-Down error */
                        mcp->mb[0] = MBS_LINK_DOWN_ERROR;
                        ha->mcp = NULL;
@@ -436,7 +457,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                 * then only PCI ERR flag would be set.
                                 * we will do premature exit for above case.
                                 */
+                               spin_lock_irqsave(&ha->hardware_lock, flags);
                                ha->flags.mbox_busy = 0;
+                               spin_unlock_irqrestore(&ha->hardware_lock,
+                                   flags);
                                rval = QLA_FUNCTION_TIMEOUT;
                                goto premature_exit;
                        }
@@ -451,8 +475,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        rval = QLA_FUNCTION_TIMEOUT;
                 }
        }
-
+       spin_lock_irqsave(&ha->hardware_lock, flags);
        ha->flags.mbox_busy = 0;
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        /* Clean up */
        ha->mcp = NULL;
@@ -493,7 +518,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                                qla2xxx_wake_dpc(vha);
                        }
-               } else if (!abort_active) {
+               } else if (current == ha->dpc_thread) {
                        /* call abort directly since we are in the DPC thread */
                        ql_dbg(ql_dbg_mbx, vha, 0x101d,
                            "Timeout, calling abort_isp.\n");
@@ -1486,7 +1511,6 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
        struct req_que *req;
        struct rsp_que *rsp;
 
-       l = l;
        vha = fcport->vha;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
@@ -3072,22 +3096,25 @@ qla24xx_abort_command(srb_t *sp)
        struct scsi_qla_host *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = vha->req;
+       struct qla_qpair *qpair = sp->qpair;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
            "Entered %s.\n", __func__);
 
        if (vha->flags.qpairs_available && sp->qpair)
                req = sp->qpair->req;
+       else
+               return QLA_FUNCTION_FAILED;
 
        if (ql2xasynctmfenable)
                return qla24xx_async_abort_command(sp);
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
+       spin_lock_irqsave(qpair->qp_lock_ptr, flags);
        for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
                if (req->outstanding_cmds[handle] == sp)
                        break;
        }
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
        if (handle == req->num_outstanding_cmds) {
                /* Command not found. */
                return QLA_FUNCTION_FAILED;
@@ -3762,10 +3789,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
        mcp->mb[0] = MBC_PORT_PARAMS;
        mcp->mb[1] = loop_id;
        mcp->mb[2] = BIT_0;
-       if (IS_CNA_CAPABLE(vha->hw))
-               mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
-       else
-               mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+       mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
        mcp->mb[9] = vha->vp_idx;
        mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_3|MBX_1|MBX_0;
index 20d9dc3..7e78e7e 100644 (file)
@@ -506,7 +506,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
                return -EBUSY;
 
        /* Alloc SRB structure */
-       sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+       sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                return -EBUSY;
 
@@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
 {
        int rval;
 
-       if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
+       if (ha->flags.fw_started) {
                rval = ha->isp_ops->abort_command(sp);
                if (!rval && !qla_nvme_wait_on_command(sp))
                        ql_log(ql_log_warn, NULL, 0x2112,
@@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
                    __func__, fcport);
 
                nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
-               init_completion(&fcport->nvme_del_done);
-               nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
-               wait_for_completion(&fcport->nvme_del_done);
        }
 
        if (vha->nvme_local_port) {
index de2bc78..121e18b 100644 (file)
@@ -3699,8 +3699,8 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
                /* Wait for pending cmds (physical and virtual) to complete */
-               if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
-                   WAIT_HOST) == QLA_SUCCESS) {
+               if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+                   WAIT_HOST)) {
                        ql_dbg(ql_dbg_init, vha, 0x00b3,
                            "Done wait for "
                            "pending commands.\n");
index 8fe2d73..8794e54 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/kobject.h>
 #include <linux/slab.h>
 #include <linux/blk-mq-pci.h>
+#include <linux/refcount.h>
+
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -204,7 +206,7 @@ int ql2xasynctmfenable = 1;
 module_param(ql2xasynctmfenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xasynctmfenable,
                "Enables issue of TM IOCBs asynchronously via IOCB mechanism"
-               "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+               "Default is 1 - Issue TM IOCBs via IOCB mechanism.");
 
 int ql2xdontresethba;
 module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
@@ -391,12 +393,14 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
        struct qla_hw_data *ha = vha->hw;
        rsp->qpair = ha->base_qpair;
        rsp->req = req;
+       ha->base_qpair->hw = ha;
        ha->base_qpair->req = req;
        ha->base_qpair->rsp = rsp;
        ha->base_qpair->vha = vha;
        ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
        ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
        ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+       ha->base_qpair->srb_mempool = ha->srb_mempool;
        INIT_LIST_HEAD(&ha->base_qpair->hints_list);
        ha->base_qpair->enable_class_2 = ql2xenableclass2;
        /* init qpair to this cpu. Will adjust at run time. */
@@ -1012,7 +1016,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
        else
                goto qc24_target_busy;
 
-       sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+       sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
        if (!sp)
                goto qc24_host_busy;
 
@@ -1212,10 +1216,14 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
        return return_status;
 }
 
-static void
+static int
 sp_get(struct srb *sp)
 {
-       atomic_inc(&sp->ref_count);
+       if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
+               /* kref get fail */
+               return ENXIO;
+       else
+               return 0;
 }
 
 #define ISP_REG_DISCONNECT 0xffffffffU
@@ -1273,38 +1281,51 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        unsigned long flags;
        int rval, wait = 0;
        struct qla_hw_data *ha = vha->hw;
+       struct qla_qpair *qpair;
 
        if (qla2x00_isp_reg_stat(ha)) {
                ql_log(ql_log_info, vha, 0x8042,
                    "PCI/Register disconnect, exiting.\n");
                return FAILED;
        }
-       if (!CMD_SP(cmd))
-               return SUCCESS;
 
        ret = fc_block_scsi_eh(cmd);
        if (ret != 0)
                return ret;
        ret = SUCCESS;
 
-       id = cmd->device->id;
-       lun = cmd->device->lun;
-
-       spin_lock_irqsave(&ha->hardware_lock, flags);
        sp = (srb_t *) CMD_SP(cmd);
-       if (!sp) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       if (!sp)
+               return SUCCESS;
+
+       qpair = sp->qpair;
+       if (!qpair)
+               return SUCCESS;
+
+       spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+       if (!CMD_SP(cmd)) {
+               /* There's a chance an interrupt could clear
+                * the ptr as part of done & free. */
+               spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+               return SUCCESS;
+       }
+
+       if (sp_get(sp)){
+               /* ref_count is already 0 */
+               spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
                return SUCCESS;
        }
+       spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+       id = cmd->device->id;
+       lun = cmd->device->lun;
 
        ql_dbg(ql_dbg_taskm, vha, 0x8002,
            "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
            vha->host_no, id, lun, sp, cmd, sp->handle);
 
        /* Get a reference to the sp and drop the lock.*/
-       sp_get(sp);
 
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
        rval = ha->isp_ops->abort_command(sp);
        if (rval) {
                if (rval == QLA_FUNCTION_PARAMETER_ERROR)
@@ -1320,14 +1341,29 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
                wait = 1;
        }
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       sp->done(sp, 0);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+       /*
+        * Clear the slot in the outstanding_cmds array if we can't find the
+        * command to reclaim the resources.
+        */
+       if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+               vha->req->outstanding_cmds[sp->handle] = NULL;
+
+       /*
+        * sp->done will do ref_count--
+        * sp_get() took an extra count above
+        */
+       sp->done(sp, DID_RESET << 16);
 
        /* Did the command return during mailbox execution? */
        if (ret == FAILED && !CMD_SP(cmd))
                ret = SUCCESS;
 
+       if (!CMD_SP(cmd))
+               wait = 0;
+
+       spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
        /* Wait for the command to be returned. */
        if (wait) {
                if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
@@ -1721,7 +1757,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
        struct req_que *req;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_cmd *cmd;
-       uint8_t trace = 0;
 
        if (!ha->req_q_map)
                return;
@@ -1731,64 +1766,68 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
                sp = req->outstanding_cmds[cnt];
                if (sp) {
                        req->outstanding_cmds[cnt] = NULL;
-                       if (sp->cmd_type == TYPE_SRB) {
+                       switch (sp->cmd_type) {
+                       case TYPE_SRB:
                                if (sp->type == SRB_NVME_CMD ||
                                    sp->type == SRB_NVME_LS) {
-                                       sp_get(sp);
-                                       spin_unlock_irqrestore(qp->qp_lock_ptr,
-                                           flags);
-                                       qla_nvme_abort(ha, sp, res);
-                                       spin_lock_irqsave(qp->qp_lock_ptr,
-                                           flags);
+                                       if (!sp_get(sp)) {
+                                               /* got sp */
+                                               spin_unlock_irqrestore
+                                                       (qp->qp_lock_ptr,
+                                                        flags);
+                                               qla_nvme_abort(ha, sp, res);
+                                               spin_lock_irqsave
+                                                       (qp->qp_lock_ptr, flags);
+                                       }
                                } else if (GET_CMD_SP(sp) &&
                                    !ha->flags.eeh_busy &&
                                    (!test_bit(ABORT_ISP_ACTIVE,
                                        &vha->dpc_flags)) &&
+                                   !qla2x00_isp_reg_stat(ha) &&
                                    (sp->type == SRB_SCSI_CMD)) {
                                        /*
-                                        * Don't abort commands in
-                                        * adapter during EEH
-                                        * recovery as it's not
+                                        * Don't abort commands in adapter
+                                        * during EEH recovery as it's not
                                         * accessible/responding.
                                         *
-                                        * Get a reference to the sp
-                                        * and drop the lock. The
-                                        * reference ensures this
-                                        * sp->done() call and not the
-                                        * call in qla2xxx_eh_abort()
-                                        * ends the SCSI command (with
-                                        * result 'res').
+                                        * Get a reference to the sp and drop
+                                        * the lock. The reference ensures this
+                                        * sp->done() call and not the call in
+                                        * qla2xxx_eh_abort() ends the SCSI cmd
+                                        * (with result 'res').
                                         */
-                                       sp_get(sp);
-                                       spin_unlock_irqrestore(qp->qp_lock_ptr,
-                                           flags);
-                                       status = qla2xxx_eh_abort(
-                                           GET_CMD_SP(sp));
-                                       spin_lock_irqsave(qp->qp_lock_ptr,
-                                           flags);
-                                       /*
-                                        * Get rid of extra reference
-                                        * if immediate exit from
-                                        * ql2xxx_eh_abort
-                                        */
-                                       if (status == FAILED &&
-                                           (qla2x00_isp_reg_stat(ha)))
-                                               atomic_dec(
-                                                   &sp->ref_count);
+                                       if (!sp_get(sp)) {
+                                               spin_unlock_irqrestore
+                                                       (qp->qp_lock_ptr, flags);
+                                               status = qla2xxx_eh_abort(
+                                                   GET_CMD_SP(sp));
+                                               spin_lock_irqsave
+                                                       (qp->qp_lock_ptr, flags);
+                                       }
                                }
                                sp->done(sp, res);
-                       } else {
+                               break;
+                       case TYPE_TGT_CMD:
                                if (!vha->hw->tgt.tgt_ops || !tgt ||
                                    qla_ini_mode_enabled(vha)) {
-                                       if (!trace)
-                                               ql_dbg(ql_dbg_tgt_mgt,
-                                                   vha, 0xf003,
-                                                   "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
-                                                   vha->dpc_flags);
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+                                           "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+                                           vha->dpc_flags);
                                        continue;
                                }
                                cmd = (struct qla_tgt_cmd *)sp;
                                qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+                               break;
+                       case TYPE_TGT_TMCMD:
+                               /*
+                                * Currently, only ABTS response gets on the
+                                * outstanding_cmds[]
+                                */
+                               ha->tgt.tgt_ops->free_mcmd(
+                                  (struct qla_tgt_mgmt_cmd *)sp);
+                               break;
+                       default:
+                               break;
                        }
                }
        }
@@ -2708,7 +2747,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
                struct scsi_qla_host, iocb_work);
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-       int i = 20;
+       int i = 2;
        unsigned long flags;
 
        if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -2819,6 +2858,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        atomic_set(&ha->num_pend_mbx_stage1, 0);
        atomic_set(&ha->num_pend_mbx_stage2, 0);
        atomic_set(&ha->num_pend_mbx_stage3, 0);
+       atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+       ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
 
        /* Assign ISP specific operations. */
        if (IS_QLA2100(ha)) {
@@ -4249,29 +4290,34 @@ static void
 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
 {
        u32 temp;
+       struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
        *ret_cnt = FW_DEF_EXCHANGES_CNT;
 
        if (max_cnt > vha->hw->max_exchg)
                max_cnt = vha->hw->max_exchg;
 
        if (qla_ini_mode_enabled(vha)) {
-               if (ql2xiniexchg > max_cnt)
-                       ql2xiniexchg = max_cnt;
+               if (vha->ql2xiniexchg > max_cnt)
+                       vha->ql2xiniexchg = max_cnt;
+
+               if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+                       *ret_cnt = vha->ql2xiniexchg;
 
-               if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
-                       *ret_cnt = ql2xiniexchg;
        } else if (qla_tgt_mode_enabled(vha)) {
-               if (ql2xexchoffld > max_cnt)
-                       ql2xexchoffld = max_cnt;
+               if (vha->ql2xexchoffld > max_cnt) {
+                       vha->ql2xexchoffld = max_cnt;
+                       icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
+               }
 
-               if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
-                       *ret_cnt = ql2xexchoffld;
+               if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+                       *ret_cnt = vha->ql2xexchoffld;
        } else if (qla_dual_mode_enabled(vha)) {
-               temp = ql2xiniexchg + ql2xexchoffld;
+               temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
                if (temp > max_cnt) {
-                       ql2xiniexchg -= (temp - max_cnt)/2;
-                       ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+                       vha->ql2xiniexchg -= (temp - max_cnt)/2;
+                       vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
                        temp = max_cnt;
+                       icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
                }
 
                if (temp > FW_DEF_EXCHANGES_CNT)
@@ -4309,6 +4355,12 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
 
        if (totsz != ha->exchoffld_size) {
                qla2x00_free_exchoffld_buffer(ha);
+               if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
+                       ha->exchoffld_size = 0;
+                       ha->flags.exchoffld_enabled = 0;
+                       return QLA_SUCCESS;
+               }
+
                ha->exchoffld_size = totsz;
 
                ql_log(ql_log_info, vha, 0xd016,
@@ -4341,6 +4393,15 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
 
                        return -ENOMEM;
                }
+       } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
+               /* pathological case */
+               qla2x00_free_exchoffld_buffer(ha);
+               ha->exchoffld_size = 0;
+               ha->flags.exchoffld_enabled = 0;
+               ql_log(ql_log_info, vha, 0xd016,
+                   "Exchange offload not enabled: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
+                   ha->exchoffld_size, actual_cnt, size, totsz);
+               return 0;
        }
 
        /* Now configure the dma buffer */
@@ -4356,7 +4417,7 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
                if (qla_ini_mode_enabled(vha))
                        icb->exchange_count = 0;
                else
-                       icb->exchange_count = cpu_to_le16(ql2xexchoffld);
+                       icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
        }
 
        return rval;
@@ -4564,6 +4625,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        vha->host_no = host->host_no;
        vha->hw = ha;
 
+       vha->qlini_mode = ql2x_ini_mode;
+       vha->ql2xexchoffld = ql2xexchoffld;
+       vha->ql2xiniexchg = ql2xiniexchg;
+
        INIT_LIST_HEAD(&vha->vp_fcports);
        INIT_LIST_HEAD(&vha->work_list);
        INIT_LIST_HEAD(&vha->list);
@@ -4579,7 +4644,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 
        spin_lock_init(&vha->work_lock);
        spin_lock_init(&vha->cmd_list_lock);
-       spin_lock_init(&vha->gnl.fcports_lock);
        init_waitqueue_head(&vha->fcport_waitQ);
        init_waitqueue_head(&vha->vref_waitq);
 
@@ -4710,7 +4774,6 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
 qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
-qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
 
@@ -4761,16 +4824,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha,  uint32_t evtcode,
        return qla2x00_post_work(vha, e);
 }
 
-int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+void qla24xx_sched_upd_fcport(fc_port_t *fcport)
 {
-       struct qla_work_evt *e;
+       unsigned long flags;
 
-       e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
-       if (!e)
-               return QLA_FUNCTION_FAILED;
+       if (IS_SW_RESV_ADDR(fcport->d_id))
+               return;
 
-       e->u.fcport.fcport = fcport;
-       return qla2x00_post_work(vha, e);
+       spin_lock_irqsave(&fcport->vha->work_lock, flags);
+       if (fcport->disc_state == DSC_UPD_FCPORT) {
+               spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+               return;
+       }
+       fcport->jiffies_at_registration = jiffies;
+       fcport->sec_since_registration = 0;
+       fcport->next_disc_state = DSC_DELETED;
+       fcport->disc_state = DSC_UPD_FCPORT;
+       spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+
+       queue_work(system_unbound_wq, &fcport->reg_work);
 }
 
 static
@@ -4808,10 +4880,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
                        fcport->d_id = e->u.new_sess.id;
                        fcport->flags |= FCF_FABRIC_DEVICE;
                        fcport->fw_login_state = DSC_LS_PLOGI_PEND;
-                       if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
+                       if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
                                fcport->fc4_type = FC4_TYPE_FCP_SCSI;
 
-                       if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
+                       if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
                                fcport->fc4_type = FC4_TYPE_OTHER;
                                fcport->fc4f_nvme = FC4_TYPE_NVME;
                        }
@@ -4990,19 +5062,12 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                        qla2x00_async_adisc(vha, e->u.logio.fcport,
                            e->u.logio.data);
                        break;
-               case QLA_EVT_ASYNC_ADISC_DONE:
-                       qla2x00_async_adisc_done(vha, e->u.logio.fcport,
-                           e->u.logio.data);
-                       break;
                case QLA_EVT_UEVENT:
                        qla2x00_uevent_emit(vha, e->u.uevent.code);
                        break;
                case QLA_EVT_AENFX:
                        qlafx00_process_aen(vha, e);
                        break;
-               case QLA_EVT_GIDPN:
-                       qla24xx_async_gidpn(vha, e->u.fcport.fcport);
-                       break;
                case QLA_EVT_GPNID:
                        qla24xx_async_gpnid(vha, &e->u.gpnid.id);
                        break;
@@ -5025,9 +5090,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                case QLA_EVT_GPSC:
                        qla24xx_async_gpsc(vha, e->u.fcport.fcport);
                        break;
-               case QLA_EVT_UPD_FCPORT:
-                       qla2x00_update_fcport(vha, e->u.fcport.fcport);
-                       break;
                case QLA_EVT_GNL:
                        qla24xx_async_gnl(vha, e->u.fcport.fcport);
                        break;
@@ -6041,12 +6103,29 @@ qla2x00_do_dpc(void *data)
                if (test_and_clear_bit
                    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
                    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
+                       bool do_reset = true;
+
+                       switch (base_vha->qlini_mode) {
+                       case QLA2XXX_INI_MODE_ENABLED:
+                               break;
+                       case QLA2XXX_INI_MODE_DISABLED:
+                               if (!qla_tgt_mode_enabled(base_vha) &&
+                                   !ha->flags.fw_started)
+                                       do_reset = false;
+                               break;
+                       case QLA2XXX_INI_MODE_DUAL:
+                               if (!qla_dual_mode_enabled(base_vha) &&
+                                   !ha->flags.fw_started)
+                                       do_reset = false;
+                               break;
+                       default:
+                               break;
+                       }
 
-                       ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
-                           "ISP abort scheduled.\n");
-                       if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+                       if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
                            &base_vha->dpc_flags))) {
-
+                               ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+                                   "ISP abort scheduled.\n");
                                if (ha->isp_ops->abort_isp(base_vha)) {
                                        /* failed. retry later */
                                        set_bit(ISP_ABORT_NEEDED,
@@ -6054,10 +6133,9 @@ qla2x00_do_dpc(void *data)
                                }
                                clear_bit(ABORT_ISP_ACTIVE,
                                                &base_vha->dpc_flags);
+                               ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+                                   "ISP abort end.\n");
                        }
-
-                       ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
-                           "ISP abort end.\n");
                }
 
                if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
@@ -6183,17 +6261,28 @@ intr_on_check:
                        mutex_unlock(&ha->mq_lock);
                }
 
-               if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+               if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
+                   &base_vha->dpc_flags)) {
                        ql_log(ql_log_info, base_vha, 0xffffff,
                                "nvme: SET ZIO Activity exchange threshold to %d.\n",
                                                ha->nvme_last_rptd_aen);
-                       if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+                       if (qla27xx_set_zio_threshold(base_vha,
+                           ha->nvme_last_rptd_aen)) {
                                ql_log(ql_log_info, base_vha, 0xffffff,
-                                       "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
-                                               ha->nvme_last_rptd_aen);
+                                   "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+                                   ha->nvme_last_rptd_aen);
                        }
                }
 
+               if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+                   &base_vha->dpc_flags)) {
+                       ql_log(ql_log_info, base_vha, 0xffffff,
+                           "SET ZIO Activity exchange threshold to %d.\n",
+                           ha->last_zio_threshold);
+                       qla27xx_set_zio_threshold(base_vha,
+                           ha->last_zio_threshold);
+               }
+
                if (!IS_QLAFX00(ha))
                        qla2x00_do_dpc_all_vps(base_vha);
 
@@ -6406,13 +6495,24 @@ qla2x00_timer(struct timer_list *t)
         * FC-NVME
         * see if the active AEN count has changed from what was last reported.
         */
-       if (!vha->vp_idx &&
-               atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
-               ha->zio_mode == QLA_ZIO_MODE_6) {
+       if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
+           ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
                ql_log(ql_log_info, vha, 0x3002,
-                       "nvme: Sched: Set ZIO exchange threshold to %d.\n",
-                       ha->nvme_last_rptd_aen);
+                   "nvme: Sched: Set ZIO exchange threshold to %d.\n",
+                   ha->nvme_last_rptd_aen);
                ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+               set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+               start_dpc++;
+       }
+
+       if (!vha->vp_idx &&
+           (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
+           (ha->zio_mode == QLA_ZIO_MODE_6) &&
+           (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+               ql_log(ql_log_info, vha, 0x3002,
+                   "Sched: Set ZIO exchange threshold to %d.\n",
+                   ha->last_zio_threshold);
+               ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
                set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
                start_dpc++;
        }
@@ -6944,6 +7044,9 @@ qla2x00_module_init(void)
        if (ql2xextended_error_logging == 1)
                ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
 
+       if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
+               qla_insert_tgt_attrs();
+
        qla2xxx_transport_template =
            fc_attach_transport(&qla2xxx_transport_functions);
        if (!qla2xxx_transport_template) {
index 8c811b2..3982820 100644 (file)
@@ -141,6 +141,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *,
        struct abts_recv_from_24xx *);
 static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
     uint16_t);
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
+static inline uint32_t qlt_make_handle(struct qla_qpair *);
 
 /*
  * Global Variables
@@ -541,7 +543,6 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
                qlt_response_pkt(host, rsp, pkt);
                break;
        }
-
        default:
                qlt_response_pkt(vha, rsp, pkt);
                break;
@@ -600,14 +601,9 @@ void qla2x00_async_nack_sp_done(void *s, int res)
                        sp->fcport->login_succ = 1;
 
                        vha->fcport_count++;
-
-                       ql_dbg(ql_dbg_disc, vha, 0x20f3,
-                           "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-                           __func__, __LINE__,
-                           sp->fcport->port_name,
-                           vha->fcport_count);
-                       sp->fcport->disc_state = DSC_UPD_FCPORT;
-                       qla24xx_post_upd_fcport_work(vha, sp->fcport);
+                       spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+                       qla24xx_sched_upd_fcport(sp->fcport);
+                       spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
                } else {
                        sp->fcport->login_retry = 0;
                        sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -1230,11 +1226,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
        struct qla_tgt *tgt = sess->tgt;
        unsigned long flags;
+       u16 sec;
 
-       if (sess->disc_state == DSC_DELETE_PEND)
+       switch (sess->disc_state) {
+       case DSC_DELETE_PEND:
                return;
-
-       if (sess->disc_state == DSC_DELETED) {
+       case DSC_DELETED:
                if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
                        wake_up_all(&tgt->waitQ);
                if (sess->vha->fcport_count == 0)
@@ -1243,11 +1240,26 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
                if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
                        !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
                        return;
+               break;
+       case DSC_UPD_FCPORT:
+               /*
+                * This port is not done reporting to upper layer.
+                * let it finish
+                */
+               sess->next_disc_state = DSC_DELETE_PEND;
+               sec = jiffies_to_msecs(jiffies -
+                   sess->jiffies_at_registration)/1000;
+               if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
+                       sess->sec_since_registration = sec;
+                       ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+                           "%s %8phC : Slow Rport registration(%d Sec)\n",
+                           __func__, sess->port_name, sec);
+               }
+               return;
+       default:
+               break;
        }
 
-       if (sess->deleted == QLA_SESS_DELETED)
-               sess->logout_on_delete = 0;
-
        spin_lock_irqsave(&sess->vha->work_lock, flags);
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
                spin_unlock_irqrestore(&sess->vha->work_lock, flags);
@@ -1261,7 +1273,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
        qla24xx_chk_fcp_state(sess);
 
        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
-           "Scheduling sess %p for deletion\n", sess);
+           "Scheduling sess %p for deletion %8phC\n",
+           sess, sess->port_name);
 
        INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
        WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
@@ -1479,27 +1492,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
        struct qla_hw_data *ha = tgt->ha;
        unsigned long flags;
 
+       mutex_lock(&ha->optrom_mutex);
        mutex_lock(&qla_tgt_mutex);
-       if (!vha->fc_vport) {
-               struct Scsi_Host *sh = vha->host;
-               struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
-               bool npiv_vports;
-
-               spin_lock_irqsave(sh->host_lock, flags);
-               npiv_vports = (fc_host->npiv_vports_inuse);
-               spin_unlock_irqrestore(sh->host_lock, flags);
-
-               if (npiv_vports) {
-                       mutex_unlock(&qla_tgt_mutex);
-                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
-                           "NPIV is in use. Can not stop target\n");
-                       return -EPERM;
-               }
-       }
+
        if (tgt->tgt_stop || tgt->tgt_stopped) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
                    "Already in tgt->tgt_stop or tgt_stopped state\n");
                mutex_unlock(&qla_tgt_mutex);
+               mutex_unlock(&ha->optrom_mutex);
                return -EPERM;
        }
 
@@ -1537,6 +1537,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
 
        /* Wait for sessions to clear out (just in case) */
        wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+       mutex_unlock(&ha->optrom_mutex);
+
        return 0;
 }
 EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1566,6 +1568,15 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
            tgt);
+
+       switch (vha->qlini_mode) {
+       case QLA2XXX_INI_MODE_EXCLUSIVE:
+               vha->flags.online = 1;
+               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+               break;
+       default:
+               break;
+       }
 }
 EXPORT_SYMBOL(qlt_stop_phase2);
 
@@ -1715,6 +1726,94 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
        qla2x00_start_iocbs(vha, qpair->req);
 }
 
+static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
+{
+       struct scsi_qla_host *vha = mcmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct abts_resp_to_24xx *resp;
+       uint32_t f_ctl, h;
+       uint8_t *p;
+       int rc;
+       struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
+       struct qla_qpair *qpair = mcmd->qpair;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe006,
+           "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
+           ha, mcmd->fc_tm_rsp);
+
+       rc = qlt_check_reserve_free_req(qpair, 1);
+       if (rc) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+                   "qla_target(%d): %s failed: unable to allocate request packet\n",
+                   vha->vp_idx, __func__);
+               return -EAGAIN;
+       }
+
+       resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
+       memset(resp, 0, sizeof(*resp));
+
+       h = qlt_make_handle(qpair);
+       if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+               /*
+                * CTIO type 7 from the firmware doesn't provide a way to
+                * know the initiator's LOOP ID, hence we can't find
+                * the session and, so, the command.
+                */
+               return -EAGAIN;
+       } else {
+               qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
+       }
+
+       resp->handle = MAKE_HANDLE(qpair->req->id, h);
+       resp->entry_type = ABTS_RESP_24XX;
+       resp->entry_count = 1;
+       resp->nport_handle = abts->nport_handle;
+       resp->vp_index = vha->vp_idx;
+       resp->sof_type = abts->sof_type;
+       resp->exchange_address = abts->exchange_address;
+       resp->fcp_hdr_le = abts->fcp_hdr_le;
+       f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+           F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+           F_CTL_SEQ_INITIATIVE);
+       p = (uint8_t *)&f_ctl;
+       resp->fcp_hdr_le.f_ctl[0] = *p++;
+       resp->fcp_hdr_le.f_ctl[1] = *p++;
+       resp->fcp_hdr_le.f_ctl[2] = *p;
+
+       resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+       resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+       resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+       resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+       resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+       resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+
+       resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+       if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
+               resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+               resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+               resp->payload.ba_acct.low_seq_cnt = 0x0000;
+               resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+               resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+               resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+       } else {
+               resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+               resp->payload.ba_rjt.reason_code =
+                       BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+               /* Other bytes are zero */
+       }
+
+       vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+       /* Memory Barrier */
+       wmb();
+       if (qpair->reqq_start_iocbs)
+               qpair->reqq_start_iocbs(qpair);
+       else
+               qla2x00_start_iocbs(vha, qpair->req);
+
+       return rc;
+}
+
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  */
@@ -1742,6 +1841,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
        }
 
        resp->entry_type = ABTS_RESP_24XX;
+       resp->handle = QLA_TGT_SKIP_HANDLE;
        resp->entry_count = 1;
        resp->nport_handle = abts->nport_handle;
        resp->vp_index = vha->vp_idx;
@@ -1799,15 +1899,13 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  */
 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
-       struct abts_resp_from_24xx_fw *entry)
+    struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
 {
        struct ctio7_to_24xx *ctio;
+       u16 tmp;
+       struct abts_recv_from_24xx *entry;
 
-       ql_dbg(ql_dbg_tgt, vha, 0xe007,
-           "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
-
-       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
-           vha->hw->base_qpair, NULL);
+       ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
        if (ctio == NULL) {
                ql_dbg(ql_dbg_tgt, vha, 0xe04b,
                    "qla_target(%d): %s failed: unable to allocate "
@@ -1815,6 +1913,13 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
                return;
        }
 
+       if (mcmd)
+               /* abts from remote port */
+               entry = &mcmd->orig_iocb.abts;
+       else
+               /* abts from this driver.  */
+               entry = (struct abts_recv_from_24xx *)pkt;
+
        /*
         * We've got on entrance firmware's response on by us generated
         * ABTS response. So, in it ID fields are reversed.
@@ -1826,56 +1931,48 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
        ctio->handle = QLA_TGT_SKIP_HANDLE |    CTIO_COMPLETION_HANDLE_MARK;
        ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
        ctio->vp_index = vha->vp_idx;
-       ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
-       ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
-       ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
        ctio->exchange_addr = entry->exchange_addr_to_abort;
-       ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
-                                           CTIO7_FLAGS_TERMINATE);
-       ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+       tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
 
-       /* Memory Barrier */
-       wmb();
-       qla2x00_start_iocbs(vha, vha->req);
+       if (mcmd) {
+               ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
+               ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
+               ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
 
-       qlt_24xx_send_abts_resp(vha->hw->base_qpair,
-           (struct abts_recv_from_24xx *)entry,
-           FCP_TMF_CMPL, true);
-}
-
-static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
-{
-       struct qla_tgt_sess_op *op;
-       struct qla_tgt_cmd *cmd;
-       unsigned long flags;
+               if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
+                       tmp |= (mcmd->abort_io_attr << 9);
+               else if (qpair->retry_term_cnt & 1)
+                       tmp |= (0x4 << 9);
+       } else {
+               ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+               ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+               ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
 
-       spin_lock_irqsave(&vha->cmd_list_lock, flags);
-       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
-               if (tag == op->atio.u.isp24.exchange_addr) {
-                       op->aborted = true;
-                       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-                       return 1;
-               }
+               if (qpair->retry_term_cnt & 1)
+                       tmp |= (0x4 << 9);
        }
+       ctio->u.status1.flags = cpu_to_le16(tmp);
+       ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
 
-       list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
-               if (tag == op->atio.u.isp24.exchange_addr) {
-                       op->aborted = true;
-                       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-                       return 1;
-               }
-       }
+       ql_dbg(ql_dbg_tgt, vha, 0xe007,
+           "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
+           le16_to_cpu(ctio->u.status1.flags),
+           le16_to_cpu(ctio->u.status1.ox_id),
+           (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
 
-       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
-               if (tag == cmd->atio.u.isp24.exchange_addr) {
-                       cmd->aborted = 1;
-                       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-                       return 1;
-               }
-       }
-       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+       /* Memory Barrier */
+       wmb();
+       if (qpair->reqq_start_iocbs)
+               qpair->reqq_start_iocbs(qpair);
+       else
+               qla2x00_start_iocbs(vha, qpair->req);
+
+       if (mcmd)
+               qlt_build_abts_resp_iocb(mcmd);
+       else
+               qlt_24xx_send_abts_resp(qpair,
+                   (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
 
-       return 0;
 }
 
 /* drop cmds for the given lun
@@ -1970,9 +2067,8 @@ static void qlt_do_tmr_work(struct work_struct *work)
                spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
                switch (mcmd->tmr_func) {
                case QLA_TGT_ABTS:
-                       qlt_24xx_send_abts_resp(mcmd->qpair,
-                           &mcmd->orig_iocb.abts,
-                           FCP_TMF_REJECTED, false);
+                       mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
+                       qlt_build_abts_resp_iocb(mcmd);
                        break;
                case QLA_TGT_LUN_RESET:
                case QLA_TGT_CLEAR_TS:
@@ -2007,12 +2103,6 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct qla_tgt_mgmt_cmd *mcmd;
        struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
 
-       if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
-               /* send TASK_ABORT response immediately */
-               qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
-               return 0;
-       }
-
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
            vha->vp_idx, abts->exchange_addr_to_abort);
@@ -2025,7 +2115,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
                return -ENOMEM;
        }
        memset(mcmd, 0, sizeof(*mcmd));
-
+       mcmd->cmd_type = TYPE_TGT_TMCMD;
        mcmd->sess = sess;
        memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
        mcmd->reset_count = ha->base_qpair->chip_reset;
@@ -2047,6 +2137,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
                if (abort_cmd && abort_cmd->qpair) {
                        mcmd->qpair = abort_cmd->qpair;
                        mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+                       mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+                       mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
                }
        }
 
@@ -2264,6 +2356,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        struct qla_qpair *qpair = mcmd->qpair;
+       bool free_mcmd = true;
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
            "TM response mcmd (%p) status %#x state %#x",
@@ -2302,10 +2395,10 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
                            &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
                }
        } else {
-               if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
-                       qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
-                           mcmd->fc_tm_rsp, false);
-               else
+               if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
+                       qlt_build_abts_resp_iocb(mcmd);
+                       free_mcmd = false;
+               } else
                        qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
                            mcmd->fc_tm_rsp);
        }
@@ -2317,7 +2410,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
         * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
         * qlt_xmit_tm_rsp() returns here..
         */
-       ha->tgt.tgt_ops->free_mcmd(mcmd);
+       if (free_mcmd)
+               ha->tgt.tgt_ops->free_mcmd(mcmd);
+
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 }
 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2330,7 +2425,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
        BUG_ON(cmd->sg_cnt == 0);
 
        prm->sg = (struct scatterlist *)cmd->sg;
-       prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+       prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
            cmd->sg_cnt, cmd->dma_data_direction);
        if (unlikely(prm->seg_cnt == 0))
                goto out_err;
@@ -2357,7 +2452,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
 
                if (cmd->prot_sg_cnt) {
                        prm->prot_sg      = cmd->prot_sg;
-                       prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+                       prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
                                cmd->prot_sg, cmd->prot_sg_cnt,
                                cmd->dma_data_direction);
                        if (unlikely(prm->prot_seg_cnt == 0))
@@ -2392,12 +2487,12 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
 
        qpair = cmd->qpair;
 
-       pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+       dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
            cmd->dma_data_direction);
        cmd->sg_mapped = 0;
 
        if (cmd->prot_sg_cnt)
-               pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+               dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
                        cmd->dma_data_direction);
 
        if (!cmd->ctx)
@@ -3289,7 +3384,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 
 
        cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+       spin_lock(&cmd->cmd_lock);
        cmd->cmd_sent_to_fw = 1;
+       spin_unlock(&cmd->cmd_lock);
+       cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
 
        /* Memory Barrier */
        wmb();
@@ -3367,7 +3465,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
                qlt_load_data_segments(&prm);
 
        cmd->state = QLA_TGT_STATE_NEED_DATA;
+       spin_lock(&cmd->cmd_lock);
        cmd->cmd_sent_to_fw = 1;
+       spin_unlock(&cmd->cmd_lock);
+       cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
 
        /* Memory Barrier */
        wmb();
@@ -3825,10 +3926,10 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
 
 
 /* ha->hardware_lock supposed to be held on entry */
-static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
        struct rsp_que *rsp, uint32_t handle, void *ctio)
 {
-       struct qla_tgt_cmd *cmd = NULL;
+       void *cmd = NULL;
        struct req_que *req;
        int qid = GET_QID(handle);
        uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
@@ -3857,7 +3958,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
                        return NULL;
                }
 
-               cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
+               cmd = (void *) req->outstanding_cmds[h];
                if (unlikely(cmd == NULL)) {
                        ql_dbg(ql_dbg_async, vha, 0xe053,
                            "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3930,7 +4031,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
                return;
        }
 
-       cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+       cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
        if (cmd == NULL)
                return;
 
@@ -3941,12 +4042,20 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
 
        if (unlikely(status != CTIO_SUCCESS)) {
                switch (status & 0xFFFF) {
+               case CTIO_INVALID_RX_ID:
+                       if (printk_ratelimit())
+                               dev_info(&vha->hw->pdev->dev,
+                                   "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
+                                   vha->vp_idx, cmd->atio.u.isp24.attr,
+                                   ((cmd->ctio_flags >> 9) & 0xf),
+                                   cmd->ctio_flags);
+
+                       break;
                case CTIO_LIP_RESET:
                case CTIO_TARGET_RESET:
                case CTIO_ABORTED:
                        /* driver request abort via Terminate exchange */
                case CTIO_TIMEOUT:
-               case CTIO_INVALID_RX_ID:
                        /* They are OK */
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
                            "qla_target(%d): CTIO with "
@@ -3973,7 +4082,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
                                 * Session is already logged out, but we need
                                 * to notify initiator, who's not aware of this
                                 */
-                               cmd->sess->logout_on_delete = 0;
                                cmd->sess->send_els_logo = 1;
                                ql_dbg(ql_dbg_disc, vha, 0x20f8,
                                    "%s %d %8phC post del sess\n",
@@ -4711,6 +4819,12 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
                sess = qlt_find_sess_invalidate_other(vha, wwn,
                    port_id, loop_id, &conflict_sess);
                spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+       } else {
+               ql_dbg(ql_dbg_disc, vha, 0xffff,
+                   "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
+                   __func__, __LINE__, loop_id, port_id.b24);
+               qlt_send_term_imm_notif(vha, iocb, 1);
+               goto out;
        }
 
        if (IS_SW_RESV_ADDR(port_id)) {
@@ -4752,6 +4866,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
                goto out;
        }
 
+       if (sess->disc_state == DSC_UPD_FCPORT) {
+               u16 sec;
+
+               /*
+                * Remote port registration is still going on from
+                * previous login. Allow it to finish before we
+                * accept the new login.
+                */
+               sess->next_disc_state = DSC_DELETE_PEND;
+               sec = jiffies_to_msecs(jiffies -
+                   sess->jiffies_at_registration) / 1000;
+               if (sess->sec_since_registration < sec && sec &&
+                   !(sec % 5)) {
+                       sess->sec_since_registration = sec;
+                       ql_dbg(ql_dbg_disc, vha, 0xffff,
+                           "%s %8phC - Slow Rport registration (%d Sec)\n",
+                           __func__, sess->port_name, sec);
+               }
+
+               if (!conflict_sess)
+                       kmem_cache_free(qla_tgt_plogi_cachep, pla);
+
+               qlt_send_term_imm_notif(vha, iocb, 1);
+               goto out;
+       }
+
        qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
        sess->d_id = port_id;
        sess->login_gen++;
@@ -4910,6 +5050,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 
                if (sess != NULL) {
                        bool delete = false;
+                       int sec;
                        spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
                        switch (sess->fw_login_state) {
                        case DSC_LS_PLOGI_PEND:
@@ -4922,9 +5063,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                        }
 
                        switch (sess->disc_state) {
+                       case DSC_UPD_FCPORT:
+                               spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+                                   flags);
+
+                               sec = jiffies_to_msecs(jiffies -
+                                   sess->jiffies_at_registration)/1000;
+                               if (sess->sec_since_registration < sec && sec &&
+                                   !(sec % 5)) {
+                                       sess->sec_since_registration = sec;
+                                       ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+                                           "%s %8phC : Slow Rport registration(%d Sec)\n",
+                                           __func__, sess->port_name, sec);
+                               }
+                               qlt_send_term_imm_notif(vha, iocb, 1);
+                               return 0;
+
                        case DSC_LOGIN_PEND:
                        case DSC_GPDB:
-                       case DSC_UPD_FCPORT:
                        case DSC_LOGIN_COMPLETE:
                        case DSC_ADISC:
                                delete = false;
@@ -5608,6 +5764,101 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
        tgt->atio_irq_cmd_count--;
 }
 
+/*
+ * qpair lock is assume to be held
+ * rc = 0 : send terminate & abts respond
+ * rc != 0: do not send term & abts respond
+ */
+static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+    struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
+{
+       struct qla_hw_data *ha = vha->hw;
+       int rc = 0;
+
+       /*
+        * Detect unresolved exchange. If the same ABTS is unable
+        * to terminate an existing command and the same ABTS loops
+        * between FW & Driver, then force FW dump. Under 1 jiff,
+        * we should see multiple loops.
+        */
+       if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
+           qpair->retry_term_jiff == jiffies) {
+               /* found existing exchange */
+               qpair->retry_term_cnt++;
+               if (qpair->retry_term_cnt >= 5) {
+                       rc = EIO;
+                       qpair->retry_term_cnt = 0;
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "Unable to send ABTS Respond. Dumping firmware.\n");
+                       ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
+                           vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
+
+                       if (qpair == ha->base_qpair)
+                               ha->isp_ops->fw_dump(vha, 1);
+                       else
+                               ha->isp_ops->fw_dump(vha, 0);
+
+                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
+       } else if (qpair->retry_term_jiff != jiffies) {
+               qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
+               qpair->retry_term_cnt = 0;
+               qpair->retry_term_jiff = jiffies;
+       }
+
+       return rc;
+}
+
+
+static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
+       struct rsp_que *rsp, response_t *pkt)
+{
+       struct abts_resp_from_24xx_fw *entry =
+               (struct abts_resp_from_24xx_fw *)pkt;
+       u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
+       struct qla_tgt_mgmt_cmd *mcmd;
+       struct qla_hw_data *ha = vha->hw;
+
+       mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
+           pkt->handle, pkt);
+       if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
+               ql_dbg(ql_dbg_async, vha, 0xe064,
+                   "qla_target(%d): ABTS Comp without mcmd\n",
+                   vha->vp_idx);
+               return;
+       }
+
+       if (mcmd)
+               vha  = mcmd->vha;
+       vha->vha_tgt.qla_tgt->abts_resp_expected--;
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe038,
+           "ABTS_RESP_24XX: compl_status %x\n",
+           entry->compl_status);
+
+       if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
+               if ((entry->error_subcode1 == 0x1E) &&
+                   (entry->error_subcode2 == 0)) {
+                       if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
+                               ha->tgt.tgt_ops->free_mcmd(mcmd);
+                               return;
+                       }
+                       qlt_24xx_retry_term_exchange(vha, rsp->qpair,
+                           pkt, mcmd);
+               } else {
+                       ql_dbg(ql_dbg_tgt, vha, 0xe063,
+                           "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
+                           vha->vp_idx, entry->compl_status,
+                           entry->error_subcode1,
+                           entry->error_subcode2);
+                       ha->tgt.tgt_ops->free_mcmd(mcmd);
+               }
+       } else {
+               ha->tgt.tgt_ops->free_mcmd(mcmd);
+       }
+}
+
 /* ha->hardware_lock supposed to be held on entry */
 /* called via callback from qla2xxx */
 static void qlt_response_pkt(struct scsi_qla_host *vha,
@@ -5740,41 +5991,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
 
        case ABTS_RESP_24XX:
                if (tgt->abts_resp_expected > 0) {
-                       struct abts_resp_from_24xx_fw *entry =
-                               (struct abts_resp_from_24xx_fw *)pkt;
-                       ql_dbg(ql_dbg_tgt, vha, 0xe038,
-                           "ABTS_RESP_24XX: compl_status %x\n",
-                           entry->compl_status);
-                       tgt->abts_resp_expected--;
-                       if (le16_to_cpu(entry->compl_status) !=
-                           ABTS_RESP_COMPL_SUCCESS) {
-                               if ((entry->error_subcode1 == 0x1E) &&
-                                   (entry->error_subcode2 == 0)) {
-                                       /*
-                                        * We've got a race here: aborted
-                                        * exchange not terminated, i.e.
-                                        * response for the aborted command was
-                                        * sent between the abort request was
-                                        * received and processed.
-                                        * Unfortunately, the firmware has a
-                                        * silly requirement that all aborted
-                                        * exchanges must be explicitely
-                                        * terminated, otherwise it refuses to
-                                        * send responses for the abort
-                                        * requests. So, we have to
-                                        * (re)terminate the exchange and retry
-                                        * the abort response.
-                                        */
-                                       qlt_24xx_retry_term_exchange(vha,
-                                           entry);
-                               } else
-                                       ql_dbg(ql_dbg_tgt, vha, 0xe063,
-                                           "qla_target(%d): ABTS_RESP_24XX "
-                                           "failed %x (subcode %x:%x)",
-                                           vha->vp_idx, entry->compl_status,
-                                           entry->error_subcode1,
-                                           entry->error_subcode2);
-                       }
+                       qlt_handle_abts_completion(vha, rsp, pkt);
                } else {
                        ql_dbg(ql_dbg_tgt, vha, 0xe064,
                            "qla_target(%d): Unexpected ABTS_RESP_24XX "
@@ -5964,10 +6181,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
        case MODE_DUAL:
                if (newfcport) {
                        if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
-                               ql_dbg(ql_dbg_disc, vha, 0x20fe,
-                                  "%s %d %8phC post upd_fcport fcp_cnt %d\n",
-                                  __func__, __LINE__, fcport->port_name, vha->fcport_count);
-                               qla24xx_post_upd_fcport_work(vha, fcport);
+                               qla24xx_sched_upd_fcport(fcport);
                        } else {
                                ql_dbg(ql_dbg_disc, vha, 0x20ff,
                                   "%s %d %8phC post gpsc fcp_cnt %d\n",
@@ -6413,6 +6627,9 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
                if (!(host->hostt->supported_mode & MODE_TARGET))
                        continue;
 
+               if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+                       continue;
+
                spin_lock_irqsave(&ha->hardware_lock, flags);
                if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
                        pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
@@ -6475,15 +6692,15 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
 EXPORT_SYMBOL(qlt_lport_deregister);
 
 /* Must be called under HW lock */
-static void qlt_set_mode(struct scsi_qla_host *vha)
+void qlt_set_mode(struct scsi_qla_host *vha)
 {
-       switch (ql2x_ini_mode) {
+       switch (vha->qlini_mode) {
        case QLA2XXX_INI_MODE_DISABLED:
        case QLA2XXX_INI_MODE_EXCLUSIVE:
                vha->host->active_mode = MODE_TARGET;
                break;
        case QLA2XXX_INI_MODE_ENABLED:
-               vha->host->active_mode = MODE_UNKNOWN;
+               vha->host->active_mode = MODE_INITIATOR;
                break;
        case QLA2XXX_INI_MODE_DUAL:
                vha->host->active_mode = MODE_DUAL;
@@ -6496,7 +6713,7 @@ static void qlt_set_mode(struct scsi_qla_host *vha)
 /* Must be called under HW lock */
 static void qlt_clear_mode(struct scsi_qla_host *vha)
 {
-       switch (ql2x_ini_mode) {
+       switch (vha->qlini_mode) {
        case QLA2XXX_INI_MODE_DISABLED:
                vha->host->active_mode = MODE_UNKNOWN;
                break;
@@ -6532,12 +6749,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
                dump_stack();
                return;
        }
+       if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+               return;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stopped = 0;
        qlt_set_mode(vha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+       mutex_lock(&ha->optrom_mutex);
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+           "%s.\n", __func__);
        if (vha->vp_idx) {
                qla24xx_disable_vp(vha);
                qla24xx_enable_vp(vha);
@@ -6546,6 +6768,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
                qla2xxx_wake_dpc(base_vha);
                qla2x00_wait_for_hba_online(base_vha);
        }
+       mutex_unlock(&ha->optrom_mutex);
 }
 EXPORT_SYMBOL(qlt_enable_vha);
 
@@ -6767,7 +6990,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
                if (qla_tgt_mode_enabled(vha))
                        nv->exchange_count = cpu_to_le16(0xFFFF);
                else                    /* dual */
-                       nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+                       nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
 
                /* Enable target mode */
                nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6846,14 +7069,6 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
                memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
                icb->firmware_options_1 |= cpu_to_le32(BIT_14);
        }
-
-       /* disable ZIO at start time. */
-       if (!vha->flags.init_done) {
-               uint32_t tmp;
-               tmp = le32_to_cpu(icb->firmware_options_2);
-               tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
-               icb->firmware_options_2 = cpu_to_le32(tmp);
-       }
 }
 
 void
@@ -6881,7 +7096,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                if (qla_tgt_mode_enabled(vha))
                        nv->exchange_count = cpu_to_le16(0xFFFF);
                else                    /* dual */
-                       nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+                       nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
 
                /* Enable target mode */
                nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6957,15 +7172,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
                memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
                icb->firmware_options_1 |= cpu_to_le32(BIT_14);
        }
-
-       /* disable ZIO at start time. */
-       if (!vha->flags.init_done) {
-               uint32_t tmp;
-               tmp = le32_to_cpu(icb->firmware_options_2);
-               tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
-               icb->firmware_options_2 = cpu_to_le32(tmp);
-       }
-
 }
 
 void
index 199d3ba..721da59 100644 (file)
@@ -900,6 +900,7 @@ struct qla_tgt_cmd {
        unsigned int aborted:1;
        unsigned int data_work:1;
        unsigned int data_work_free:1;
+       unsigned int released:1;
 
        struct scatterlist *sg; /* cmd data buffer SG vector */
        int sg_cnt;             /* SG segments count */
@@ -908,6 +909,7 @@ struct qla_tgt_cmd {
        u64 unpacked_lun;
        enum dma_data_direction dma_data_direction;
 
+       uint16_t ctio_flags;
        uint16_t vp_idx;
        uint16_t loop_id;       /* to save extra sess dereferences */
        struct qla_tgt *tgt;    /* to save extra sess dereferences */
@@ -956,16 +958,20 @@ struct qla_tgt_sess_work_param {
 };
 
 struct qla_tgt_mgmt_cmd {
+       uint8_t cmd_type;
+       uint8_t pad[3];
        uint16_t tmr_func;
        uint8_t fc_tm_rsp;
+       uint8_t abort_io_attr;
        struct fc_port *sess;
        struct qla_qpair *qpair;
        struct scsi_qla_host *vha;
        struct se_cmd se_cmd;
        struct work_struct free_work;
        unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK BIT_0
+#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
        uint32_t reset_count;
-#define QLA24XX_MGMT_SEND_NACK 1
        struct work_struct work;
        uint64_t unpacked_lun;
        union {
index 3850b28..12bafff 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "10.00.00.08-k"
+#define QLA2XXX_VERSION      "10.00.00.11-k"
 
 #define QLA_DRIVER_MAJOR_VER   10
 #define QLA_DRIVER_MINOR_VER   0
index e03d12a..65053c0 100644 (file)
@@ -277,14 +277,25 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
 static void tcm_qla2xxx_complete_free(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       bool released = false;
+       unsigned long flags;
 
        cmd->cmd_in_wq = 0;
 
        WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
 
+       spin_lock_irqsave(&cmd->cmd_lock, flags);
        cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
        cmd->trc_flags |= TRC_CMD_FREE;
-       transport_generic_free_cmd(&cmd->se_cmd, 0);
+       cmd->cmd_sent_to_fw = 0;
+       if (cmd->released)
+               released = true;
+       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+       if (released)
+               qlt_free_cmd(cmd);
+       else
+               transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
 /*
@@ -325,6 +336,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
 static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd;
+       unsigned long flags;
 
        if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
                struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -332,9 +344,16 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
                qlt_free_mcmd(mcmd);
                return;
        }
-
        cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
-       qlt_free_cmd(cmd);
+
+       spin_lock_irqsave(&cmd->cmd_lock, flags);
+       if (cmd->cmd_sent_to_fw) {
+               cmd->released = 1;
+               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+               qlt_free_cmd(cmd);
+       }
 }
 
 static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -405,7 +424,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
        se_cmd->pi_err = 0;
 
        /*
-        * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+        * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
         * the SGL mappings into PCIe memory for incoming FCP WRITE data.
         */
        return qlt_rdy_to_xfer(cmd);
@@ -499,6 +518,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       unsigned long flags;
 
        /*
         * Ensure that the complete FCP WRITE payload has been received.
@@ -506,6 +526,25 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
         */
        cmd->cmd_in_wq = 0;
 
+       spin_lock_irqsave(&cmd->cmd_lock, flags);
+       cmd->cmd_sent_to_fw = 0;
+
+       if (cmd->released) {
+               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+               qlt_free_cmd(cmd);
+               return;
+       }
+
+       cmd->data_work = 1;
+       if (cmd->aborted) {
+               cmd->data_work_free = 1;
+               spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+               tcm_qla2xxx_free_cmd(cmd);
+               return;
+       }
+       spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
        cmd->qpair->tgt_counters.qla_core_ret_ctio++;
        if (!cmd->write_data_transferred) {
                /*
@@ -718,10 +757,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
        cmd->sg_cnt = 0;
        cmd->offset = 0;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
-       if (cmd->trc_flags & TRC_XMIT_STATUS) {
-               pr_crit("Multiple calls for status = %p.\n", cmd);
-               dump_stack();
-       }
        cmd->trc_flags |= TRC_XMIT_STATUS;
 
        if (se_cmd->data_direction == DMA_FROM_DEVICE) {
index 52b1a0b..1ef74aa 100644 (file)
@@ -766,12 +766,10 @@ int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
        while (drvr_wait) {
                if (ql4xxx_lock_drvr(a) == 0) {
                        ssleep(QL4_LOCK_DRVR_SLEEP);
-                       if (drvr_wait) {
-                               DEBUG2(printk("scsi%ld: %s: Waiting for "
-                                             "Global Init Semaphore(%d)...\n",
-                                             a->host_no,
-                                             __func__, drvr_wait));
-                       }
+                       DEBUG2(printk("scsi%ld: %s: Waiting for "
+                                     "Global Init Semaphore(%d)...\n",
+                                     a->host_no,
+                                     __func__, drvr_wait));
                        drvr_wait -= QL4_LOCK_DRVR_SLEEP;
                } else {
                        DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
index ab3a924..051164f 100644 (file)
@@ -3382,7 +3382,7 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
        if (task->data_count) {
                task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
                                                     task->data_count,
-                                                    PCI_DMA_TODEVICE);
+                                                    DMA_TO_DEVICE);
        }
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -3437,7 +3437,7 @@ static void qla4xxx_task_cleanup(struct iscsi_task *task)
 
        if (task->data_count) {
                dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
-                                task->data_count, PCI_DMA_TODEVICE);
+                                task->data_count, DMA_TO_DEVICE);
        }
 
        DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -9020,25 +9020,16 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
 /**
  * qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
  * @ha: HA context
- *
- * At exit, the @ha's flags.enable_64bit_addressing set to indicated
- * supported addressing method.
  */
 static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
 {
-       int retval;
-
        /* Update our PCI device dma_mask for full 64 bit mask */
-       if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
-               if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
-                       dev_dbg(&ha->pdev->dev,
-                                 "Failed to set 64 bit PCI consistent mask; "
-                                  "using 32 bit.\n");
-                       retval = pci_set_consistent_dma_mask(ha->pdev,
-                                                            DMA_BIT_MASK(32));
-               }
-       } else
-               retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+       if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+               dev_dbg(&ha->pdev->dev,
+                         "Failed to set 64 bit PCI consistent mask; "
+                          "using 32 bit.\n");
+               dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
+       }
 }
 
 static int qla4xxx_slave_alloc(struct scsi_device *sdev)
index ea88906..5c3d6e1 100644 (file)
@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
         * emulated RAID devices, so start with SCSI */
        struct raid_internal *i = ac_to_raid_internal(cont);
 
-#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
-       if (scsi_is_sdev_device(dev)) {
+       if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
                struct scsi_device *sdev = to_scsi_device(dev);
 
                if (i->f->cookie != sdev->host->hostt)
@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
 
                return i->f->is_raid(dev);
        }
-#endif
        /* FIXME: look at other subsystems too */
        return 0;
 }
index b7a8fdf..c736d61 100644 (file)
@@ -338,9 +338,6 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
 
        online = scsi_device_online(sdev);
 
-       SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
-               "%s: rtn: %d\n", __func__, online));
-
        return online;
 }
 EXPORT_SYMBOL(scsi_block_when_processing_errors);
index 6234841..c7fccbb 100644 (file)
@@ -1201,8 +1201,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
 
                count = blk_rq_map_integrity_sg(rq->q, rq->bio,
                                                prot_sdb->table.sgl);
-               BUG_ON(unlikely(count > ivecs));
-               BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+               BUG_ON(count > ivecs);
+               BUG_ON(count > queue_max_integrity_segments(rq->q));
 
                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
@@ -2753,6 +2753,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
                switch (oldstate) {
                case SDEV_RUNNING:
                case SDEV_CREATED_BLOCK:
+               case SDEV_OFFLINE:
                        break;
                default:
                        goto illegal;
index 0cd16e8..0a165b2 100644 (file)
@@ -612,7 +612,6 @@ sas_phy_protocol_attr(identify.target_port_protocols,
 sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
                unsigned long long);
 sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
 sas_phy_linkspeed_attr(negotiated_linkrate);
 sas_phy_linkspeed_attr(minimum_linkrate_hw);
 sas_phy_linkspeed_rw_attr(minimum_linkrate);
@@ -1802,7 +1801,6 @@ sas_attach_transport(struct sas_function_template *ft)
        SETUP_PHY_ATTRIBUTE(device_type);
        SETUP_PHY_ATTRIBUTE(sas_address);
        SETUP_PHY_ATTRIBUTE(phy_identifier);
-       //SETUP_PHY_ATTRIBUTE(port_identifier);
        SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
        SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
        SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
index 8a254bb..c6ad007 100644 (file)
@@ -822,7 +822,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
        if (atomic_read(&sdp->detaching)) {
                if (srp->bio) {
                        scsi_req_free_cmd(scsi_req(srp->rq));
-                       blk_end_request_all(srp->rq, BLK_STS_IOERR);
+                       blk_put_request(srp->rq);
                        srp->rq = NULL;
                }
 
index 2112ea6..a25a07a 100644 (file)
@@ -349,16 +349,16 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
 
 static int pqi_map_single(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *sg_descriptor, void *buffer,
-       size_t buffer_length, int data_direction)
+       size_t buffer_length, enum dma_data_direction data_direction)
 {
        dma_addr_t bus_address;
 
-       if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+       if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
                return 0;
 
-       bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+       bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
                data_direction);
-       if (pci_dma_mapping_error(pci_dev, bus_address))
+       if (dma_mapping_error(&pci_dev->dev, bus_address))
                return -ENOMEM;
 
        put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
@@ -370,15 +370,15 @@ static int pqi_map_single(struct pci_dev *pci_dev,
 
 static void pqi_pci_unmap(struct pci_dev *pci_dev,
        struct pqi_sg_descriptor *descriptors, int num_descriptors,
-       int data_direction)
+       enum dma_data_direction data_direction)
 {
        int i;
 
-       if (data_direction == PCI_DMA_NONE)
+       if (data_direction == DMA_NONE)
                return;
 
        for (i = 0; i < num_descriptors; i++)
-               pci_unmap_single(pci_dev,
+               dma_unmap_single(&pci_dev->dev,
                        (dma_addr_t)get_unaligned_le64(&descriptors[i].address),
                        get_unaligned_le32(&descriptors[i].length),
                        data_direction);
@@ -387,10 +387,9 @@ static void pqi_pci_unmap(struct pci_dev *pci_dev,
 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
        struct pqi_raid_path_request *request, u8 cmd,
        u8 *scsi3addr, void *buffer, size_t buffer_length,
-       u16 vpd_page, int *pci_direction)
+       u16 vpd_page, enum dma_data_direction *dir)
 {
        u8 *cdb;
-       int pci_dir;
 
        memset(request, 0, sizeof(*request));
 
@@ -458,23 +457,21 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 
        switch (request->data_direction) {
        case SOP_READ_FLAG:
-               pci_dir = PCI_DMA_FROMDEVICE;
+               *dir = DMA_FROM_DEVICE;
                break;
        case SOP_WRITE_FLAG:
-               pci_dir = PCI_DMA_TODEVICE;
+               *dir = DMA_TO_DEVICE;
                break;
        case SOP_NO_DIRECTION_FLAG:
-               pci_dir = PCI_DMA_NONE;
+               *dir = DMA_NONE;
                break;
        default:
-               pci_dir = PCI_DMA_BIDIRECTIONAL;
+               *dir = DMA_BIDIRECTIONAL;
                break;
        }
 
-       *pci_direction = pci_dir;
-
        return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
-               buffer, buffer_length, pci_dir);
+               buffer, buffer_length, *dir);
 }
 
 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -516,21 +513,19 @@ static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
        struct bmic_identify_controller *buffer)
 {
        int rc;
-       int pci_direction;
+       enum dma_data_direction dir;
        struct pqi_raid_path_request request;
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
-               sizeof(*buffer), 0, &pci_direction);
+               sizeof(*buffer), 0, &dir);
        if (rc)
                return rc;
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
-
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
        return rc;
 }
 
@@ -538,21 +533,19 @@ static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
        u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 {
        int rc;
-       int pci_direction;
+       enum dma_data_direction dir;
        struct pqi_raid_path_request request;
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
                INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
-               &pci_direction);
+               &dir);
        if (rc)
                return rc;
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
-
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
        return rc;
 }
 
@@ -562,13 +555,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        size_t buffer_length)
 {
        int rc;
-       int pci_direction;
+       enum dma_data_direction dir;
        u16 bmic_device_index;
        struct pqi_raid_path_request request;
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
-               buffer_length, 0, &pci_direction);
+               buffer_length, 0, &dir);
        if (rc)
                return rc;
 
@@ -579,9 +572,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
-
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
        return rc;
 }
 
@@ -590,8 +581,8 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
 {
        int rc;
        struct pqi_raid_path_request request;
-       int pci_direction;
        struct bmic_flush_cache *flush_cache;
+       enum dma_data_direction dir;
 
        /*
         * Don't bother trying to flush the cache if the controller is
@@ -608,16 +599,14 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
                SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
-               sizeof(*flush_cache), 0, &pci_direction);
+               sizeof(*flush_cache), 0, &dir);
        if (rc)
                goto out;
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
-
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 out:
        kfree(flush_cache);
 
@@ -629,20 +618,18 @@ static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
 {
        int rc;
        struct pqi_raid_path_request request;
-       int pci_direction;
+       enum dma_data_direction dir;
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
                BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
-               buffer_length, 0, &pci_direction);
+               buffer_length, 0, &dir);
        if (rc)
                return rc;
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
                0, NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
-
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
        return rc;
 }
 
@@ -793,20 +780,18 @@ static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
        void *buffer, size_t buffer_length)
 {
        int rc;
-       int pci_direction;
+       enum dma_data_direction dir;
        struct pqi_raid_path_request request;
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
-               cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+               cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
        if (rc)
                return rc;
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
-
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
        return rc;
 }
 
@@ -1089,7 +1074,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
        struct pqi_scsi_dev *device)
 {
        int rc;
-       int pci_direction;
+       enum dma_data_direction dir;
        struct pqi_raid_path_request request;
        struct raid_map *raid_map;
 
@@ -1099,15 +1084,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 
        rc = pqi_build_raid_path_request(ctrl_info, &request,
                CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
-               sizeof(*raid_map), 0, &pci_direction);
+               sizeof(*raid_map), 0, &dir);
        if (rc)
                goto error;
 
        rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
                NULL, NO_TIMEOUT);
 
-       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-               pci_direction);
+       pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 
        if (rc)
                goto error;
@@ -3822,7 +3806,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
        rc = pqi_map_single(ctrl_info->pci_dev,
                &request.data.report_device_capability.sg_descriptor,
                capability, sizeof(*capability),
-               PCI_DMA_FROMDEVICE);
+               DMA_FROM_DEVICE);
        if (rc)
                goto out;
 
@@ -3831,7 +3815,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
 
        pqi_pci_unmap(ctrl_info->pci_dev,
                &request.data.report_device_capability.sg_descriptor, 1,
-               PCI_DMA_FROMDEVICE);
+               DMA_FROM_DEVICE);
 
        if (rc)
                goto out;
@@ -4158,7 +4142,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
        rc = pqi_map_single(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors,
                event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
-               PCI_DMA_FROMDEVICE);
+               DMA_FROM_DEVICE);
        if (rc)
                goto out;
 
@@ -4167,7 +4151,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
 
        pqi_pci_unmap(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors, 1,
-               PCI_DMA_FROMDEVICE);
+               DMA_FROM_DEVICE);
 
        if (rc)
                goto out;
@@ -4194,7 +4178,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
        rc = pqi_map_single(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors,
                event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
-               PCI_DMA_TODEVICE);
+               DMA_TO_DEVICE);
        if (rc)
                goto out;
 
@@ -4203,7 +4187,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
 
        pqi_pci_unmap(ctrl_info->pci_dev,
                request.data.report_event_configuration.sg_descriptors, 1,
-               PCI_DMA_TODEVICE);
+               DMA_TO_DEVICE);
 
 out:
        kfree(event_config);
@@ -5534,7 +5518,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 
                rc = pqi_map_single(ctrl_info->pci_dev,
                        &request.sg_descriptors[0], kernel_buffer,
-                       iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+                       iocommand.buf_size, DMA_BIDIRECTIONAL);
                if (rc)
                        goto out;
 
@@ -5548,7 +5532,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
 
        if (iocommand.buf_size > 0)
                pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
-                       PCI_DMA_BIDIRECTIONAL);
+                       DMA_BIDIRECTIONAL);
 
        memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
 
index 5141bd4..ea91658 100644 (file)
@@ -316,9 +316,9 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
        put_unaligned_le32(ctrl_info->max_io_slots,
                &base_struct->error_buffer_num_elements);
 
-       bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
-               sizeof(*base_struct), PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+       bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct,
+               sizeof(*base_struct), DMA_TO_DEVICE);
+       if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) {
                rc = -ENOMEM;
                goto out;
        }
@@ -331,9 +331,8 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
        rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
                &params);
 
-       pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
-               PCI_DMA_TODEVICE);
-
+       dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address,
+                       sizeof(*base_struct), DMA_TO_DEVICE);
 out:
        kfree(base_struct_unaligned);
 
index b106596..e9ccfb9 100644 (file)
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
 
        SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
 
-       pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(snic->pdev, pa)) {
+       pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+       if (dma_mapping_error(&snic->pdev->dev, pa)) {
                SNIC_HOST_ERR(snic->shost,
                              "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
                              buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
 
        ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
        if (ret) {
-               pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+                                DMA_FROM_DEVICE);
                kfree(buf);
                rqi->sge_va = 0;
                snic_release_untagged_req(snic, rqi);
index 8e69548..159ee94 100644 (file)
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
        struct snic_req_info *rqi = NULL;
        unsigned long flags;
 
-       pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+                        DMA_TO_DEVICE);
 
        rqi = req_to_rqi(req);
        spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
        snic_print_desc(__func__, os_buf, len);
 
        /* Map request buffer */
-       pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(snic->pdev, pa)) {
+       pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&snic->pdev->dev, pa)) {
                SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
 
                return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
        spin_lock_irqsave(&snic->wq_lock[q_num], flags);
        desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
        if (desc_avail <= 0) {
-               pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+               dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
                req->req_pa = 0;
                spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
                atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
 
        if (rqi->abort_req) {
                if (rqi->abort_req->req_pa)
-                       pci_unmap_single(snic->pdev,
+                       dma_unmap_single(&snic->pdev->dev,
                                         rqi->abort_req->req_pa,
                                         sizeof(struct snic_host_req),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
 
                mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
        }
 
        if (rqi->dr_req) {
                if (rqi->dr_req->req_pa)
-                       pci_unmap_single(snic->pdev,
+                       dma_unmap_single(&snic->pdev->dev,
                                         rqi->dr_req->req_pa,
                                         sizeof(struct snic_host_req),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
 
                mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
        }
 
        if (rqi->req->req_pa)
-               pci_unmap_single(snic->pdev,
+               dma_unmap_single(&snic->pdev->dev,
                                 rqi->req->req_pa,
                                 rqi->req_len,
-                                PCI_DMA_TODEVICE);
+                                DMA_TO_DEVICE);
 
        mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
 }
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
 
        sgd = req_to_sgl(rqi_to_req(rqi));
        SNIC_BUG_ON(sgd[0].addr == 0);
-       pci_unmap_single(snic->pdev,
+       dma_unmap_single(&snic->pdev->dev,
                         le64_to_cpu(sgd[0].addr),
                         le32_to_cpu(sgd[0].len),
-                        PCI_DMA_FROMDEVICE);
+                        DMA_FROM_DEVICE);
 }
 
 /*
index 7cf70aa..5295277 100644 (file)
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * limitation for the device. Try 43-bit first, and
         * fail to 32-bit.
         */
-       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
        if (ret) {
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (ret) {
                        SNIC_HOST_ERR(shost,
                                      "No Usable DMA Configuration, aborting %d\n",
                                      ret);
-
-                       goto err_rel_regions;
-               }
-
-               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (ret) {
-                       SNIC_HOST_ERR(shost,
-                                     "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
-                                     ret);
-
-                       goto err_rel_regions;
-               }
-       } else {
-               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
-               if (ret) {
-                       SNIC_HOST_ERR(shost,
-                                     "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
-                                     ret);
-
                        goto err_rel_regions;
                }
        }
 
-
        /* Map vNIC resources from BAR0 */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
index d9b2e46..b3650c9 100644 (file)
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
                      CMD_FLAGS(sc));
 
        if (req->u.icmnd.sense_addr)
-               pci_unmap_single(snic->pdev,
+               dma_unmap_single(&snic->pdev->dev,
                                 le64_to_cpu(req->u.icmnd.sense_addr),
                                 SCSI_SENSE_BUFFERSIZE,
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
 
        scsi_dma_unmap(sc);
 
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
                }
        }
 
-       pa = pci_map_single(snic->pdev,
+       pa = dma_map_single(&snic->pdev->dev,
                            sc->sense_buffer,
                            SCSI_SENSE_BUFFERSIZE,
-                           PCI_DMA_FROMDEVICE);
-
-       if (pci_dma_mapping_error(snic->pdev, pa)) {
+                           DMA_FROM_DEVICE);
+       if (dma_mapping_error(&snic->pdev->dev, pa)) {
                SNIC_HOST_ERR(snic->shost,
                              "QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
                              sc->sense_buffer, snic_cmd_tag(sc));
@@ -2001,7 +2000,7 @@ snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
        }
 
 dr_failed:
-       SNIC_BUG_ON(!spin_is_locked(io_lock));
+       lockdep_assert_held(io_lock);
        if (rqi)
                CMD_SP(sc) = NULL;
        spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2603,7 @@ snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
        ret = SUCCESS;
 
 skip_internal_abts:
-       SNIC_BUG_ON(!spin_is_locked(io_lock));
+       lockdep_assert_held(io_lock);
        spin_unlock_irqrestore(io_lock, flags);
 
        return ret;
index dad5fc6..05e374f 100644 (file)
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
        svnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-       ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
-               ring->size_unaligned,
-               &ring->base_addr_unaligned);
-
+       ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+                       ring->size_unaligned, &ring->base_addr_unaligned,
+                       GFP_KERNEL);
        if (!ring->descs_unaligned) {
                pr_err("Failed to allocate ring (size=%d), aborting\n",
                        (int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
        if (ring->descs) {
-               pci_free_consistent(vdev->pdev,
+               dma_free_coherent(&vdev->pdev->dev,
                        ring->size_unaligned,
                        ring->descs_unaligned,
                        ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
        int err = 0;
 
        if (!vdev->fw_info) {
-               vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+               vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_devcmd_fw_info),
-                       &vdev->fw_info_pa);
+                       &vdev->fw_info_pa, GFP_KERNEL);
                if (!vdev->fw_info)
                        return -ENOMEM;
 
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
        int wait = VNIC_DVCMD_TMO;
 
        if (!vdev->stats) {
-               vdev->stats = pci_alloc_consistent(vdev->pdev,
-                       sizeof(struct vnic_stats), &vdev->stats_pa);
+               vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+                       sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
                if (!vdev->stats)
                        return -ENOMEM;
        }
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
        int wait = VNIC_DVCMD_TMO;
 
        if (!vdev->notify) {
-               vdev->notify = pci_alloc_consistent(vdev->pdev,
+               vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
                        sizeof(struct vnic_devcmd_notify),
-                       &vdev->notify_pa);
+                       &vdev->notify_pa, GFP_KERNEL);
                if (!vdev->notify)
                        return -ENOMEM;
        }
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
 {
        if (vdev) {
                if (vdev->notify)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_devcmd_notify),
                                vdev->notify,
                                vdev->notify_pa);
                if (vdev->linkstatus)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(u32),
                                vdev->linkstatus,
                                vdev->linkstatus_pa);
                if (vdev->stats)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_stats),
                                vdev->stats, vdev->stats_pa);
                if (vdev->fw_info)
-                       pci_free_consistent(vdev->pdev,
+                       dma_free_coherent(&vdev->pdev->dev,
                                sizeof(struct vnic_devcmd_fw_info),
                                vdev->fw_info, vdev->fw_info_pa);
                if (vdev->devcmd2)
index 0b1421c..c9a55d0 100644 (file)
@@ -60,30 +60,6 @@ static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
        return readb(esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
-                                     size_t sz, int dir)
-{
-       return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-                                 int num_sg, int dir)
-{
-       return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                                 size_t sz, int dir)
-{
-       dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                             int num_sg, int dir)
-{
-       dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int sun3x_esp_irq_pending(struct esp *esp)
 {
        if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -182,10 +158,6 @@ static int sun3x_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops sun3x_esp_ops = {
        .esp_write8     =       sun3x_esp_write8,
        .esp_read8      =       sun3x_esp_read8,
-       .map_single     =       sun3x_esp_map_single,
-       .map_sg         =       sun3x_esp_map_sg,
-       .unmap_single   =       sun3x_esp_unmap_single,
-       .unmap_sg       =       sun3x_esp_unmap_sg,
        .irq_pending    =       sun3x_esp_irq_pending,
        .reset_dma      =       sun3x_esp_reset_dma,
        .dma_drain      =       sun3x_esp_dma_drain,
@@ -246,7 +218,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
 
        dev_set_drvdata(&dev->dev, esp);
 
-       err = scsi_esp_register(esp, &dev->dev);
+       err = scsi_esp_register(esp);
        if (err)
                goto fail_free_irq;
 
index 747ee64..a11efbc 100644 (file)
@@ -80,7 +80,7 @@ static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
 
 static int esp_sbus_map_regs(struct esp *esp, int hme)
 {
-       struct platform_device *op = esp->dev;
+       struct platform_device *op = to_platform_device(esp->dev);
        struct resource *res;
 
        /* On HME, two reg sets exist, first is DVMA,
@@ -100,11 +100,9 @@ static int esp_sbus_map_regs(struct esp *esp, int hme)
 
 static int esp_sbus_map_command_block(struct esp *esp)
 {
-       struct platform_device *op = esp->dev;
-
-       esp->command_block = dma_alloc_coherent(&op->dev, 16,
+       esp->command_block = dma_alloc_coherent(esp->dev, 16,
                                                &esp->command_block_dma,
-                                               GFP_ATOMIC);
+                                               GFP_KERNEL);
        if (!esp->command_block)
                return -ENOMEM;
        return 0;
@@ -113,7 +111,7 @@ static int esp_sbus_map_command_block(struct esp *esp)
 static int esp_sbus_register_irq(struct esp *esp)
 {
        struct Scsi_Host *host = esp->host;
-       struct platform_device *op = esp->dev;
+       struct platform_device *op = to_platform_device(esp->dev);
 
        host->irq = op->archdata.irqs[0];
        return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
@@ -121,7 +119,7 @@ static int esp_sbus_register_irq(struct esp *esp)
 
 static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
 {
-       struct platform_device *op = esp->dev;
+       struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *dp;
 
        dp = op->dev.of_node;
@@ -143,7 +141,7 @@ done:
 
 static void esp_get_differential(struct esp *esp)
 {
-       struct platform_device *op = esp->dev;
+       struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *dp;
 
        dp = op->dev.of_node;
@@ -155,7 +153,7 @@ static void esp_get_differential(struct esp *esp)
 
 static void esp_get_clock_params(struct esp *esp)
 {
-       struct platform_device *op = esp->dev;
+       struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *bus_dp, *dp;
        int fmhz;
 
@@ -172,7 +170,7 @@ static void esp_get_clock_params(struct esp *esp)
 static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
 {
        struct device_node *dma_dp = dma_of->dev.of_node;
-       struct platform_device *op = esp->dev;
+       struct platform_device *op = to_platform_device(esp->dev);
        struct device_node *dp;
        u8 bursts, val;
 
@@ -212,38 +210,6 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
        return sbus_readb(esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
-                                     size_t sz, int dir)
-{
-       struct platform_device *op = esp->dev;
-
-       return dma_map_single(&op->dev, buf, sz, dir);
-}
-
-static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-                                 int num_sg, int dir)
-{
-       struct platform_device *op = esp->dev;
-
-       return dma_map_sg(&op->dev, sg, num_sg, dir);
-}
-
-static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                                 size_t sz, int dir)
-{
-       struct platform_device *op = esp->dev;
-
-       dma_unmap_single(&op->dev, addr, sz, dir);
-}
-
-static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                             int num_sg, int dir)
-{
-       struct platform_device *op = esp->dev;
-
-       dma_unmap_sg(&op->dev, sg, num_sg, dir);
-}
-
 static int sbus_esp_irq_pending(struct esp *esp)
 {
        if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -255,14 +221,13 @@ static void sbus_esp_reset_dma(struct esp *esp)
 {
        int can_do_burst16, can_do_burst32, can_do_burst64;
        int can_do_sbus64, lim;
-       struct platform_device *op;
+       struct platform_device *op = to_platform_device(esp->dev);
        u32 val;
 
        can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
        can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
        can_do_burst64 = 0;
        can_do_sbus64 = 0;
-       op = esp->dev;
        if (sbus_can_dma_64bit())
                can_do_sbus64 = 1;
        if (sbus_can_burst64())
@@ -474,10 +439,6 @@ static int sbus_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops sbus_esp_ops = {
        .esp_write8     =       sbus_esp_write8,
        .esp_read8      =       sbus_esp_read8,
-       .map_single     =       sbus_esp_map_single,
-       .map_sg         =       sbus_esp_map_sg,
-       .unmap_single   =       sbus_esp_unmap_single,
-       .unmap_sg       =       sbus_esp_unmap_sg,
        .irq_pending    =       sbus_esp_irq_pending,
        .reset_dma      =       sbus_esp_reset_dma,
        .dma_drain      =       sbus_esp_dma_drain,
@@ -504,7 +465,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
        esp = shost_priv(host);
 
        esp->host = host;
-       esp->dev = op;
+       esp->dev = &op->dev;
        esp->ops = &sbus_esp_ops;
 
        if (hme)
@@ -540,7 +501,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
 
        dev_set_drvdata(&op->dev, esp);
 
-       err = scsi_esp_register(esp, &op->dev);
+       err = scsi_esp_register(esp);
        if (err)
                goto fail_free_irq;
 
index bd3f6e2..0a2a545 100644 (file)
@@ -4370,6 +4370,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym
        OUTB(np, HS_PRT, HS_BUSY);
 }
 
+#define sym_printk(lvl, tp, cp, fmt, v...) do { \
+       if (cp)                                                 \
+               scmd_printk(lvl, cp->cmd, fmt, ##v);            \
+       else                                                    \
+               starget_printk(lvl, tp->starget, fmt, ##v);     \
+} while (0)
+
 /*
  *  chip exception handler for programmed interrupts.
  */
@@ -4415,7 +4422,7 @@ static void sym_int_sir(struct sym_hcb *np)
         *  been selected with ATN.  We do not want to handle that.
         */
        case SIR_SEL_ATN_NO_MSG_OUT:
-               scmd_printk(KERN_WARNING, cp->cmd,
+               sym_printk(KERN_WARNING, tp, cp,
                                "No MSG OUT phase after selection with ATN\n");
                goto out_stuck;
        /*
@@ -4423,7 +4430,7 @@ static void sym_int_sir(struct sym_hcb *np)
         *  having reselected the initiator.
         */
        case SIR_RESEL_NO_MSG_IN:
-               scmd_printk(KERN_WARNING, cp->cmd,
+               sym_printk(KERN_WARNING, tp, cp,
                                "No MSG IN phase after reselection\n");
                goto out_stuck;
        /*
@@ -4431,7 +4438,7 @@ static void sym_int_sir(struct sym_hcb *np)
         *  an IDENTIFY.
         */
        case SIR_RESEL_NO_IDENTIFY:
-               scmd_printk(KERN_WARNING, cp->cmd,
+               sym_printk(KERN_WARNING, tp, cp,
                                "No IDENTIFY after reselection\n");
                goto out_stuck;
        /*
@@ -4460,7 +4467,7 @@ static void sym_int_sir(struct sym_hcb *np)
        case SIR_RESEL_ABORTED:
                np->lastmsg = np->msgout[0];
                np->msgout[0] = M_NOOP;
-               scmd_printk(KERN_WARNING, cp->cmd,
+               sym_printk(KERN_WARNING, tp, cp,
                        "message %x sent on bad reselection\n", np->lastmsg);
                goto out;
        /*
index e09fe6a..2ddd426 100644 (file)
@@ -109,3 +109,22 @@ config SCSI_UFS_HISI
 
          Select this if you have UFS controller on Hisilicon chipset.
          If unsure, say N.
+
+config SCSI_UFS_BSG
+       bool "Universal Flash Storage BSG device node"
+       depends on SCSI_UFSHCD
+       select BLK_DEV_BSGLIB
+       help
+         Universal Flash Storage (UFS) is SCSI transport specification for
+         accessing flash storage on digital cameras, mobile phones and
+         consumer electronic devices.
+         A UFS controller communicates with a UFS device by exchanging
+         UFS Protocol Information Units (UPIUs).
+         UPIUs can not only be used as a transport layer for the SCSI protocol
+         but are also used by the UFS native command set.
+         This transport driver supports exchanging UFS protocol information units
+         with a UFS device. See also the ufshcd driver, which is a SCSI driver
+         that supports UFS devices.
+
+         Select this if you need a bsg device node for your UFS controller.
+         If unsure, say N.
index 2c50f03..aca4813 100644 (file)
@@ -4,7 +4,8 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.
 obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
 obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-objs := ufshcd.o ufs-sysfs.o
+ufshcd-core-y                          += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG)     += ufs_bsg.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
 obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
index 75ee590..4a6b2b3 100644 (file)
@@ -70,20 +70,27 @@ static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
 }
 
 static int ufs_qcom_host_clk_get(struct device *dev,
-               const char *name, struct clk **clk_out)
+               const char *name, struct clk **clk_out, bool optional)
 {
        struct clk *clk;
        int err = 0;
 
        clk = devm_clk_get(dev, name);
-       if (IS_ERR(clk)) {
-               err = PTR_ERR(clk);
-               dev_err(dev, "%s: failed to get %s err %d",
-                               __func__, name, err);
-       } else {
+       if (!IS_ERR(clk)) {
                *clk_out = clk;
+               return 0;
        }
 
+       err = PTR_ERR(clk);
+
+       if (optional && err == -ENOENT) {
+               *clk_out = NULL;
+               return 0;
+       }
+
+       if (err != -EPROBE_DEFER)
+               dev_err(dev, "failed to get %s err %d\n", name, err);
+
        return err;
 }
 
@@ -104,11 +111,9 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
        if (!host->is_lane_clks_enabled)
                return;
 
-       if (host->hba->lanes_per_direction > 1)
-               clk_disable_unprepare(host->tx_l1_sync_clk);
+       clk_disable_unprepare(host->tx_l1_sync_clk);
        clk_disable_unprepare(host->tx_l0_sync_clk);
-       if (host->hba->lanes_per_direction > 1)
-               clk_disable_unprepare(host->rx_l1_sync_clk);
+       clk_disable_unprepare(host->rx_l1_sync_clk);
        clk_disable_unprepare(host->rx_l0_sync_clk);
 
        host->is_lane_clks_enabled = false;
@@ -132,24 +137,21 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
        if (err)
                goto disable_rx_l0;
 
-       if (host->hba->lanes_per_direction > 1) {
-               err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+       err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
                        host->rx_l1_sync_clk);
-               if (err)
-                       goto disable_tx_l0;
+       if (err)
+               goto disable_tx_l0;
 
-               err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+       err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
                        host->tx_l1_sync_clk);
-               if (err)
-                       goto disable_rx_l1;
-       }
+       if (err)
+               goto disable_rx_l1;
 
        host->is_lane_clks_enabled = true;
        goto out;
 
 disable_rx_l1:
-       if (host->hba->lanes_per_direction > 1)
-               clk_disable_unprepare(host->rx_l1_sync_clk);
+       clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
        clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -163,25 +165,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
        int err = 0;
        struct device *dev = host->hba->dev;
 
-       err = ufs_qcom_host_clk_get(dev,
-                       "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+       err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+                                       &host->rx_l0_sync_clk, false);
        if (err)
                goto out;
 
-       err = ufs_qcom_host_clk_get(dev,
-                       "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+       err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+                                       &host->tx_l0_sync_clk, false);
        if (err)
                goto out;
 
        /* In case of single lane per direction, don't read lane1 clocks */
        if (host->hba->lanes_per_direction > 1) {
                err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-                       &host->rx_l1_sync_clk);
+                       &host->rx_l1_sync_clk, false);
                if (err)
                        goto out;
 
                err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-                       &host->tx_l1_sync_clk);
+                       &host->tx_l1_sync_clk, true);
        }
 out:
        return err;
index 14e5bf7..58087d3 100644 (file)
@@ -38,9 +38,9 @@
 
 #include <linux/mutex.h>
 #include <linux/types.h>
+#include <uapi/scsi/scsi_bsg_ufs.h>
 
-#define MAX_CDB_SIZE   16
-#define GENERAL_UPIU_REQUEST_SIZE 32
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
 #define QUERY_DESC_MAX_SIZE       255
 #define QUERY_DESC_MIN_SIZE       2
 #define QUERY_DESC_HDR_SIZE       2
@@ -414,6 +414,7 @@ enum {
        MASK_RSP_UPIU_DATA_SEG_LEN      = 0xFFFF,
        MASK_RSP_EXCEPTION_EVENT        = 0x10000,
        MASK_TM_SERVICE_RESP            = 0xFF,
+       MASK_TM_FUNC                    = 0xFF,
 };
 
 /* Task management service response */
@@ -432,65 +433,6 @@ enum ufs_dev_pwr_mode {
        UFS_POWERDOWN_PWR_MODE  = 3,
 };
 
-/**
- * struct utp_upiu_header - UPIU header structure
- * @dword_0: UPIU header DW-0
- * @dword_1: UPIU header DW-1
- * @dword_2: UPIU header DW-2
- */
-struct utp_upiu_header {
-       __be32 dword_0;
-       __be32 dword_1;
-       __be32 dword_2;
-};
-
-/**
- * struct utp_upiu_cmd - Command UPIU structure
- * @data_transfer_len: Data Transfer Length DW-3
- * @cdb: Command Descriptor Block CDB DW-4 to DW-7
- */
-struct utp_upiu_cmd {
-       __be32 exp_data_transfer_len;
-       u8 cdb[MAX_CDB_SIZE];
-};
-
-/**
- * struct utp_upiu_query - upiu request buffer structure for
- * query request.
- * @opcode: command to perform B-0
- * @idn: a value that indicates the particular type of data B-1
- * @index: Index to further identify data B-2
- * @selector: Index to further identify data B-3
- * @reserved_osf: spec reserved field B-4,5
- * @length: number of descriptor bytes to read/write B-6,7
- * @value: Attribute value to be written DW-5
- * @reserved: spec reserved DW-6,7
- */
-struct utp_upiu_query {
-       u8 opcode;
-       u8 idn;
-       u8 index;
-       u8 selector;
-       __be16 reserved_osf;
-       __be16 length;
-       __be32 value;
-       __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_req - general upiu request structure
- * @header:UPIU header structure DW-0 to DW-2
- * @sc: fields structure for scsi command DW-3 to DW-7
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_req {
-       struct utp_upiu_header header;
-       union {
-               struct utp_upiu_cmd sc;
-               struct utp_upiu_query qr;
-       };
-};
-
 /**
  * struct utp_cmd_rsp - Response UPIU structure
  * @residual_transfer_count: Residual transfer count DW-3
@@ -519,36 +461,6 @@ struct utp_upiu_rsp {
        };
 };
 
-/**
- * struct utp_upiu_task_req - Task request UPIU structure
- * @header - UPIU header structure DW0 to DW-2
- * @input_param1: Input parameter 1 DW-3
- * @input_param2: Input parameter 2 DW-4
- * @input_param3: Input parameter 3 DW-5
- * @reserved: Reserved double words DW-6 to DW-7
- */
-struct utp_upiu_task_req {
-       struct utp_upiu_header header;
-       __be32 input_param1;
-       __be32 input_param2;
-       __be32 input_param3;
-       __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_task_rsp - Task Management Response UPIU structure
- * @header: UPIU header structure DW0-DW-2
- * @output_param1: Ouput parameter 1 DW3
- * @output_param2: Output parameter 2 DW4
- * @reserved: Reserved double words DW-5 to DW-7
- */
-struct utp_upiu_task_rsp {
-       struct utp_upiu_header header;
-       __be32 output_param1;
-       __be32 output_param2;
-       __be32 reserved[3];
-};
-
 /**
  * struct ufs_query_req - parameters for building a query request
  * @query_func: UPIU header query function
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
new file mode 100644 (file)
index 0000000..e5f8e54
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bsg endpoint that supports UPIUs
+ *
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#include "ufs_bsg.h"
+
+static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
+                                      struct utp_upiu_query *qr)
+{
+       int desc_size = be16_to_cpu(qr->length);
+       int desc_id = qr->idn;
+       int ret;
+
+       if (desc_size <= 0)
+               return -EINVAL;
+
+       ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
+       if (ret || !*desc_len)
+               return -EINVAL;
+
+       *desc_len = min_t(int, *desc_len, desc_size);
+
+       return 0;
+}
+
+static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
+                                    unsigned int request_len,
+                                    unsigned int reply_len,
+                                    int desc_len, enum query_opcode desc_op)
+{
+       int min_req_len = sizeof(struct ufs_bsg_request);
+       int min_rsp_len = sizeof(struct ufs_bsg_reply);
+
+       if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+               min_req_len += desc_len;
+
+       if (min_req_len > request_len || min_rsp_len > reply_len) {
+               dev_err(hba->dev, "not enough space assigned\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
+                                      struct ufs_bsg_request *bsg_request,
+                                      unsigned int request_len,
+                                      unsigned int reply_len,
+                                      uint8_t *desc_buff, int *desc_len,
+                                      enum query_opcode desc_op)
+{
+       struct utp_upiu_query *qr;
+
+       if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+               dev_err(hba->dev, "unsupported opcode %d\n", desc_op);
+               return -ENOTSUPP;
+       }
+
+       if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC)
+               goto out;
+
+       qr = &bsg_request->upiu_req.qr;
+       if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
+               dev_err(hba->dev, "Illegal desc size\n");
+               return -EINVAL;
+       }
+
+       if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len,
+                                     desc_op))
+               return -EINVAL;
+
+       desc_buff = (uint8_t *)(bsg_request + 1);
+
+out:
+       return 0;
+}
+
+static int ufs_bsg_request(struct bsg_job *job)
+{
+       struct ufs_bsg_request *bsg_request = job->request;
+       struct ufs_bsg_reply *bsg_reply = job->reply;
+       struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
+       unsigned int req_len = job->request_len;
+       unsigned int reply_len = job->reply_len;
+       struct uic_command uc = {};
+       int msgcode;
+       uint8_t *desc_buff = NULL;
+       int desc_len = 0;
+       enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
+       int ret;
+
+       ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op);
+       if (ret)
+               goto out;
+
+       bsg_reply->reply_payload_rcv_len = 0;
+
+       msgcode = bsg_request->msgcode;
+       switch (msgcode) {
+       case UPIU_TRANSACTION_QUERY_REQ:
+               desc_op = bsg_request->upiu_req.qr.opcode;
+               ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len,
+                                                 reply_len, desc_buff,
+                                                 &desc_len, desc_op);
+               if (ret)
+                       goto out;
+
+               /* fall through */
+       case UPIU_TRANSACTION_NOP_OUT:
+       case UPIU_TRANSACTION_TASK_REQ:
+               ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
+                                              &bsg_reply->upiu_rsp, msgcode,
+                                              desc_buff, &desc_len, desc_op);
+               if (ret)
+                       dev_err(hba->dev,
+                               "exe raw upiu: error code %d\n", ret);
+
+               break;
+       case UPIU_TRANSACTION_UIC_CMD:
+               memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+               ret = ufshcd_send_uic_cmd(hba, &uc);
+               if (ret)
+                       dev_dbg(hba->dev,
+                               "send uic cmd: error code %d\n", ret);
+
+               memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
+
+               break;
+       default:
+               ret = -ENOTSUPP;
+               dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
+
+               break;
+       }
+
+out:
+       bsg_reply->result = ret;
+       job->reply_len = sizeof(struct ufs_bsg_reply) +
+                        bsg_reply->reply_payload_rcv_len;
+
+       bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+
+       return ret;
+}
+
+/**
+ * ufs_bsg_remove - detach and remove the added ufs-bsg node
+ *
+ * Should be called when unloading the driver.
+ */
+void ufs_bsg_remove(struct ufs_hba *hba)
+{
+       struct device *bsg_dev = &hba->bsg_dev;
+
+       if (!hba->bsg_queue)
+               return;
+
+       bsg_unregister_queue(hba->bsg_queue);
+
+       device_del(bsg_dev);
+       put_device(bsg_dev);
+}
+
+static inline void ufs_bsg_node_release(struct device *dev)
+{
+       put_device(dev->parent);
+}
+
+/**
+ * ufs_bsg_probe - Add ufs bsg device node
+ * @hba: per adapter object
+ *
+ * Called during initial loading of the driver, and before scsi_scan_host.
+ */
+int ufs_bsg_probe(struct ufs_hba *hba)
+{
+       struct device *bsg_dev = &hba->bsg_dev;
+       struct Scsi_Host *shost = hba->host;
+       struct device *parent = &shost->shost_gendev;
+       struct request_queue *q;
+       int ret;
+
+       device_initialize(bsg_dev);
+
+       bsg_dev->parent = get_device(parent);
+       bsg_dev->release = ufs_bsg_node_release;
+
+       dev_set_name(bsg_dev, "ufs-bsg");
+
+       ret = device_add(bsg_dev);
+       if (ret)
+               goto out;
+
+       q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, 0);
+       if (IS_ERR(q)) {
+               ret = PTR_ERR(q);
+               goto out;
+       }
+
+       hba->bsg_queue = q;
+
+       return 0;
+
+out:
+       dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
+       put_device(bsg_dev);
+       return ret;
+}
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
new file mode 100644 (file)
index 0000000..d099187
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef UFS_BSG_H
+#define UFS_BSG_H
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include "ufshcd.h"
+#include "ufs.h"
+
+#ifdef CONFIG_SCSI_UFS_BSG
+void ufs_bsg_remove(struct ufs_hba *hba);
+int ufs_bsg_probe(struct ufs_hba *hba);
+#else
+static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
+static inline int ufs_bsg_probe(struct ufs_hba *hba) { return 0; }
+#endif
+
+#endif /* UFS_BSG_H */
index c55f38e..23d7cca 100644 (file)
@@ -46,6 +46,7 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 #include "ufs-sysfs.h"
+#include "ufs_bsg.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ufs.h>
@@ -326,14 +327,11 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
                const char *str)
 {
-       struct utp_task_req_desc *descp;
-       struct utp_upiu_task_req *task_req;
        int off = (int)tag - hba->nutrs;
+       struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
 
-       descp = &hba->utmrdl_base_addr[off];
-       task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
-       trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
-                       &task_req->input_param1);
+       trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
+                       &descp->input_param1);
 }
 
 static void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -475,22 +473,13 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
 
 static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
 {
-       struct utp_task_req_desc *tmrdp;
        int tag;
 
        for_each_set_bit(tag, &bitmap, hba->nutmrs) {
-               tmrdp = &hba->utmrdl_base_addr[tag];
+               struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
+
                dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
-               ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
-                               sizeof(struct request_desc_header));
-               dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
-                               tag);
-               ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
-                               sizeof(struct utp_upiu_req));
-               dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
-                               tag);
-               ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
-                               sizeof(struct utp_task_req_desc));
+               ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
        }
 }
 
@@ -645,19 +634,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
        return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 }
 
-/**
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
- * @task_req_descp: pointer to utp_task_req_desc structure
- *
- * This function is used to get the OCS field from UTMRD
- * Returns the OCS field in the UTMRD
- */
-static inline int
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
-{
-       return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
-}
-
 /**
  * ufshcd_get_tm_free_slot - get a free slot for task management request
  * @hba: per adapter instance
@@ -1691,8 +1667,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
 
        hba->clk_gating.state = REQ_CLKS_OFF;
        trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
-       schedule_delayed_work(&hba->clk_gating.gate_work,
-                       msecs_to_jiffies(hba->clk_gating.delay_ms));
+       queue_delayed_work(hba->clk_gating.clk_gating_workq,
+                          &hba->clk_gating.gate_work,
+                          msecs_to_jiffies(hba->clk_gating.delay_ms));
 }
 
 void ufshcd_release(struct ufs_hba *hba)
@@ -1763,6 +1740,34 @@ out:
        return count;
 }
 
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+       char wq_name[sizeof("ufs_clkscaling_00")];
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       INIT_WORK(&hba->clk_scaling.suspend_work,
+                 ufshcd_clk_scaling_suspend_work);
+       INIT_WORK(&hba->clk_scaling.resume_work,
+                 ufshcd_clk_scaling_resume_work);
+
+       snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+                hba->host->host_no);
+       hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+       ufshcd_clkscaling_init_sysfs(hba);
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       destroy_workqueue(hba->clk_scaling.workq);
+       ufshcd_devfreq_remove(hba);
+}
+
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
        char wq_name[sizeof("ufs_clk_gating_00")];
@@ -2055,8 +2060,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
  *
  * Returns 0 only if success.
  */
-static int
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
        int ret;
        unsigned long flags;
@@ -2238,8 +2242,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
        ucd_req_ptr->sc.exp_data_transfer_len =
                cpu_to_be32(lrbp->cmd->sdb.length);
 
-       cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
-       memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+       cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+       memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
        memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
 
        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2258,7 +2262,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
        struct ufs_query *query = &hba->dev_cmd.query;
        u16 len = be16_to_cpu(query->request.upiu_req.length);
-       u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
 
        /* Query request header */
        ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -2280,7 +2283,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
 
        /* Copy the Descriptor */
        if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
-               memcpy(descp, query->descriptor, len);
+               memcpy(ucd_req_ptr + 1, query->descriptor, len);
 
        memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
@@ -4600,46 +4603,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
        }
 }
 
-/**
- * ufshcd_task_req_compl - handle task management request completion
- * @hba: per adapter instance
- * @index: index of the completed request
- * @resp: task management service response
- *
- * Returns non-zero value on error, zero on success
- */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
-{
-       struct utp_task_req_desc *task_req_descp;
-       struct utp_upiu_task_rsp *task_rsp_upiup;
-       unsigned long flags;
-       int ocs_value;
-       int task_result;
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-
-       /* Clear completed tasks from outstanding_tasks */
-       __clear_bit(index, &hba->outstanding_tasks);
-
-       task_req_descp = hba->utmrdl_base_addr;
-       ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
-
-       if (ocs_value == OCS_SUCCESS) {
-               task_rsp_upiup = (struct utp_upiu_task_rsp *)
-                               task_req_descp[index].task_rsp_upiu;
-               task_result = be32_to_cpu(task_rsp_upiup->output_param1);
-               task_result = task_result & MASK_TM_SERVICE_RESP;
-               if (resp)
-                       *resp = (u8)task_result;
-       } else {
-               dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
-                               __func__, ocs_value);
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       return ocs_value;
-}
-
 /**
  * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
  * @lrbp: pointer to local reference block of completed command
@@ -5597,28 +5560,12 @@ out:
        return err;
 }
 
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
-               u8 tm_function, u8 *tm_response)
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+               struct utp_task_req_desc *treq, u8 tm_function)
 {
-       struct utp_task_req_desc *task_req_descp;
-       struct utp_upiu_task_req *task_req_upiup;
-       struct Scsi_Host *host;
+       struct Scsi_Host *host = hba->host;
        unsigned long flags;
-       int free_slot;
-       int err;
-       int task_tag;
-
-       host = hba->host;
+       int free_slot, task_tag, err;
 
        /*
         * Get free slot, sleep if slots are unavailable.
@@ -5629,30 +5576,11 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       task_req_descp = hba->utmrdl_base_addr;
-       task_req_descp += free_slot;
-
-       /* Configure task request descriptor */
-       task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
-       task_req_descp->header.dword_2 =
-                       cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
-       /* Configure task request UPIU */
-       task_req_upiup =
-               (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
        task_tag = hba->nutrs + free_slot;
-       task_req_upiup->header.dword_0 =
-               UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-                                             lun_id, task_tag);
-       task_req_upiup->header.dword_1 =
-               UPIU_HEADER_DWORD(0, tm_function, 0, 0);
-       /*
-        * The host shall provide the same value for LUN field in the basic
-        * header and for Input Parameter.
-        */
-       task_req_upiup->input_param1 = cpu_to_be32(lun_id);
-       task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
+       treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+
+       memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
        ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
 
        /* send command to the controller */
@@ -5682,8 +5610,15 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
                                        __func__, free_slot);
                err = -ETIMEDOUT;
        } else {
-               err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+               err = 0;
+               memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+
                ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               __clear_bit(free_slot, &hba->outstanding_tasks);
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+
        }
 
        clear_bit(free_slot, &hba->tm_condition);
@@ -5694,6 +5629,228 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        return err;
 }
 
+/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+               u8 tm_function, u8 *tm_response)
+{
+       struct utp_task_req_desc treq = { { 0 }, };
+       int ocs_value, err;
+
+       /* Configure task request descriptor */
+       treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+       treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+       /* Configure task request UPIU */
+       treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+                                 cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
+       treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+
+       /*
+        * The host shall provide the same value for LUN field in the basic
+        * header and for Input Parameter.
+        */
+       treq.input_param1 = cpu_to_be32(lun_id);
+       treq.input_param2 = cpu_to_be32(task_id);
+
+       err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
+       if (err == -ETIMEDOUT)
+               return err;
+
+       ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+       if (ocs_value != OCS_SUCCESS)
+               dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+                               __func__, ocs_value);
+       else if (tm_response)
+               *tm_response = be32_to_cpu(treq.output_param1) &
+                               MASK_TM_SERVICE_RESP;
+       return err;
+}
+
+/**
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
+ * @hba:       per-adapter instance
+ * @req_upiu:  upiu request
+ * @rsp_upiu:  upiu reply
+ * @cmd_type:  specifies the type (NOP, Query) of the device management command
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len:  descriptor size, 0 if NA
+ * @desc_op:   descriptor operation
+ *
+ * Those type of requests uses UTP Transfer Request Descriptor - utrd.
+ * Therefore, it "rides" the device management infrastructure: uses its tag and
+ * tasks work queues.
+ *
+ * Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+                                       struct utp_upiu_req *req_upiu,
+                                       struct utp_upiu_req *rsp_upiu,
+                                       u8 *desc_buff, int *buff_len,
+                                       int cmd_type,
+                                       enum query_opcode desc_op)
+{
+       struct ufshcd_lrb *lrbp;
+       int err = 0;
+       int tag;
+       struct completion wait;
+       unsigned long flags;
+       u32 upiu_flags;
+
+       down_read(&hba->clk_scaling_lock);
+
+       wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+       init_completion(&wait);
+       lrbp = &hba->lrb[tag];
+       WARN_ON(lrbp->cmd);
+
+       lrbp->cmd = NULL;
+       lrbp->sense_bufflen = 0;
+       lrbp->sense_buffer = NULL;
+       lrbp->task_tag = tag;
+       lrbp->lun = 0;
+       lrbp->intr_cmd = true;
+       hba->dev_cmd.type = cmd_type;
+
+       switch (hba->ufs_version) {
+       case UFSHCI_VERSION_10:
+       case UFSHCI_VERSION_11:
+               lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+               break;
+       default:
+               lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+               break;
+       }
+
+       /* update the task tag in the request upiu */
+       req_upiu->header.dword_0 |= cpu_to_be32(tag);
+
+       ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+
+       /* just copy the upiu request as it is */
+       memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+       if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
+               /* The Data Segment Area is optional depending upon the query
+                * function value. for WRITE DESCRIPTOR, the data segment
+                * follows right after the tsf.
+                */
+               memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
+               *buff_len = 0;
+       }
+
+       memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+       hba->dev_cmd.complete = &wait;
+
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_send_command(hba, tag);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /*
+        * ignore the returning value here - ufshcd_check_query_response is
+        * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
+        * read the response directly ignoring all errors.
+        */
+       ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+
+       /* just copy the upiu response as it is */
+       memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+
+       ufshcd_put_dev_cmd_tag(hba, tag);
+       wake_up(&hba->dev_cmd.tag_wq);
+       up_read(&hba->clk_scaling_lock);
+       return err;
+}
+
+/**
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
+ * @hba:       per-adapter instance
+ * @req_upiu:  upiu request
+ * @rsp_upiu:  upiu reply - only 8 DW as we do not support scsi commands
+ * @msgcode:   message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len:  descriptor size, 0 if NA
+ * @desc_op:   descriptor operation
+ *
+ * Supports UTP Transfer requests (nop and query), and UTP Task
+ * Management requests.
+ * It is up to the caller to fill the upiu content properly, as it will
+ * be copied without any further input validations.
+ */
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+                            struct utp_upiu_req *req_upiu,
+                            struct utp_upiu_req *rsp_upiu,
+                            int msgcode,
+                            u8 *desc_buff, int *buff_len,
+                            enum query_opcode desc_op)
+{
+       int err;
+       int cmd_type = DEV_CMD_TYPE_QUERY;
+       struct utp_task_req_desc treq = { { 0 }, };
+       int ocs_value;
+       u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+
+       if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) {
+               err = -ENOTSUPP;
+               goto out;
+       }
+
+       switch (msgcode) {
+       case UPIU_TRANSACTION_NOP_OUT:
+               cmd_type = DEV_CMD_TYPE_NOP;
+               /* fall through */
+       case UPIU_TRANSACTION_QUERY_REQ:
+               ufshcd_hold(hba, false);
+               mutex_lock(&hba->dev_cmd.lock);
+               err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
+                                                  desc_buff, buff_len,
+                                                  cmd_type, desc_op);
+               mutex_unlock(&hba->dev_cmd.lock);
+               ufshcd_release(hba);
+
+               break;
+       case UPIU_TRANSACTION_TASK_REQ:
+               treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+               treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+               memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+
+               err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
+               if (err == -ETIMEDOUT)
+                       break;
+
+               ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+               if (ocs_value != OCS_SUCCESS) {
+                       dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
+                               ocs_value);
+                       break;
+               }
+
+               memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+
+               break;
+       default:
+               err = -EINVAL;
+
+               break;
+       }
+
+out:
+       return err;
+}
+
 /**
  * ufshcd_eh_device_reset_handler - device reset handler registered to
  *                                    scsi layer.
@@ -6652,6 +6809,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
                        hba->clk_scaling.is_allowed = true;
                }
 
+               ufs_bsg_probe(hba);
+
                scsi_scan_host(hba->host);
                pm_runtime_put_sync(hba->dev);
        }
@@ -6666,6 +6825,7 @@ out:
         */
        if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
                pm_runtime_put_sync(hba->dev);
+               ufshcd_exit_clk_scaling(hba);
                ufshcd_hba_exit(hba);
        }
 
@@ -7201,12 +7361,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
                ufshcd_suspend_clkscaling(hba);
-               if (ufshcd_is_clkscaling_supported(hba)) {
+               if (ufshcd_is_clkscaling_supported(hba))
                        if (hba->devfreq)
                                ufshcd_suspend_clkscaling(hba);
-                       destroy_workqueue(hba->clk_scaling.workq);
-                       ufshcd_devfreq_remove(hba);
-               }
                ufshcd_setup_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
@@ -7875,12 +8032,14 @@ EXPORT_SYMBOL(ufshcd_shutdown);
  */
 void ufshcd_remove(struct ufs_hba *hba)
 {
+       ufs_bsg_remove(hba);
        ufs_sysfs_remove_nodes(hba->dev);
        scsi_remove_host(hba->host);
        /* disable interrupts */
        ufshcd_disable_intr(hba, hba->intr_mask);
        ufshcd_hba_stop(hba, true);
 
+       ufshcd_exit_clk_scaling(hba);
        ufshcd_exit_clk_gating(hba);
        if (ufshcd_is_clkscaling_supported(hba))
                device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
@@ -8027,7 +8186,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        host->max_lun = UFS_MAX_LUNS;
        host->max_channel = UFSHCD_MAX_CHANNEL;
        host->unique_id = host->host_no;
-       host->max_cmd_len = MAX_CDB_SIZE;
+       host->max_cmd_len = UFS_CDB_SIZE;
 
        hba->max_pwr_info.is_valid = false;
 
@@ -8052,6 +8211,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
        ufshcd_init_clk_gating(hba);
 
+       ufshcd_init_clk_scaling(hba);
+
        /*
         * In order to avoid any spurious interrupt immediately after
         * registering UFS controller interrupt handler, clear any pending UFS
@@ -8090,21 +8251,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
                goto out_remove_scsi_host;
        }
 
-       if (ufshcd_is_clkscaling_supported(hba)) {
-               char wq_name[sizeof("ufs_clkscaling_00")];
-
-               INIT_WORK(&hba->clk_scaling.suspend_work,
-                         ufshcd_clk_scaling_suspend_work);
-               INIT_WORK(&hba->clk_scaling.resume_work,
-                         ufshcd_clk_scaling_resume_work);
-
-               snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
-                        host->host_no);
-               hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
-               ufshcd_clkscaling_init_sysfs(hba);
-       }
-
        /*
         * Set the default power management level for runtime and system PM.
         * Default power saving mode is to keep UFS link in Hibern8 state
@@ -8142,6 +8288,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 out_remove_scsi_host:
        scsi_remove_host(hba->host);
 exit_gating:
+       ufshcd_exit_clk_scaling(hba);
        ufshcd_exit_clk_gating(hba);
 out_disable:
        hba->is_irq_enabled = false;
index 33fdd3f..1a1c2b4 100644 (file)
@@ -702,6 +702,9 @@ struct ufs_hba {
        struct rw_semaphore clk_scaling_lock;
        struct ufs_desc_size desc_size;
        atomic_t scsi_block_reqs_cnt;
+
+       struct device           bsg_dev;
+       struct request_queue    *bsg_queue;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -892,6 +895,15 @@ int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
 
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+                            struct utp_upiu_req *req_upiu,
+                            struct utp_upiu_req *rsp_upiu,
+                            int msgcode,
+                            u8 *desc_buff, int *buff_len,
+                            enum query_opcode desc_op);
+
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
 {
index bb5d9c7..6fa889d 100644 (file)
@@ -433,22 +433,25 @@ struct utp_transfer_req_desc {
        __le16  prd_table_offset;
 };
 
-/**
- * struct utp_task_req_desc - UTMRD structure
- * @header: UTMRD header DW-0 to DW-3
- * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
- * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
+/*
+ * UTMRD structure.
  */
 struct utp_task_req_desc {
-
        /* DW 0-3 */
        struct request_desc_header header;
 
-       /* DW 4-11 */
-       __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
-
-       /* DW 12-19 */
-       __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+       /* DW 4-11 - Task request UPIU structure */
+       struct utp_upiu_header  req_header;
+       __be32                  input_param1;
+       __be32                  input_param2;
+       __be32                  input_param3;
+       __be32                  __reserved1[2];
+
+       /* DW 12-19 - Task Management Response UPIU structure */
+       struct utp_upiu_header  rsp_header;
+       __be32                  output_param1;
+       __be32                  output_param2;
+       __be32                  __reserved2[3];
 };
 
 #endif /* End of Header */
index 0cd947f..6e49102 100644 (file)
@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                        pvscsi_create_sg(ctx, sg, segs);
 
                        e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
-                       ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
-                                                   SGL_SIZE, PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+                       ctx->sglPA = dma_map_single(&adapter->dev->dev,
+                                       ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+                       if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
                                scmd_printk(KERN_ERR, cmd,
                                            "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
                                scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                 * In case there is no S/G list, scsi_sglist points
                 * directly to the buffer.
                 */
-               ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+               ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
                                             cmd->sc_data_direction);
-               if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
+               if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
                        scmd_printk(KERN_ERR, cmd,
                                    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
                        return -ENOMEM;
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
                if (count != 0) {
                        scsi_dma_unmap(cmd);
                        if (ctx->sglPA) {
-                               pci_unmap_single(adapter->dev, ctx->sglPA,
-                                                SGL_SIZE, PCI_DMA_TODEVICE);
+                               dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+                                                SGL_SIZE, DMA_TO_DEVICE);
                                ctx->sglPA = 0;
                        }
                } else
-                       pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
-                                        cmd->sc_data_direction);
+                       dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+                                        bufflen, cmd->sc_data_direction);
        }
        if (cmd->sense_buffer)
-               pci_unmap_single(adapter->dev, ctx->sensePA,
-                                SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+                                SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 {
-       adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-                                                   &adapter->ringStatePA);
+       adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+                       &adapter->ringStatePA, GFP_KERNEL);
        if (!adapter->rings_state)
                return -ENOMEM;
 
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
                                 pvscsi_ring_pages);
        adapter->req_depth = adapter->req_pages
                                        * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-       adapter->req_ring = pci_alloc_consistent(adapter->dev,
-                                                adapter->req_pages * PAGE_SIZE,
-                                                &adapter->reqRingPA);
+       adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+                       adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+                       GFP_KERNEL);
        if (!adapter->req_ring)
                return -ENOMEM;
 
        adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
                                 pvscsi_ring_pages);
-       adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
-                                                adapter->cmp_pages * PAGE_SIZE,
-                                                &adapter->cmpRingPA);
+       adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+                       adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+                       GFP_KERNEL);
        if (!adapter->cmp_ring)
                return -ENOMEM;
 
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 
        adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
                                 pvscsi_msg_ring_pages);
-       adapter->msg_ring = pci_alloc_consistent(adapter->dev,
-                                                adapter->msg_pages * PAGE_SIZE,
-                                                &adapter->msgRingPA);
+       adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+                       adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+                       GFP_KERNEL);
        if (!adapter->msg_ring)
                return -ENOMEM;
        BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
        e->lun[1] = sdev->lun;
 
        if (cmd->sense_buffer) {
-               ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
-                                             SCSI_SENSE_BUFFERSIZE,
-                                             PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
+               ctx->sensePA = dma_map_single(&adapter->dev->dev,
+                               cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
                        scmd_printk(KERN_ERR, cmd,
                                    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
                        ctx->sensePA = 0;
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 
        if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
                if (cmd->sense_buffer) {
-                       pci_unmap_single(adapter->dev, ctx->sensePA,
+                       dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
                                         SCSI_SENSE_BUFFERSIZE,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                        ctx->sensePA = 0;
                }
                return -ENOMEM;
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
        }
 
        if (adapter->rings_state)
-               pci_free_consistent(adapter->dev, PAGE_SIZE,
+               dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
                                    adapter->rings_state, adapter->ringStatePA);
 
        if (adapter->req_ring)
-               pci_free_consistent(adapter->dev,
+               dma_free_coherent(&adapter->dev->dev,
                                    adapter->req_pages * PAGE_SIZE,
                                    adapter->req_ring, adapter->reqRingPA);
 
        if (adapter->cmp_ring)
-               pci_free_consistent(adapter->dev,
+               dma_free_coherent(&adapter->dev->dev,
                                    adapter->cmp_pages * PAGE_SIZE,
                                    adapter->cmp_ring, adapter->cmpRingPA);
 
        if (adapter->msg_ring)
-               pci_free_consistent(adapter->dev,
+               dma_free_coherent(&adapter->dev->dev,
                                    adapter->msg_pages * PAGE_SIZE,
                                    adapter->msg_ring, adapter->msgRingPA);
 }
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
        u32 numPhys = 16;
 
        dev = pvscsi_dev(adapter);
-       config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-                                          &configPagePA);
+       config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+                       &configPagePA, GFP_KERNEL);
        if (!config_page) {
                dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
                goto exit;
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
        } else
                dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
                         header->hostStatus, header->scsiStatus);
-       pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+       dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+                         configPagePA);
 exit:
        return numPhys;
 }
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (pci_enable_device(pdev))
                return error;
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
-           pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
-       } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
-                  pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+       } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
        } else {
                printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
index bb70882..ca8e3ab 100644 (file)
@@ -9,8 +9,6 @@
  *
  * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
  *               Blizzard 1230 DMA and probe function fixes
- *
- * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
  */
 /*
  * ZORRO bus code from:
@@ -159,7 +157,6 @@ struct fastlane_dma_registers {
 struct zorro_esp_priv {
        struct esp *esp;                /* our ESP instance - for Scsi_host* */
        void __iomem *board_base;       /* virtual address (Zorro III board) */
-       int error;                      /* PIO error flag */
        int zorro3;                     /* board is Zorro III */
        unsigned char ctrl_data;        /* shadow copy of ctrl_reg */
 };
@@ -182,30 +179,6 @@ static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
        return readb(esp->regs + (reg * 4UL));
 }
 
-static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
-                                     size_t sz, int dir)
-{
-       return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
-                                 int num_sg, int dir)
-{
-       return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
-                                 size_t sz, int dir)
-{
-       dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
-                             int num_sg, int dir)
-{
-       dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
 static int zorro_esp_irq_pending(struct esp *esp)
 {
        /* check ESP status register; DMA has no status reg. */
@@ -245,7 +218,7 @@ static int fastlane_esp_irq_pending(struct esp *esp)
 static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
                                        u32 dma_len)
 {
-       return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+       return dma_len > 0xFFFF ? 0xFFFF : dma_len;
 }
 
 static void zorro_esp_reset_dma(struct esp *esp)
@@ -274,192 +247,29 @@ static void fastlane_esp_dma_invalidate(struct esp *esp)
        z_writel(0, zep->board_base);
 }
 
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
-{
-       int i = 500000;
-
-       do {
-               unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
-                                                       & ESP_FF_FBYTES;
-
-               if (fbytes)
-                       return fbytes;
-
-               udelay(2);
-       } while (--i);
-
-       pr_err("FIFO is empty (sreg %02x)\n",
-              zorro_esp_read8(esp, ESP_STATUS));
-       return 0;
-}
-
-static inline int zorro_esp_wait_for_intr(struct esp *esp)
-{
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-       int i = 500000;
-
-       do {
-               esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
-               if (esp->sreg & ESP_STAT_INTR)
-                       return 0;
-
-               udelay(2);
-       } while (--i);
-
-       pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
-       zep->error = 1;
-       return 1;
-}
-
-/*
- * PIO macros as used in mac_esp.c.
- * Note that addr and fifo arguments are local-scope variables declared
- * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
- * and addr and fifo are referenced in each use of the macros so there
- * is no need to pass them as macro parameters.
- */
-#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
-       asm volatile ( \
-            "1:     moveb " operands "\n" \
-            "       subqw #1,%1       \n" \
-            "       jbne 1b           \n" \
-            : "+a" (addr), "+r" (reg1) \
-            : "a" (fifo));
-
-#define ZORRO_ESP_PIO_FILL(operands, reg1) \
-       asm volatile ( \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       moveb " operands "\n" \
-            "       subqw #8,%1       \n" \
-            "       subqw #8,%1       \n" \
-            : "+a" (addr), "+r" (reg1) \
-            : "a" (fifo));
-
-#define ZORRO_ESP_FIFO_SIZE 16
-
-static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
-                                u32 dma_count, int write, u8 cmd)
-{
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-       u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
-       u8 phase = esp->sreg & ESP_STAT_PMASK;
-
-       cmd &= ~ESP_CMD_DMA;
-
-       if (write) {
-               u8 *dst = (u8 *)addr;
-               u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
-               scsi_esp_cmd(esp, cmd);
-
-               while (1) {
-                       if (!zorro_esp_wait_for_fifo(esp))
-                               break;
-
-                       *dst++ = zorro_esp_read8(esp, ESP_FDATA);
-                       --esp_count;
-
-                       if (!esp_count)
-                               break;
-
-                       if (zorro_esp_wait_for_intr(esp))
-                               break;
-
-                       if ((esp->sreg & ESP_STAT_PMASK) != phase)
-                               break;
-
-                       esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
-                       if (esp->ireg & mask) {
-                               zep->error = 1;
-                               break;
-                       }
-
-                       if (phase == ESP_MIP)
-                               scsi_esp_cmd(esp, ESP_CMD_MOK);
-
-                       scsi_esp_cmd(esp, ESP_CMD_TI);
-               }
-       } else {        /* unused, as long as we only handle MIP here */
-               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
-               if (esp_count >= ZORRO_ESP_FIFO_SIZE)
-                       ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
-               else
-                       ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
-
-               scsi_esp_cmd(esp, cmd);
-
-               while (esp_count) {
-                       unsigned int n;
-
-                       if (zorro_esp_wait_for_intr(esp))
-                               break;
-
-                       if ((esp->sreg & ESP_STAT_PMASK) != phase)
-                               break;
-
-                       esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
-                       if (esp->ireg & ~ESP_INTR_BSERV) {
-                               zep->error = 1;
-                               break;
-                       }
-
-                       n = ZORRO_ESP_FIFO_SIZE -
-                           (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
-                       if (n > esp_count)
-                               n = esp_count;
-
-                       if (n == ZORRO_ESP_FIFO_SIZE)
-                               ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
-                       else {
-                               esp_count -= n;
-                               ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
-                       }
-
-                       scsi_esp_cmd(esp, ESP_CMD_TI);
-               }
-       }
-}
-
 /* Blizzard 1230/60 SCSI-IV DMA */
 
 static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
                        u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
        struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
        u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-       zep->error = 0;
        /*
         * Use PIO if transferring message bytes to esp->command_block_dma.
         * PIO requires a virtual address, so substitute esp->command_block
         * for addr.
         */
        if (phase == ESP_MIP && addr == esp->command_block_dma) {
-               zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-                                       esp_count, dma_count, write, cmd);
+               esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+                                dma_count, write, cmd);
                return;
        }
 
+       /* Clear the results of a possible prior esp->ops->send_dma_cmd() */
+       esp->send_cmd_error = 0;
+       esp->send_cmd_residual = 0;
+
        if (write)
                /* DMA receive */
                dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -484,7 +294,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
        scsi_esp_cmd(esp, ESP_CMD_DMA);
        zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-       zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
        scsi_esp_cmd(esp, cmd);
 }
@@ -494,18 +303,19 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
 static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
                        u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
        struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
        u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-       zep->error = 0;
        /* Use PIO if transferring message bytes to esp->command_block_dma */
        if (phase == ESP_MIP && addr == esp->command_block_dma) {
-               zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-                                       esp_count, dma_count, write, cmd);
+               esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+                                dma_count, write, cmd);
                return;
        }
 
+       esp->send_cmd_error = 0;
+       esp->send_cmd_residual = 0;
+
        if (write)
                /* DMA receive */
                dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -529,7 +339,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
        scsi_esp_cmd(esp, ESP_CMD_DMA);
        zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-       zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
        scsi_esp_cmd(esp, cmd);
 }
@@ -539,18 +348,19 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
 static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
                        u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
        struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
        u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-       zep->error = 0;
        /* Use PIO if transferring message bytes to esp->command_block_dma */
        if (phase == ESP_MIP && addr == esp->command_block_dma) {
-               zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-                                       esp_count, dma_count, write, cmd);
+               esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+                                dma_count, write, cmd);
                return;
        }
 
+       esp->send_cmd_error = 0;
+       esp->send_cmd_residual = 0;
+
        if (write)
                /* DMA receive */
                dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -574,7 +384,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
        scsi_esp_cmd(esp, ESP_CMD_DMA);
        zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-       zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
        scsi_esp_cmd(esp, cmd);
 }
@@ -589,17 +398,18 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
        u8 phase = esp->sreg & ESP_STAT_PMASK;
        unsigned char *ctrl_data = &zep->ctrl_data;
 
-       zep->error = 0;
        /* Use PIO if transferring message bytes to esp->command_block_dma */
        if (phase == ESP_MIP && addr == esp->command_block_dma) {
-               zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-                                       esp_count, dma_count, write, cmd);
+               esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+                                dma_count, write, cmd);
                return;
        }
 
+       esp->send_cmd_error = 0;
+       esp->send_cmd_residual = 0;
+
        zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-       zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
        if (write) {
                /* DMA receive */
@@ -635,21 +445,21 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
 static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
                        u32 esp_count, u32 dma_count, int write, u8 cmd)
 {
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
        struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
        u8 phase = esp->sreg & ESP_STAT_PMASK;
 
-       zep->error = 0;
        /* Use PIO if transferring message bytes to esp->command_block_dma */
        if (phase == ESP_MIP && addr == esp->command_block_dma) {
-               zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-                                       esp_count, dma_count, write, cmd);
+               esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+                                dma_count, write, cmd);
                return;
        }
 
+       esp->send_cmd_error = 0;
+       esp->send_cmd_residual = 0;
+
        zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-       zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
        if (write) {
                /* DMA receive */
@@ -681,17 +491,18 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
        u8 phase = esp->sreg & ESP_STAT_PMASK;
        unsigned char *ctrl_data = &zep->ctrl_data;
 
-       zep->error = 0;
        /* Use PIO if transferring message bytes to esp->command_block_dma */
        if (phase == ESP_MIP && addr == esp->command_block_dma) {
-               zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
-                                       esp_count, dma_count, write, cmd);
+               esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+                                dma_count, write, cmd);
                return;
        }
 
+       esp->send_cmd_error = 0;
+       esp->send_cmd_residual = 0;
+
        zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
        zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
-       zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
 
        if (write) {
                /* DMA receive */
@@ -724,14 +535,7 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
 
 static int zorro_esp_dma_error(struct esp *esp)
 {
-       struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-
-       /* check for error in case we've been doing PIO */
-       if (zep->error == 1)
-               return 1;
-
-       /* do nothing - there seems to be no way to check for DMA errors */
-       return 0;
+       return esp->send_cmd_error;
 }
 
 /* per-board ESP driver ops */
@@ -739,10 +543,6 @@ static int zorro_esp_dma_error(struct esp *esp)
 static const struct esp_driver_ops blz1230_esp_ops = {
        .esp_write8             = zorro_esp_write8,
        .esp_read8              = zorro_esp_read8,
-       .map_single             = zorro_esp_map_single,
-       .map_sg                 = zorro_esp_map_sg,
-       .unmap_single           = zorro_esp_unmap_single,
-       .unmap_sg               = zorro_esp_unmap_sg,
        .irq_pending            = zorro_esp_irq_pending,
        .dma_length_limit       = zorro_esp_dma_length_limit,
        .reset_dma              = zorro_esp_reset_dma,
@@ -755,10 +555,6 @@ static const struct esp_driver_ops blz1230_esp_ops = {
 static const struct esp_driver_ops blz1230II_esp_ops = {
        .esp_write8             = zorro_esp_write8,
        .esp_read8              = zorro_esp_read8,
-       .map_single             = zorro_esp_map_single,
-       .map_sg                 = zorro_esp_map_sg,
-       .unmap_single           = zorro_esp_unmap_single,
-       .unmap_sg               = zorro_esp_unmap_sg,
        .irq_pending            = zorro_esp_irq_pending,
        .dma_length_limit       = zorro_esp_dma_length_limit,
        .reset_dma              = zorro_esp_reset_dma,
@@ -771,10 +567,6 @@ static const struct esp_driver_ops blz1230II_esp_ops = {
 static const struct esp_driver_ops blz2060_esp_ops = {
        .esp_write8             = zorro_esp_write8,
        .esp_read8              = zorro_esp_read8,
-       .map_single             = zorro_esp_map_single,
-       .map_sg                 = zorro_esp_map_sg,
-       .unmap_single           = zorro_esp_unmap_single,
-       .unmap_sg               = zorro_esp_unmap_sg,
        .irq_pending            = zorro_esp_irq_pending,
        .dma_length_limit       = zorro_esp_dma_length_limit,
        .reset_dma              = zorro_esp_reset_dma,
@@ -787,10 +579,6 @@ static const struct esp_driver_ops blz2060_esp_ops = {
 static const struct esp_driver_ops cyber_esp_ops = {
        .esp_write8             = zorro_esp_write8,
        .esp_read8              = zorro_esp_read8,
-       .map_single             = zorro_esp_map_single,
-       .map_sg                 = zorro_esp_map_sg,
-       .unmap_single           = zorro_esp_unmap_single,
-       .unmap_sg               = zorro_esp_unmap_sg,
        .irq_pending            = cyber_esp_irq_pending,
        .dma_length_limit       = zorro_esp_dma_length_limit,
        .reset_dma              = zorro_esp_reset_dma,
@@ -803,10 +591,6 @@ static const struct esp_driver_ops cyber_esp_ops = {
 static const struct esp_driver_ops cyberII_esp_ops = {
        .esp_write8             = zorro_esp_write8,
        .esp_read8              = zorro_esp_read8,
-       .map_single             = zorro_esp_map_single,
-       .map_sg                 = zorro_esp_map_sg,
-       .unmap_single           = zorro_esp_unmap_single,
-       .unmap_sg               = zorro_esp_unmap_sg,
        .irq_pending            = zorro_esp_irq_pending,
        .dma_length_limit       = zorro_esp_dma_length_limit,
        .reset_dma              = zorro_esp_reset_dma,
@@ -819,10 +603,6 @@ static const struct esp_driver_ops cyberII_esp_ops = {
 static const struct esp_driver_ops fastlane_esp_ops = {
        .esp_write8             = zorro_esp_write8,
        .esp_read8              = zorro_esp_read8,
-       .map_single             = zorro_esp_map_single,
-       .map_sg                 = zorro_esp_map_sg,
-       .unmap_single           = zorro_esp_unmap_single,
-       .unmap_sg               = zorro_esp_unmap_sg,
        .irq_pending            = fastlane_esp_irq_pending,
        .dma_length_limit       = zorro_esp_dma_length_limit,
        .reset_dma              = zorro_esp_reset_dma,
@@ -1039,6 +819,8 @@ static int zorro_esp_probe(struct zorro_dev *z,
                goto fail_unmap_fastlane;
        }
 
+       esp->fifo_reg = esp->regs + ESP_FDATA * 4;
+
        /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
        if (zdd->scsi_option) {
                zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
@@ -1082,7 +864,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
        }
 
        /* register the chip */
-       err = scsi_esp_register(esp, &z->dev);
+       err = scsi_esp_register(esp);
 
        if (err) {
                err = -ENOMEM;
index 8de1601..71888b9 100644 (file)
@@ -598,9 +598,12 @@ out:
        mutex_unlock(&cdev_list_lock);
 }
 
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
 void cxgbit_free_np(struct iscsi_np *np)
 {
        struct cxgbit_np *cnp = np->np_context;
+       struct cxgbit_sock *csk, *tmp;
 
        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
        else
                cxgbit_free_all_np(cnp);
 
+       spin_lock_bh(&cnp->np_accept_lock);
+       list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+               list_del_init(&csk->accept_node);
+               __cxgbit_free_conn(csk);
+       }
+       spin_unlock_bh(&cnp->np_accept_lock);
+
        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
 }
@@ -705,9 +715,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
                              csk->tid, 600, __func__);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-       struct cxgbit_sock *csk = conn->context;
+       struct iscsi_conn *conn = csk->conn;
        bool release = false;
 
        pr_debug("%s: state %d\n",
@@ -716,7 +726,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
-               if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+               if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
@@ -741,6 +751,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
                cxgbit_put_csk(csk);
 }
 
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+       __cxgbit_free_conn(conn->context);
+}
+
 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
 {
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +818,7 @@ void _cxgbit_free_csk(struct kref *kref)
        spin_unlock_bh(&cdev->cskq.lock);
 
        cxgbit_free_skb(csk);
+       cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);
 
        kfree(csk);
@@ -1351,6 +1367,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
+       cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);
 
        spin_lock(&cdev->cskq.lock);
index cc756a1..c1d5a17 100644 (file)
@@ -4355,7 +4355,7 @@ int iscsit_close_session(struct iscsi_session *sess)
        transport_deregister_session(sess->se_sess);
 
        if (sess->sess_ops->ErrorRecoveryLevel == 2)
-               iscsit_free_connection_recovery_entires(sess);
+               iscsit_free_connection_recovery_entries(sess);
 
        iscsit_free_all_ooo_cmdsns(sess);
 
index 718fe9a..1193cf8 100644 (file)
@@ -770,21 +770,8 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
 
        pr_err("Time2Retain timer expired for SID: %u, cleaning up"
                        " iSCSI session.\n", sess->sid);
-       {
-       struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
-       if (tiqn) {
-               spin_lock(&tiqn->sess_err_stats.lock);
-               strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-                       (void *)sess->sess_ops->InitiatorName);
-               tiqn->sess_err_stats.last_sess_failure_type =
-                               ISCSI_SESS_ERR_CXN_TIMEOUT;
-               tiqn->sess_err_stats.cxn_timeout_errors++;
-               atomic_long_inc(&sess->conn_timeout_errors);
-               spin_unlock(&tiqn->sess_err_stats.lock);
-       }
-       }
 
+       iscsit_fill_cxn_timeout_err_stats(sess);
        spin_unlock_bh(&se_tpg->session_lock);
        iscsit_close_session(sess);
 }
index 5efa42b..a211e81 100644 (file)
@@ -1169,15 +1169,21 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
        na = iscsit_tpg_get_node_attrib(sess);
 
        if (!sess->sess_ops->ErrorRecoveryLevel) {
-               pr_debug("Unable to recover from DataOut timeout while"
-                       " in ERL=0.\n");
+               pr_err("Unable to recover from DataOut timeout while"
+                       " in ERL=0, closing iSCSI connection for I_T Nexus"
+                       " %s,i,0x%6phN,%s,t,0x%02x\n",
+                       sess->sess_ops->InitiatorName, sess->isid,
+                       sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
                goto failure;
        }
 
        if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
-               pr_debug("Command ITT: 0x%08x exceeded max retries"
-                       " for DataOUT timeout %u, closing iSCSI connection.\n",
-                       cmd->init_task_tag, na->dataout_timeout_retries);
+               pr_err("Command ITT: 0x%08x exceeded max retries"
+                       " for DataOUT timeout %u, closing iSCSI connection for"
+                       " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+                       cmd->init_task_tag, na->dataout_timeout_retries,
+                       sess->sess_ops->InitiatorName, sess->isid,
+                       sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
                goto failure;
        }
 
@@ -1224,6 +1230,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
 
 failure:
        spin_unlock_bh(&cmd->dataout_timeout_lock);
+       iscsit_fill_cxn_timeout_err_stats(sess);
        iscsit_cause_connection_reinstatement(conn, 0);
        iscsit_dec_conn_usage_count(conn);
 }
index 8df9c90..b08b620 100644 (file)
@@ -125,7 +125,7 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
        return NULL;
 }
 
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
 {
        struct iscsi_cmd *cmd, *cmd_tmp;
        struct iscsi_conn_recovery *cr, *cr_tmp;
index 93e180d..a39b0ca 100644 (file)
@@ -13,7 +13,7 @@ extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32
 extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
 extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
                        struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
 extern int iscsit_remove_active_connection_recovery_entry(
                        struct iscsi_conn_recovery *, struct iscsi_session *);
 extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
index bb90c80..ae3209e 100644 (file)
@@ -578,7 +578,7 @@ int iscsi_login_post_auth_non_zero_tsih(
        }
 
        /*
-        * Check for any connection recovery entires containing CID.
+        * Check for any connection recovery entries containing CID.
         * We use the original ExpStatSN sent in the first login request
         * to acknowledge commands for the failed connection.
         *
index df0a398..bb98882 100644 (file)
@@ -328,10 +328,10 @@ static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
 {
        struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
        struct iscsi_login_stats *lstat = &tiqn->login_stats;
-       unsigned char buf[224];
+       unsigned char buf[ISCSI_IQN_LEN];
 
        spin_lock(&lstat->lock);
-       snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+       snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
                                lstat->last_intr_fail_name : NONE);
        spin_unlock(&lstat->lock);
 
index 49be1e4..1227872 100644 (file)
@@ -915,6 +915,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
 void iscsit_handle_nopin_response_timeout(struct timer_list *t)
 {
        struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
+       struct iscsi_session *sess = conn->sess;
 
        iscsit_inc_conn_usage_count(conn);
 
@@ -925,28 +926,14 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
                return;
        }
 
-       pr_debug("Did not receive response to NOPIN on CID: %hu on"
-               " SID: %u, failing connection.\n", conn->cid,
-                       conn->sess->sid);
+       pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+               " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+               conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+               sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
        conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
        spin_unlock_bh(&conn->nopin_timer_lock);
 
-       {
-       struct iscsi_portal_group *tpg = conn->sess->tpg;
-       struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
-       if (tiqn) {
-               spin_lock_bh(&tiqn->sess_err_stats.lock);
-               strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-                               conn->sess->sess_ops->InitiatorName);
-               tiqn->sess_err_stats.last_sess_failure_type =
-                               ISCSI_SESS_ERR_CXN_TIMEOUT;
-               tiqn->sess_err_stats.cxn_timeout_errors++;
-               atomic_long_inc(&conn->sess->conn_timeout_errors);
-               spin_unlock_bh(&tiqn->sess_err_stats.lock);
-       }
-       }
-
+       iscsit_fill_cxn_timeout_err_stats(sess);
        iscsit_cause_connection_reinstatement(conn, 0);
        iscsit_dec_conn_usage_count(conn);
 }
@@ -1405,3 +1392,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
 
        return tpg->tpg_tiqn;
 }
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+{
+       struct iscsi_portal_group *tpg = sess->tpg;
+       struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+       if (!tiqn)
+               return;
+
+       spin_lock_bh(&tiqn->sess_err_stats.lock);
+       strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+                       sess->sess_ops->InitiatorName,
+                       sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+       tiqn->sess_err_stats.last_sess_failure_type =
+                       ISCSI_SESS_ERR_CXN_TIMEOUT;
+       tiqn->sess_err_stats.cxn_timeout_errors++;
+       atomic_long_inc(&sess->conn_timeout_errors);
+       spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
index d66dfc2..68e8480 100644 (file)
@@ -67,5 +67,6 @@ extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
 extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
 extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
 extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
 
 #endif /*** ISCSI_TARGET_UTIL_H ***/
index ce1321a..b5ed9c3 100644 (file)
@@ -514,7 +514,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
                }
 
                /* Always in 512 byte units for Linux/Block */
-               block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+               block_lba += sg->length >> SECTOR_SHIFT;
                sectors -= 1;
        }
 
@@ -635,14 +635,15 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 }
 
 static int
-iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+                struct sg_mapping_iter *miter)
 {
        struct se_device *dev = cmd->se_dev;
        struct blk_integrity *bi;
        struct bio_integrity_payload *bip;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-       struct scatterlist *sg;
-       int i, rc;
+       int rc;
+       size_t resid, len;
 
        bi = bdev_get_integrity(ib_dev->ibd_bd);
        if (!bi) {
@@ -650,31 +651,39 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
                return -ENODEV;
        }
 
-       bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+       bip = bio_integrity_alloc(bio, GFP_NOIO,
+                       min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
        if (IS_ERR(bip)) {
                pr_err("Unable to allocate bio_integrity_payload\n");
                return PTR_ERR(bip);
        }
 
-       bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
-                        dev->prot_length;
-       bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+       bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+       bip_set_seed(bip, bio->bi_iter.bi_sector);
 
        pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
                 (unsigned long long)bip->bip_iter.bi_sector);
 
-       for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+       resid = bip->bip_iter.bi_size;
+       while (resid > 0 && sg_miter_next(miter)) {
 
-               rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
-                                           sg->offset);
-               if (rc != sg->length) {
+               len = min_t(size_t, miter->length, resid);
+               rc = bio_integrity_add_page(bio, miter->page, len,
+                                           offset_in_page(miter->addr));
+               if (rc != len) {
                        pr_err("bio_integrity_add_page() failed; %d\n", rc);
+                       sg_miter_stop(miter);
                        return -ENOMEM;
                }
 
-               pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
-                        sg_page(sg), sg->length, sg->offset);
+               pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+                         miter->page, len, offset_in_page(miter->addr));
+
+               resid -= len;
+               if (len < miter->length)
+                       miter->consumed -= miter->length - len;
        }
+       sg_miter_stop(miter);
 
        return 0;
 }
@@ -686,12 +695,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        struct se_device *dev = cmd->se_dev;
        sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
-       struct bio *bio, *bio_start;
+       struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        unsigned bio_cnt;
-       int i, op, op_flags = 0;
+       int i, rc, op, op_flags = 0;
+       struct sg_mapping_iter prot_miter;
 
        if (data_direction == DMA_TO_DEVICE) {
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -726,13 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
        if (!bio)
                goto fail_free_ibr;
 
-       bio_start = bio;
        bio_list_init(&list);
        bio_list_add(&list, bio);
 
        refcount_set(&ibr->pending, 2);
        bio_cnt = 1;
 
+       if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+               sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+                              op == REQ_OP_READ ? SG_MITER_FROM_SG :
+                                                  SG_MITER_TO_SG);
+
        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
@@ -741,6 +755,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
+                       if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+                               rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+                               if (rc)
+                                       goto fail_put_bios;
+                       }
+
                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list);
                                bio_cnt = 0;
@@ -757,12 +777,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                }
 
                /* Always in 512 byte units for Linux/Block */
-               block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+               block_lba += sg->length >> SECTOR_SHIFT;
                sg_num--;
        }
 
        if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
-               int rc = iblock_alloc_bip(cmd, bio_start);
+               rc = iblock_alloc_bip(cmd, bio, &prot_miter);
                if (rc)
                        goto fail_put_bios;
        }
index 9cc3843..cefc641 100644 (file)
@@ -9,7 +9,6 @@
 #define IBLOCK_VERSION         "4.0"
 
 #define IBLOCK_MAX_CDBS                16
-#define IBLOCK_LBA_SHIFT       9
 
 struct iblock_req {
        refcount_t pending;
index ebac2b4..1ac1f7d 100644 (file)
@@ -360,6 +360,10 @@ static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
        unsigned int offset;
        sense_reason_t ret = TCM_NO_SENSE;
        int i, count;
+
+       if (!success)
+               return 0;
+
        /*
         * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
         *
@@ -425,14 +429,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
        struct se_device *dev = cmd->se_dev;
        sense_reason_t ret = TCM_NO_SENSE;
 
-       /*
-        * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
-        * within target_complete_ok_work() if the command was successfully
-        * sent to the backend driver.
-        */
        spin_lock_irq(&cmd->t_state_lock);
-       if (cmd->transport_state & CMD_T_SENT) {
-               cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+       if (success) {
                *post_ret = 1;
 
                if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
@@ -453,7 +451,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
                                                 int *post_ret)
 {
        struct se_device *dev = cmd->se_dev;
-       struct scatterlist *write_sg = NULL, *sg;
+       struct sg_table write_tbl = { };
+       struct scatterlist *write_sg, *sg;
        unsigned char *buf = NULL, *addr;
        struct sg_mapping_iter m;
        unsigned int offset = 0, len;
@@ -494,14 +493,12 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
                goto out;
        }
 
-       write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
-                                GFP_KERNEL);
-       if (!write_sg) {
+       if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
                pr_err("Unable to allocate compare_and_write sg\n");
                ret = TCM_OUT_OF_RESOURCES;
                goto out;
        }
-       sg_init_table(write_sg, cmd->t_data_nents);
+       write_sg = write_tbl.sgl;
        /*
         * Setup verify and write data payloads from total NumberLBAs.
         */
@@ -597,7 +594,7 @@ out:
         * sbc_compare_and_write() before the original READ I/O submission.
         */
        up(&dev->caw_sem);
-       kfree(write_sg);
+       sg_free_table(&write_tbl);
        kfree(buf);
        return ret;
 }
index 86c0156..4cf33e2 100644 (file)
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
 void transport_generic_request_failure(struct se_cmd *cmd,
                sense_reason_t sense_reason)
 {
-       int ret = 0, post_ret = 0;
+       int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
                 sense_reason);
@@ -1789,13 +1789,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
         */
        transport_complete_task_attr(cmd);
 
-       /*
-        * Handle special case for COMPARE_AND_WRITE failure, where the
-        * callback is expected to drop the per device ->caw_sem.
-        */
-       if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-            cmd->transport_complete_callback)
-               cmd->transport_complete_callback(cmd, false, &post_ret);
+       if (cmd->transport_complete_callback)
+               cmd->transport_complete_callback(cmd, false, NULL);
 
        if (transport_check_aborted_status(cmd, 1))
                return;
@@ -2012,7 +2007,7 @@ void target_execute_cmd(struct se_cmd *cmd)
         * Determine if frontend context caller is requesting the stopping of
         * this command for frontend exceptions.
         *
-        * If the received CDB has aleady been aborted stop processing it here.
+        * If the received CDB has already been aborted stop processing it here.
         */
        spin_lock_irq(&cmd->t_state_lock);
        if (__transport_check_aborted_status(cmd, 1)) {
@@ -2516,7 +2511,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
        }
 
        /*
-        * Determine is the TCM fabric module has already allocated physical
+        * Determine if the TCM fabric module has already allocated physical
         * memory, and is directly calling transport_generic_map_mem_to_cmd()
         * beforehand.
         */
@@ -2754,7 +2749,7 @@ static void target_release_cmd_kref(struct kref *kref)
        if (se_sess) {
                spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
                list_del_init(&se_cmd->se_cmd_list);
-               if (list_empty(&se_sess->sess_cmd_list))
+               if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
                        wake_up(&se_sess->cmd_list_wq);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
        }
@@ -2907,7 +2902,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 
        spin_lock_irq(&se_sess->sess_cmd_lock);
        do {
-               ret = wait_event_interruptible_lock_irq_timeout(
+               ret = wait_event_lock_irq_timeout(
                                se_sess->cmd_list_wq,
                                list_empty(&se_sess->sess_cmd_list),
                                se_sess->sess_cmd_lock, 180 * HZ);
index 2718a93..70adcfd 100644 (file)
@@ -391,7 +391,6 @@ out:
 struct xcopy_pt_cmd {
        bool remote_port;
        struct se_cmd se_cmd;
-       struct xcopy_op *xcopy_op;
        struct completion xpt_passthrough_sem;
        unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
 };
@@ -596,8 +595,6 @@ static int target_xcopy_setup_pt_cmd(
         * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
         */
        target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-
-       xpt_cmd->xcopy_op = xop;
        target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
 
        cmd->tag = 0;
index d9f131e..ed7c122 100644 (file)
@@ -1052,10 +1052,9 @@ do {                                                                             \
        __ret;                                                                  \
 })
 
-#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition,                \
-                                                   lock, timeout)              \
+#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state)        \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
-                     TASK_INTERRUPTIBLE, 0, timeout,                           \
+                     state, 0, timeout,                                        \
                      spin_unlock_irq(&lock);                                   \
                      __ret = schedule_timeout(__ret);                          \
                      spin_lock_irq(&lock));
@@ -1089,8 +1088,19 @@ do {                                                                             \
 ({                                                                             \
        long __ret = timeout;                                                   \
        if (!___wait_cond_timeout(condition))                                   \
-               __ret = __wait_event_interruptible_lock_irq_timeout(            \
-                                       wq_head, condition, lock, timeout);     \
+               __ret = __wait_event_lock_irq_timeout(                          \
+                                       wq_head, condition, lock, timeout,      \
+                                       TASK_INTERRUPTIBLE);                    \
+       __ret;                                                                  \
+})
+
+#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)         \
+({                                                                             \
+       long __ret = timeout;                                                   \
+       if (!___wait_cond_timeout(condition))                                   \
+               __ret = __wait_event_lock_irq_timeout(                          \
+                                       wq_head, condition, lock, timeout,      \
+                                       TASK_UNINTERRUPTIBLE);                  \
        __ret;                                                                  \
 })
 
index f2e6abe..24c398f 100644 (file)
@@ -25,6 +25,7 @@ struct sock;
 #define ISCSIT_TCP_BACKLOG             256
 #define ISCSI_RX_THREAD_NAME           "iscsi_trx"
 #define ISCSI_TX_THREAD_NAME           "iscsi_ttx"
+#define ISCSI_IQN_LEN                  224
 
 /* struct iscsi_node_attrib sanity values */
 #define NA_DATAOUT_TIMEOUT             3
@@ -270,9 +271,9 @@ struct iscsi_conn_ops {
 };
 
 struct iscsi_sess_ops {
-       char    InitiatorName[224];
+       char    InitiatorName[ISCSI_IQN_LEN];
        char    InitiatorAlias[256];
-       char    TargetName[224];
+       char    TargetName[ISCSI_IQN_LEN];
        char    TargetAlias[256];
        char    TargetAddress[256];
        u16     TargetPortalGroupTag;           /* [0..65535] */
@@ -855,7 +856,6 @@ struct iscsi_wwn_stat_grps {
 };
 
 struct iscsi_tiqn {
-#define ISCSI_IQN_LEN                          224
        unsigned char           tiqn[ISCSI_IQN_LEN];
        enum tiqn_state_table   tiqn_state;
        int                     tiqn_access_count;
index 4d75a2c..ff6a472 100644 (file)
@@ -33,7 +33,7 @@ struct iscsi_sess_err_stats {
        u32             cxn_timeout_errors;
        u32             pdu_format_errors;
        u32             last_sess_failure_type;
-       char            last_sess_fail_rem_name[224];
+       char            last_sess_fail_rem_name[ISCSI_IQN_LEN];
 } ____cacheline_aligned;
 
 /* iSCSI login failure types (sub oids) */
@@ -56,7 +56,7 @@ struct iscsi_login_stats {
        u32             last_fail_type;
        int             last_intr_fail_ip_family;
        struct sockaddr_storage last_intr_fail_sockaddr;
-       char            last_intr_fail_name[224];
+       char            last_intr_fail_name[ISCSI_IQN_LEN];
 } ____cacheline_aligned;
 
 /* iSCSI logout stats */
index 7a4ee78..e3bdb05 100644 (file)
@@ -138,7 +138,6 @@ enum se_cmd_flags_table {
        SCF_ALUA_NON_OPTIMIZED          = 0x00008000,
        SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
        SCF_COMPARE_AND_WRITE           = 0x00080000,
-       SCF_COMPARE_AND_WRITE_POST      = 0x00100000,
        SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
        SCF_ACK_KREF                    = 0x00400000,
        SCF_USE_CPUID                   = 0x00800000,
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
new file mode 100644 (file)
index 0000000..17c7abd
--- /dev/null
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * UFS Transport SGIO v4 BSG Message Support
+ *
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef SCSI_BSG_UFS_H
+#define SCSI_BSG_UFS_H
+
+#include <linux/types.h>
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+#define UFS_CDB_SIZE   16
+#define UPIU_TRANSACTION_UIC_CMD 0x1F
+/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
+#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+       __be32 dword_0;
+       __be32 dword_1;
+       __be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+       __u8 opcode;
+       __u8 idn;
+       __u8 index;
+       __u8 selector;
+       __be16 reserved_osf;
+       __be16 length;
+       __be32 value;
+       __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+       __be32 exp_data_transfer_len;
+       __u8 cdb[UFS_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+       struct utp_upiu_header header;
+       union {
+               struct utp_upiu_cmd             sc;
+               struct utp_upiu_query           qr;
+               struct utp_upiu_query           tr;
+               /* use utp_upiu_query to host the 4 dwords of uic command */
+               struct utp_upiu_query           uc;
+       };
+};
+
+/* request (CDB) structure of the sg_io_v4 */
+struct ufs_bsg_request {
+       __u32 msgcode;
+       struct utp_upiu_req upiu_req;
+};
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct ufs_bsg_reply {
+       /*
+        * The completion result. Result exists in two forms:
+        * if negative, it is an -Exxx system errno value. There will
+        * be no further reply information supplied.
+        * else, it's the 4-byte scsi error result, with driver, host,
+        * msg and status fields. The per-msgcode reply structure
+        * will contain valid data.
+        */
+       __u32 result;
+
+       /* If there was reply_payload, how much was received? */
+       __u32 reply_payload_rcv_len;
+
+       struct utp_upiu_req upiu_rsp;
+};
+#endif /* SCSI_BSG_UFS_H */