Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Feb 2017 19:51:42 +0000 (11:51 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 21 Feb 2017 19:51:42 +0000 (11:51 -0800)
Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  ufs, lpfc, be2iscsi, hisi_sas, storvsc, cxlflash, aacraid,
  megaraid_sas, ...).

  There's also an assortment of minor fixes and the major update of
  switching a bunch of drivers to pci_alloc_irq_vectors from Christoph"
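
The pci_alloc_irq_vectors() conversion replaces open-coded MSI-X/MSI/INTx
setup with one call that negotiates the vector count and falls back between
interrupt types automatically.  A minimal sketch of the pattern the
converted drivers adopt (hypothetical my_* names, error paths trimmed):

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *data)
{
	/* acknowledge and dispatch the interrupt here */
	return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev, void *drvdata)
{
	int i, ret, nvec;

	/* up to 8 vectors, accept as few as 1, any type (MSI-X/MSI/INTx) */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps vector index -> Linux IRQ number */
		ret = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				  "my_driver", drvdata);
		if (ret)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), drvdata);
	pci_free_irq_vectors(pdev);
	return ret;
}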

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (188 commits)
  scsi: megaraid_sas: handle dma_addr_t right on 32-bit
  scsi: megaraid_sas: array overflow in megasas_dump_frame()
  scsi: snic: switch to pci_alloc_irq_vectors
  scsi: megaraid_sas: driver version upgrade
  scsi: megaraid_sas: Change RAID_1_10_RMW_CMDS to RAID_1_PEER_CMDS and set value to 2
  scsi: megaraid_sas: Indentation and smatch warning fixes
  scsi: megaraid_sas: Cleanup VD_EXT_DEBUG and SPAN_DEBUG related debug prints
  scsi: megaraid_sas: Increase internal command pool
  scsi: megaraid_sas: Use synchronize_irq to wait for IRQs to complete
  scsi: megaraid_sas: Bail out the driver load if ld_list_query fails
  scsi: megaraid_sas: Change build_mpt_mfi_pass_thru to return void
  scsi: megaraid_sas: During OCR, if get_ctrl_info fails do not continue with OCR
  scsi: megaraid_sas: Do not set fp_possible if TM capable for non-RW syspdIO, change fp_possible to bool
  scsi: megaraid_sas: Remove unused pd_index from megasas_build_ld_nonrw_fusion
  scsi: megaraid_sas: megasas_return_cmd does not memset IO frame to zero
  scsi: megaraid_sas: max_fw_cmds are decremented twice, remove duplicate
  scsi: megaraid_sas: update can_queue only if the new value is less
  scsi: megaraid_sas: Change max_cmd from u32 to u16 in all functions
  scsi: megaraid_sas: set pd_after_lb from MR_BuildRaidContext and initialize pDevHandle to MR_DEVHANDLE_INVALID
  scsi: megaraid_sas: latest controller OCR capability from FW before sending shutdown DCMD
  ...

129 files changed:
drivers/ata/libata-eh.c
drivers/ata/libata-transport.c
drivers/ata/libata.h
drivers/block/cciss.h
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptlan.h
drivers/message/fusion/mptsas.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/aacraid/aachba.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commctrl.c
drivers/scsi/aacraid/comminit.c
drivers/scsi/aacraid/commsup.c
drivers/scsi/aacraid/dpcsup.c
drivers/scsi/aacraid/linit.c
drivers/scsi/aacraid/nark.c
drivers/scsi/aacraid/rkt.c
drivers/scsi/aacraid/rx.c
drivers/scsi/aacraid/sa.c
drivers/scsi/aacraid/src.c
drivers/scsi/atari_scsi.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bfa/bfa_fcs.c
drivers/scsi/bfa/bfa_fcs.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxlflash/common.h
drivers/scsi/cxlflash/lunmgt.c
drivers/scsi/cxlflash/main.c
drivers/scsi/cxlflash/sislite.h
drivers/scsi/cxlflash/superpipe.c
drivers/scsi/cxlflash/vlun.c
drivers/scsi/dpt_i2o.c
drivers/scsi/esas2r/esas2r_init.c
drivers/scsi/esas2r/esas2r_ioctl.c
drivers/scsi/esas2r/esas2r_log.h
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/g_NCR5380.c
drivers/scsi/g_NCR5380.h [deleted file]
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/iscsi_tcp.c
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_init.c
drivers/scsi/libsas/sas_internal.h
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mac_scsi.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/megaraid/megaraid_sas_fusion.h
drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mvumi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pm8001/pm8001_sas.h
drivers/scsi/pmcraid.c
drivers/scsi/pmcraid.h
drivers/scsi/qedi/qedi_dbg.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/sd.c
drivers/scsi/snic/snic.h
drivers/scsi/snic/snic_isr.c
drivers/scsi/storvsc_drv.c
drivers/scsi/sun3_scsi.c
drivers/scsi/sun3_scsi.h [deleted file]
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufs-qcom.h
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/scsi/vmw_pvscsi.c
drivers/scsi/vmw_pvscsi.h
include/linux/libata.h
include/scsi/libiscsi.h
include/scsi/scsi.h
include/scsi/scsi_transport.h
include/scsi/scsi_transport_fc.h
include/scsi/scsi_transport_srp.h
include/trace/events/ufs.h [new file with mode: 0644]
include/uapi/scsi/cxlflash_ioctl.h

diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 0e1ec37..50ee10d 100644
@@ -549,6 +549,7 @@ enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
        DPRINTK("EXIT, ret=%d\n", ret);
        return ret;
 }
+EXPORT_SYMBOL(ata_scsi_timed_out);
 
 static void ata_eh_unload(struct ata_port *ap)
 {
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 7ef16c0..4669823 100644
@@ -716,7 +716,6 @@ struct scsi_transport_template *ata_attach_transport(void)
                return NULL;
 
        i->t.eh_strategy_handler        = ata_scsi_error;
-       i->t.eh_timed_out               = ata_scsi_timed_out;
        i->t.user_scan                  = ata_scsi_user_scan;
 
        i->t.host_attrs.ac.attrs = &i->port_attrs[0];
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 8f3a559..06d479d 100644
@@ -159,7 +159,6 @@ extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
 extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
 extern void ata_eh_acquire(struct ata_port *ap);
 extern void ata_eh_release(struct ata_port *ap);
-extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
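
The three libata hunks above are one half of a tree-wide change in this
series: the command timeout handler moves from scsi_transport_template into
scsi_host_template, so ata_scsi_timed_out() is exported and hooked up by
each host template instead of being patched in by ata_attach_transport().
The .eh_timed_out additions in the driver templates below
(iscsi_eh_cmd_timed_out, srp_timed_out, fc_eh_timed_out,
mptsas_eh_timed_out) are the same conversion for the other transports.
A minimal sketch of the new hookup, assuming 4.10-era libata exports and a
hypothetical my_ata driver:

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

static struct scsi_host_template my_ata_sht = {
	.module		 = THIS_MODULE,
	.name		 = "my_ata",
	.queuecommand	 = ata_scsi_queuecmd,
	/* was assigned via the transport template at attach time */
	.eh_timed_out	 = ata_scsi_timed_out,
	.slave_configure = ata_scsi_slave_config,
};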
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 4affa94..24b5fd7 100644
@@ -400,27 +400,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
 }
 
 static struct access_method SA5_access = {
-       SA5_submit_command,
-       SA5_intr_mask,
-       SA5_fifo_full,
-       SA5_intr_pending,
-       SA5_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_intr_pending,
+       .command_completed = SA5_completed,
 };
 
 static struct access_method SA5B_access = {
-        SA5_submit_command,
-        SA5B_intr_mask,
-        SA5_fifo_full,
-        SA5B_intr_pending,
-        SA5_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5B_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5B_intr_pending,
+       .command_completed = SA5_completed,
 };
 
 static struct access_method SA5_performant_access = {
-       SA5_submit_command,
-       SA5_performant_intr_mask,
-       SA5_fifo_full,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .fifo_full = SA5_fifo_full,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 struct board_type {
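
The cciss hunks above convert positional struct initializers to designated
initializers.  The win: bindings no longer depend on field order, and a
renamed or removed member becomes a compile error instead of silently
shifting every later function pointer.  A tiny hypothetical illustration:

static void demo_start(void) { }
static void demo_stop(void) { }

struct demo_ops {
	void (*start)(void);
	void (*stop)(void);
};

/* positional: binds by order; reordering the fields would silently
 * swap the callbacks */
static struct demo_ops positional_init = { demo_start, demo_stop };

/* designated: binds by name; field order is irrelevant and a typo in
 * a member name fails to compile */
static struct demo_ops designated_init = {
	.start = demo_start,
	.stop  = demo_stop,
};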
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index e71af71..30a6985 100644
@@ -994,6 +994,7 @@ static struct scsi_host_template iscsi_iser_sht = {
        .change_queue_depth     = scsi_change_queue_depth,
        .sg_tablesize           = ISCSI_ISER_DEF_SG_TABLESIZE,
        .cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
+       .eh_timed_out           = iscsi_eh_cmd_timed_out,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler= iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 79bf484..36529e3 100644
@@ -2869,6 +2869,7 @@ static struct scsi_host_template srp_template = {
        .info                           = srp_target_info,
        .queuecommand                   = srp_queuecommand,
        .change_queue_depth             = srp_change_queue_depth,
+       .eh_timed_out                   = srp_timed_out,
        .eh_abort_handler               = srp_abort,
        .eh_device_reset_handler        = srp_reset_device,
        .eh_host_reset_handler          = srp_reset_host,
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index add6a3a..98eafae 100644
@@ -119,6 +119,7 @@ static struct scsi_host_template mptfc_driver_template = {
        .target_destroy                 = mptfc_target_destroy,
        .slave_destroy                  = mptscsih_slave_destroy,
        .change_queue_depth             = mptscsih_change_queue_depth,
+       .eh_timed_out                   = fc_eh_timed_out,
        .eh_abort_handler               = mptfc_abort,
        .eh_device_reset_handler        = mptfc_dev_reset,
        .eh_bus_reset_handler           = mptfc_bus_reset,
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
index 8946e19..8a24494 100644
@@ -65,7 +65,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/miscdevice.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b8c4b2b..f6308ad 100644
@@ -1983,6 +1983,7 @@ static struct scsi_host_template mptsas_driver_template = {
        .target_destroy                 = mptsas_target_destroy,
        .slave_destroy                  = mptscsih_slave_destroy,
        .change_queue_depth             = mptscsih_change_queue_depth,
+       .eh_timed_out                   = mptsas_eh_timed_out,
        .eh_abort_handler               = mptscsih_abort,
        .eh_device_reset_handler        = mptscsih_dev_reset,
        .eh_host_reset_handler          = mptscsih_host_reset,
@@ -5398,7 +5399,6 @@ mptsas_init(void)
            sas_attach_transport(&mptsas_transport_functions);
        if (!mptsas_transport_template)
                return -ENODEV;
-       mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
 
        mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
            "mptscsih_io_done");
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb..0678cf7 100644
@@ -330,6 +330,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
        .module                  = THIS_MODULE,
        .name                    = "zfcp",
        .queuecommand            = zfcp_scsi_queuecommand,
+       .eh_timed_out            = fc_eh_timed_out,
        .eh_abort_handler        = zfcp_scsi_eh_abort_handler,
        .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
        .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 4f5ca79..acc3344 100644
  * of chips.  To use it, you write an architecture specific functions
  * and macros and include this file in your driver.
  *
- * These macros control options :
- * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
- * for commands that return with a CHECK CONDITION status.
- *
- * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
- * transceivers.
- *
- * PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
- *
- * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
- *
  * These macros MUST be defined :
  *
  * NCR5380_read(register)  - read from the specified register
@@ -347,7 +336,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
 #endif
 
 /**
- * NCR58380_info - report driver and host information
+ * NCR5380_info - report driver and host information
  * @instance: relevant scsi host instance
  *
  * For use as the host template info() handler.
@@ -360,33 +349,6 @@ static const char *NCR5380_info(struct Scsi_Host *instance)
        return hostdata->info;
 }
 
-static void prepare_info(struct Scsi_Host *instance)
-{
-       struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-       snprintf(hostdata->info, sizeof(hostdata->info),
-                "%s, irq %d, "
-                "io_port 0x%lx, base 0x%lx, "
-                "can_queue %d, cmd_per_lun %d, "
-                "sg_tablesize %d, this_id %d, "
-                "flags { %s%s%s}, "
-                "options { %s} ",
-                instance->hostt->name, instance->irq,
-                hostdata->io_port, hostdata->base,
-                instance->can_queue, instance->cmd_per_lun,
-                instance->sg_tablesize, instance->this_id,
-                hostdata->flags & FLAG_DMA_FIXUP     ? "DMA_FIXUP "     : "",
-                hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
-                hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY "  : "",
-#ifdef DIFFERENTIAL
-                "DIFFERENTIAL "
-#endif
-#ifdef PARITY
-                "PARITY "
-#endif
-                "");
-}
-
 /**
  * NCR5380_init - initialise an NCR5380
  * @instance: adapter to configure
@@ -436,7 +398,14 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
        if (!hostdata->work_q)
                return -ENOMEM;
 
-       prepare_info(instance);
+       snprintf(hostdata->info, sizeof(hostdata->info),
+               "%s, irq %d, io_port 0x%lx, base 0x%lx, can_queue %d, cmd_per_lun %d, sg_tablesize %d, this_id %d, flags { %s%s%s}",
+               instance->hostt->name, instance->irq, hostdata->io_port,
+               hostdata->base, instance->can_queue, instance->cmd_per_lun,
+               instance->sg_tablesize, instance->this_id,
+               hostdata->flags & FLAG_DMA_FIXUP     ? "DMA_FIXUP "     : "",
+               hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
+               hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "");
 
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
        NCR5380_write(MODE_REG, MR_BASE);
@@ -622,8 +591,9 @@ static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
            list_empty(&hostdata->unissued) &&
            list_empty(&hostdata->autosense) &&
            !hostdata->connected &&
-           !hostdata->selecting)
+           !hostdata->selecting) {
                NCR5380_release_dma_irq(instance);
+       }
 }
 
 /**
@@ -962,6 +932,7 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
 
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
                                         struct scsi_cmnd *cmd)
+       __releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
        struct NCR5380_hostdata *hostdata = shost_priv(instance);
        unsigned char tmp[3], phase;
@@ -1194,8 +1165,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
        data = tmp;
        phase = PHASE_MSGOUT;
        NCR5380_transfer_pio(instance, &phase, &len, &data);
+       if (len) {
+               NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+               cmd->result = DID_ERROR << 16;
+               complete_cmd(instance, cmd);
+               dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
+               cmd = NULL;
+               goto out;
+       }
+
        dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n");
-       /* XXX need to handle errors here */
 
        hostdata->connected = cmd;
        hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
@@ -1654,6 +1633,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
  */
 
 static void NCR5380_information_transfer(struct Scsi_Host *instance)
+       __releases(&hostdata->lock) __acquires(&hostdata->lock)
 {
        struct NCR5380_hostdata *hostdata = shost_priv(instance);
        unsigned char msgout = NOP;
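
The __releases()/__acquires() annotations added to NCR5380_select() and
NCR5380_information_transfer() are sparse lock-context markers: they
document that the function is entered with hostdata->lock held and may drop
and re-take it internally, so sparse can check callers' lock balance.  A
minimal hypothetical sketch of the idiom:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* entered with demo_lock held; drops it around a slow step */
static void demo_wait_step(void)
	__releases(&demo_lock) __acquires(&demo_lock)
{
	spin_unlock_irq(&demo_lock);
	/* ... poll hardware or sleep without holding the lock ... */
	spin_lock_irq(&demo_lock);
}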
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 51a3567..d78f095 100644
 #define ICR_ASSERT_ATN         0x02    /* rw Set to assert ATN */
 #define ICR_ASSERT_DATA                0x01    /* rw SCSI_DATA_REG is asserted */
 
-#ifdef DIFFERENTIAL
-#define ICR_BASE               ICR_DIFF_ENABLE
-#else
 #define ICR_BASE               0
-#endif
 
 #define MODE_REG               2
 /*
 #define MR_DMA_MODE            0x02    /* rw DMA / pseudo DMA mode */
 #define MR_ARBITRATE           0x01    /* rw start arbitration */
 
-#ifdef PARITY
-#define MR_BASE                        MR_ENABLE_PAR_CHECK
-#else
 #define MR_BASE                        0
-#endif
 
 #define TARGET_COMMAND_REG     3
 #define TCR_LAST_BYTE_SENT     0x80    /* ro DMA done */
 #define CSR_SCSI_BUF_RDY       0x02    /* ro  SCSI buffer read */
 #define CSR_GATED_53C80_IRQ    0x01    /* ro  Last block xferred */
 
-#if 0
-#define CSR_BASE CSR_SCSI_BUFF_INTR | CSR_53C80_INTR
-#else
 #define CSR_BASE CSR_53C80_INTR
-#endif
 
 /* Note : PHASE_* macros are based on the values of the STATUS register */
 #define PHASE_MASK     (SR_MSG | SR_CD | SR_IO)
@@ -234,11 +222,9 @@ struct NCR5380_hostdata {
        unsigned char id_higher_mask;           /* All bits above id_mask */
        unsigned char last_message;             /* Last Message Out */
        unsigned long region_size;              /* Size of address/port range */
-       char info[256];
+       char info[168];                         /* Host banner message */
 };
 
-#ifdef __KERNEL__
-
 struct NCR5380_cmd {
        struct list_head list;
 };
@@ -331,5 +317,4 @@ static inline int NCR5380_dma_residual_none(struct NCR5380_hostdata *hostdata)
        return 0;
 }
 
-#endif                         /* __KERNEL__ */
 #endif                         /* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 1ee7c65..907f1e8 100644
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * along with this program; see the file COPYING.  If not, write to
  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  *
+ * Module Name:
+ *  aachba.c
+ *
+ * Abstract: Contains Interfaces to manage IOs.
+ *
  */
 
 #include <linux/kernel.h>
@@ -62,6 +68,7 @@
 #define SENCODE_END_OF_DATA                    0x00
 #define SENCODE_BECOMING_READY                 0x04
 #define SENCODE_INIT_CMD_REQUIRED              0x04
+#define SENCODE_UNRECOVERED_READ_ERROR         0x11
 #define SENCODE_PARAM_LIST_LENGTH_ERROR                0x1A
 #define SENCODE_INVALID_COMMAND                        0x20
 #define SENCODE_LBA_OUT_OF_RANGE               0x21
 #define ASENCODE_LUN_FAILED_SELF_CONFIG                0x00
 #define ASENCODE_OVERLAPPED_COMMAND            0x00
 
+#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
+
 #define BYTE0(x) (unsigned char)(x)
 #define BYTE1(x) (unsigned char)((x) >> 8)
 #define BYTE2(x) (unsigned char)((x) >> 16)
@@ -164,46 +173,56 @@ struct inquiry_data {
 };
 
 /* Added for VPD 0x83 */
-typedef struct {
-       u8 CodeSet:4;   /* VPD_CODE_SET */
-       u8 Reserved:4;
-       u8 IdentifierType:4;    /* VPD_IDENTIFIER_TYPE */
-       u8 Reserved2:4;
-       u8 Reserved3;
-       u8 IdentifierLength;
-       u8 VendId[8];
-       u8 ProductId[16];
-       u8 SerialNumber[8];     /* SN in ASCII */
-
-} TVPD_ID_Descriptor_Type_1;
+struct  tvpd_id_descriptor_type_1 {
+       u8 codeset:4;           /* VPD_CODE_SET */
+       u8 reserved:4;
+       u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
+       u8 reserved2:4;
+       u8 reserved3;
+       u8 identifierlength;
+       u8 venid[8];
+       u8 productid[16];
+       u8 serialnumber[8];     /* SN in ASCII */
 
-typedef struct {
-       u8 CodeSet:4;   /* VPD_CODE_SET */
-       u8 Reserved:4;
-       u8 IdentifierType:4;    /* VPD_IDENTIFIER_TYPE */
-       u8 Reserved2:4;
-       u8 Reserved3;
-       u8 IdentifierLength;
-       struct TEU64Id {
+};
+
+struct tvpd_id_descriptor_type_2 {
+       u8 codeset:4;           /* VPD_CODE_SET */
+       u8 reserved:4;
+       u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
+       u8 reserved2:4;
+       u8 reserved3;
+       u8 identifierlength;
+       struct teu64id {
                u32 Serial;
                 /* The serial number supposed to be 40 bits,
                  * bit we only support 32, so make the last byte zero. */
-               u8 Reserved;
-               u8 VendId[3];
-       } EU64Id;
+               u8 reserved;
+               u8 venid[3];
+       } eu64id;
 
-} TVPD_ID_Descriptor_Type_2;
+};
 
-typedef struct {
+struct tvpd_id_descriptor_type_3 {
+       u8 codeset : 4;          /* VPD_CODE_SET */
+       u8 reserved : 4;
+       u8 identifiertype : 4;   /* VPD_IDENTIFIER_TYPE */
+       u8 reserved2 : 4;
+       u8 reserved3;
+       u8 identifierlength;
+       u8 Identifier[16];
+};
+
+struct tvpd_page83 {
        u8 DeviceType:5;
        u8 DeviceTypeQualifier:3;
        u8 PageCode;
-       u8 Reserved;
+       u8 reserved;
        u8 PageLength;
-       TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
-       TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
-
-} TVPD_Page83;
+       struct tvpd_id_descriptor_type_1 type1;
+       struct tvpd_id_descriptor_type_2 type2;
+       struct tvpd_id_descriptor_type_3 type3;
+};
 
 /*
  *              M O D U L E   G L O B A L S
@@ -214,9 +233,13 @@ static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
 static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
 static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
                                struct aac_raw_io2 *rio2, int sg_max);
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+                               struct aac_hba_cmd_req *hbacmd,
+                               int sg_max, u64 sg_address);
 static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
                                int pages, int nseg, int nseg_new);
 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
 #ifdef AAC_DETAILED_STATUS_INFO
 static char *aac_get_status_string(u32 status);
 #endif
@@ -327,7 +350,7 @@ static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
        }
        scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
        device = scsicmd->device;
-       if (unlikely(!device || !scsi_device_online(device))) {
+       if (unlikely(!device)) {
                dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
                aac_fib_complete(fibptr);
                return 0;
@@ -473,16 +496,26 @@ int aac_get_containers(struct aac_dev *dev)
 
        if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
                maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
-       fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
-                       GFP_KERNEL);
-       if (!fsa_dev_ptr)
-               return -ENOMEM;
+       if (dev->fsa_dev == NULL ||
+               dev->maximum_num_containers != maximum_num_containers) {
+
+               fsa_dev_ptr = dev->fsa_dev;
+
+               dev->fsa_dev = kcalloc(maximum_num_containers,
+                                       sizeof(*fsa_dev_ptr), GFP_KERNEL);
+
+               kfree(fsa_dev_ptr);
+               fsa_dev_ptr = NULL;
 
-       dev->fsa_dev = fsa_dev_ptr;
-       dev->maximum_num_containers = maximum_num_containers;
 
-       for (index = 0; index < dev->maximum_num_containers; ) {
-               fsa_dev_ptr[index].devname[0] = '\0';
+               if (!dev->fsa_dev)
+                       return -ENOMEM;
+
+               dev->maximum_num_containers = maximum_num_containers;
+       }
+       for (index = 0; index < dev->maximum_num_containers; index++) {
+               dev->fsa_dev[index].devname[0] = '\0';
+               dev->fsa_dev[index].valid = 0;
 
                status = aac_probe_container(dev, index);
 
@@ -490,12 +523,6 @@ int aac_get_containers(struct aac_dev *dev)
                        printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
                        break;
                }
-
-               /*
-                *      If there are no more containers, then stop asking.
-                */
-               if (++index >= status)
-                       break;
        }
        return status;
 }
@@ -602,6 +629,7 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
        struct fsa_dev_info *fsa_dev_ptr;
        int (*callback)(struct scsi_cmnd *);
        struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+       int i;
 
 
        if (!aac_valid_context(scsicmd, fibptr))
@@ -624,6 +652,10 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
                                fsa_dev_ptr->block_size =
                                        le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
                        }
+                       for (i = 0; i < 16; i++)
+                               fsa_dev_ptr->identifier[i] =
+                                       dresp->mnt[0].fileinfo.bdevinfo
+                                                               .identifier[i];
                        fsa_dev_ptr->valid = 1;
                        /* sense_key holds the current state of the spin-up */
                        if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
@@ -918,6 +950,28 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
        inqstrcpy ("V1.0", str->prl);
 }
 
+static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
+               struct aac_dev *dev, struct scsi_cmnd *scsicmd)
+{
+       int container;
+
+       vpdpage83data->type3.codeset = 1;
+       vpdpage83data->type3.identifiertype = 3;
+       vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
+                       - 4;
+
+       for (container = 0; container < dev->maximum_num_containers;
+                       container++) {
+
+               if (scmd_id(scsicmd) == container) {
+                       memcpy(vpdpage83data->type3.Identifier,
+                                       dev->fsa_dev[container].identifier,
+                                       16);
+                       break;
+               }
+       }
+}
+
 static void get_container_serial_callback(void *context, struct fib * fibptr)
 {
        struct aac_get_serial_resp * get_serial_reply;
@@ -935,39 +989,47 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
                /*Check to see if it's for VPD 0x83 or 0x80 */
                if (scsicmd->cmnd[2] == 0x83) {
                        /* vpd page 0x83 - Device Identification Page */
+                       struct aac_dev *dev;
                        int i;
-                       TVPD_Page83 VPDPage83Data;
+                       struct tvpd_page83 vpdpage83data;
+
+                       dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
-                       memset(((u8 *)&VPDPage83Data), 0,
-                              sizeof(VPDPage83Data));
+                       memset(((u8 *)&vpdpage83data), 0,
+                              sizeof(vpdpage83data));
 
                        /* DIRECT_ACCESS_DEVIC */
-                       VPDPage83Data.DeviceType = 0;
+                       vpdpage83data.DeviceType = 0;
                        /* DEVICE_CONNECTED */
-                       VPDPage83Data.DeviceTypeQualifier = 0;
+                       vpdpage83data.DeviceTypeQualifier = 0;
                        /* VPD_DEVICE_IDENTIFIERS */
-                       VPDPage83Data.PageCode = 0x83;
-                       VPDPage83Data.Reserved = 0;
-                       VPDPage83Data.PageLength =
-                               sizeof(VPDPage83Data.IdDescriptorType1) +
-                               sizeof(VPDPage83Data.IdDescriptorType2);
+                       vpdpage83data.PageCode = 0x83;
+                       vpdpage83data.reserved = 0;
+                       vpdpage83data.PageLength =
+                               sizeof(vpdpage83data.type1) +
+                               sizeof(vpdpage83data.type2);
+
+                       /* VPD 83 Type 3 is not supported for ARC */
+                       if (dev->sa_firmware)
+                               vpdpage83data.PageLength +=
+                               sizeof(vpdpage83data.type3);
 
                        /* T10 Vendor Identifier Field Format */
-                       /* VpdCodeSetAscii */
-                       VPDPage83Data.IdDescriptorType1.CodeSet = 2;
+                       /* VpdcodesetAscii */
+                       vpdpage83data.type1.codeset = 2;
                        /* VpdIdentifierTypeVendorId */
-                       VPDPage83Data.IdDescriptorType1.IdentifierType = 1;
-                       VPDPage83Data.IdDescriptorType1.IdentifierLength =
-                               sizeof(VPDPage83Data.IdDescriptorType1) - 4;
+                       vpdpage83data.type1.identifiertype = 1;
+                       vpdpage83data.type1.identifierlength =
+                               sizeof(vpdpage83data.type1) - 4;
 
                        /* "ADAPTEC " for adaptec */
-                       memcpy(VPDPage83Data.IdDescriptorType1.VendId,
+                       memcpy(vpdpage83data.type1.venid,
                                "ADAPTEC ",
-                               sizeof(VPDPage83Data.IdDescriptorType1.VendId));
-                       memcpy(VPDPage83Data.IdDescriptorType1.ProductId,
+                               sizeof(vpdpage83data.type1.venid));
+                       memcpy(vpdpage83data.type1.productid,
                                "ARRAY           ",
                                sizeof(
-                               VPDPage83Data.IdDescriptorType1.ProductId));
+                               vpdpage83data.type1.productid));
 
                        /* Convert to ascii based serial number.
                         * The LSB is the the end.
@@ -976,32 +1038,41 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
                                u8 temp =
                                        (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
                                if (temp  > 0x9) {
-                                       VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+                                       vpdpage83data.type1.serialnumber[i] =
                                                        'A' + (temp - 0xA);
                                } else {
-                                       VPDPage83Data.IdDescriptorType1.SerialNumber[i] =
+                                       vpdpage83data.type1.serialnumber[i] =
                                                        '0' + temp;
                                }
                        }
 
                        /* VpdCodeSetBinary */
-                       VPDPage83Data.IdDescriptorType2.CodeSet = 1;
-                       /* VpdIdentifierTypeEUI64 */
-                       VPDPage83Data.IdDescriptorType2.IdentifierType = 2;
-                       VPDPage83Data.IdDescriptorType2.IdentifierLength =
-                               sizeof(VPDPage83Data.IdDescriptorType2) - 4;
+                       vpdpage83data.type2.codeset = 1;
+                       /* VpdidentifiertypeEUI64 */
+                       vpdpage83data.type2.identifiertype = 2;
+                       vpdpage83data.type2.identifierlength =
+                               sizeof(vpdpage83data.type2) - 4;
 
-                       VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;
-                       VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
-                       VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;
+                       vpdpage83data.type2.eu64id.venid[0] = 0xD0;
+                       vpdpage83data.type2.eu64id.venid[1] = 0;
+                       vpdpage83data.type2.eu64id.venid[2] = 0;
 
-                       VPDPage83Data.IdDescriptorType2.EU64Id.Serial =
+                       vpdpage83data.type2.eu64id.Serial =
                                                        get_serial_reply->uid;
-                       VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;
+                       vpdpage83data.type2.eu64id.reserved = 0;
+
+                       /*
+                        * VpdIdentifierTypeFCPHName
+                        * VPD 0x83 Type 3 not supported for ARC
+                        */
+                       if (dev->sa_firmware) {
+                               build_vpd83_type3(&vpdpage83data,
+                                               dev, scsicmd);
+                       }
 
                        /* Move the inquiry data to the response buffer. */
-                       scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
-                                                sizeof(VPDPage83Data));
+                       scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
+                                                sizeof(vpdpage83data));
                } else {
                        /* It must be for VPD 0x80 */
                        char sp[13];
@@ -1144,7 +1215,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
        long ret;
 
        aac_fib_init(fib);
-       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+       if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+               dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+               !dev->sync_mode) {
                struct aac_raw_io2 *readcmd2;
                readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
                memset(readcmd2, 0, sizeof(struct aac_raw_io2));
@@ -1270,7 +1343,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
        long ret;
 
        aac_fib_init(fib);
-       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+       if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+               dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+               !dev->sync_mode) {
                struct aac_raw_io2 *writecmd2;
                writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
                memset(writecmd2, 0, sizeof(struct aac_raw_io2));
@@ -1435,6 +1510,52 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
        return srbcmd;
 }
 
+static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
+                                                       struct scsi_cmnd *cmd)
+{
+       struct aac_hba_cmd_req *hbacmd;
+       struct aac_dev *dev;
+       int bus, target;
+       u64 address;
+
+       dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+       hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
+       memset(hbacmd, 0, 96);  /* sizeof(*hbacmd) is not necessary */
+       /* iu_type is a parameter of aac_hba_send */
+       switch (cmd->sc_data_direction) {
+       case DMA_TO_DEVICE:
+               hbacmd->byte1 = 2;
+               break;
+       case DMA_FROM_DEVICE:
+       case DMA_BIDIRECTIONAL:
+               hbacmd->byte1 = 1;
+               break;
+       case DMA_NONE:
+       default:
+               break;
+       }
+       hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
+
+       bus = aac_logical_to_phys(scmd_channel(cmd));
+       target = scmd_id(cmd);
+       hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
+
+       /* we fill in reply_qid later in aac_src_deliver_message */
+       /* we fill in iu_type, request_id later in aac_hba_send */
+       /* we fill in emb_data_desc_count later in aac_build_sghba */
+
+       memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
+       hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
+
+       address = (u64)fib->hw_error_pa;
+       hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+       hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+       hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+       return hbacmd;
+}
+
 static void aac_srb_callback(void *context, struct fib * fibptr);
 
 static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
@@ -1505,11 +1626,243 @@ static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
        return aac_scsi_32(fib, cmd);
 }
 
+static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
+{
+       struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
+       struct aac_dev *dev;
+       long ret;
+
+       dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+       ret = aac_build_sghba(cmd, hbacmd,
+               dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
+       if (ret < 0)
+               return ret;
+
+       /*
+        *      Now send the HBA command to the adapter
+        */
+       fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
+               sizeof(struct aac_hba_sgl);
+
+       return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
+                                 (fib_callback) aac_hba_callback,
+                                 (void *) cmd);
+}
+
+int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+{
+       struct fib *fibptr;
+       struct aac_srb *srbcmd;
+       struct sgmap64 *sg64;
+       struct aac_ciss_identify_pd *identify_resp;
+       dma_addr_t addr;
+       u32 vbus, vid;
+       u16 fibsize, datasize;
+       int rcode = -ENOMEM;
+
+
+       fibptr = aac_fib_alloc(dev);
+       if (!fibptr)
+               goto out;
+
+       fibsize = sizeof(struct aac_srb) -
+                       sizeof(struct sgentry) + sizeof(struct sgentry64);
+       datasize = sizeof(struct aac_ciss_identify_pd);
+
+       identify_resp =  pci_alloc_consistent(dev->pdev, datasize, &addr);
+
+       if (!identify_resp)
+               goto fib_free_ptr;
+
+       vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
+       vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+
+       aac_fib_init(fibptr);
+
+       srbcmd = (struct aac_srb *) fib_data(fibptr);
+       srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+       srbcmd->channel  = cpu_to_le32(vbus);
+       srbcmd->id       = cpu_to_le32(vid);
+       srbcmd->lun      = 0;
+       srbcmd->flags    = cpu_to_le32(SRB_DataIn);
+       srbcmd->timeout  = cpu_to_le32(10);
+       srbcmd->retry_limit = 0;
+       srbcmd->cdb_size = cpu_to_le32(12);
+       srbcmd->count = cpu_to_le32(datasize);
+
+       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+       srbcmd->cdb[0] = 0x26;
+       srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+       srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+       sg64 = (struct sgmap64 *)&srbcmd->sg;
+       sg64->count = cpu_to_le32(1);
+       sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
+       sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+       sg64->sg[0].count = cpu_to_le32(datasize);
+
+       rcode = aac_fib_send(ScsiPortCommand64,
+               fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+
+       if (identify_resp->current_queue_depth_limit <= 0 ||
+               identify_resp->current_queue_depth_limit > 32)
+               dev->hba_map[bus][target].qd_limit = 32;
+       else
+               dev->hba_map[bus][target].qd_limit =
+                       identify_resp->current_queue_depth_limit;
+
+       pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);
+
+       aac_fib_complete(fibptr);
+
+fib_free_ptr:
+       aac_fib_free(fibptr);
+out:
+       return rcode;
+}
+
+/**
+ *     aac_update_hba_map() - update current hba map with data from FW
+ *     @dev:   aac_dev structure
+ *     @phys_luns: FW information from report phys luns
+ *
+ *     Update our hba map with the information gathered from the FW
+ */
+void aac_update_hba_map(struct aac_dev *dev,
+               struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+{
+       /* ok and extended reporting */
+       u32 lun_count, nexus;
+       u32 i, bus, target;
+       u8 expose_flag, attribs;
+       u8 devtype;
+
+       lun_count = ((phys_luns->list_length[0] << 24)
+                       + (phys_luns->list_length[1] << 16)
+                       + (phys_luns->list_length[2] << 8)
+                       + (phys_luns->list_length[3])) / 24;
+
+       for (i = 0; i < lun_count; ++i) {
+
+               bus = phys_luns->lun[i].level2[1] & 0x3f;
+               target = phys_luns->lun[i].level2[0];
+               expose_flag = phys_luns->lun[i].bus >> 6;
+               attribs = phys_luns->lun[i].node_ident[9];
+               nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+
+               if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
+                       continue;
+
+               dev->hba_map[bus][target].expose = expose_flag;
+
+               if (expose_flag != 0) {
+                       devtype = AAC_DEVTYPE_RAID_MEMBER;
+                       goto update_devtype;
+               }
+
+               if (nexus != 0 && (attribs & 8)) {
+                       devtype = AAC_DEVTYPE_NATIVE_RAW;
+                       dev->hba_map[bus][target].rmw_nexus =
+                                       nexus;
+               } else
+                       devtype = AAC_DEVTYPE_ARC_RAW;
+
+               if (devtype != AAC_DEVTYPE_NATIVE_RAW)
+                       goto update_devtype;
+
+               if (aac_issue_bmic_identify(dev, bus, target) < 0)
+                       dev->hba_map[bus][target].qd_limit = 32;
+
+update_devtype:
+               if (rescan == AAC_INIT)
+                       dev->hba_map[bus][target].devtype = devtype;
+               else
+                       dev->hba_map[bus][target].new_devtype = devtype;
+       }
+}
+
+/**
+ *     aac_report_phys_luns() - Process topology change
+ *     @dev:           aac_dev structure
+ *     @fibptr:        fib pointer
+ *
+ *     Execute a CISS REPORT PHYS LUNS and process the results into
+ *     the current hba_map.
+ */
+int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+{
+       int fibsize, datasize;
+       struct aac_ciss_phys_luns_resp *phys_luns;
+       struct aac_srb *srbcmd;
+       struct sgmap64 *sg64;
+       dma_addr_t addr;
+       u32 vbus, vid;
+       int rcode = 0;
+
+       /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
+       fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
+                       + sizeof(struct sgentry64);
+       datasize = sizeof(struct aac_ciss_phys_luns_resp)
+                       + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+
+       phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent(
+                       dev->pdev, datasize, &addr);
+
+       if (phys_luns == NULL) {
+               rcode = -ENOMEM;
+               goto err_out;
+       }
+
+       vbus = (u32) le16_to_cpu(
+                       dev->supplement_adapter_info.VirtDeviceBus);
+       vid = (u32) le16_to_cpu(
+                       dev->supplement_adapter_info.VirtDeviceTarget);
+
+       aac_fib_init(fibptr);
+
+       srbcmd = (struct aac_srb *) fib_data(fibptr);
+       srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+       srbcmd->channel = cpu_to_le32(vbus);
+       srbcmd->id = cpu_to_le32(vid);
+       srbcmd->lun = 0;
+       srbcmd->flags = cpu_to_le32(SRB_DataIn);
+       srbcmd->timeout = cpu_to_le32(10);
+       srbcmd->retry_limit = 0;
+       srbcmd->cdb_size = cpu_to_le32(12);
+       srbcmd->count = cpu_to_le32(datasize);
+
+       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+       srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+       srbcmd->cdb[1] = 2; /* extended reporting */
+       srbcmd->cdb[8] = (u8)(datasize >> 8);
+       srbcmd->cdb[9] = (u8)(datasize);
+
+       sg64 = (struct sgmap64 *) &srbcmd->sg;
+       sg64->count = cpu_to_le32(1);
+       sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+       sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+       sg64->sg[0].count = cpu_to_le32(datasize);
+
+       rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
+                       FsaNormal, 1, 1, NULL, NULL);
+
+       /* analyse data */
+       if (rcode >= 0 && phys_luns->resp_flag == 2) {
+               /* ok and extended reporting */
+               aac_update_hba_map(dev, phys_luns, rescan);
+       }
+
+       pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr);
+err_out:
+       return rcode;
+}
+
 int aac_get_adapter_info(struct aac_dev* dev)
 {
        struct fib* fibptr;
        int rcode;
-       u32 tmp;
+       u32 tmp, bus, target;
        struct aac_adapter_info *info;
        struct aac_bus_info *command;
        struct aac_bus_info_response *bus_info;
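
The new BMIC identify and CISS report-phys-luns helpers above allocate
their response buffers with the legacy pci_alloc_consistent()/
pci_free_consistent() wrappers, which expand to dma_alloc_coherent()/
dma_free_coherent() with GFP_ATOMIC.  A sketch of the equivalent modern
form (hypothetical demo_* names):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *demo_alloc_resp(struct pci_dev *pdev, size_t size,
			     dma_addr_t *dma)
{
	/* same semantics as pci_alloc_consistent(pdev, size, dma) */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_ATOMIC);
}

static void demo_free_resp(struct pci_dev *pdev, size_t size,
			   void *cpu_addr, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma);
}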
@@ -1540,6 +1893,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
        }
        memcpy(&dev->adapter_info, info, sizeof(*info));
 
+       dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
        if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
                struct aac_supplement_adapter_info * sinfo;
 
@@ -1567,6 +1921,13 @@ int aac_get_adapter_info(struct aac_dev* dev)
 
        }
 
+       /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
+       for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+               for (target = 0; target < AAC_MAX_TARGETS; target++) {
+                       dev->hba_map[bus][target].devtype = 0;
+                       dev->hba_map[bus][target].qd_limit = 0;
+               }
+       }
 
        /*
         * GetBusInfo
@@ -1599,6 +1960,12 @@ int aac_get_adapter_info(struct aac_dev* dev)
                dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
        }
 
+       if (!dev->sync_mode && dev->sa_firmware &&
+                       dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
+               /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
+               rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
+       }
+
        if (!dev->in_reset) {
                char buffer[16];
                tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -1765,6 +2132,11 @@ int aac_get_adapter_info(struct aac_dev* dev)
                          (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
                }
        }
+       if (!dev->sync_mode && dev->sa_firmware &&
+               dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
+               dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
+                       HBA_MAX_SG_SEPARATE;
+
        /* FIB should be freed only after getting the response from the F/W */
        if (rcode != -ERESTARTSYS) {
                aac_fib_complete(fibptr);
@@ -1845,6 +2217,15 @@ static void io_callback(void *context, struct fib * fibptr)
                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
                             SCSI_SENSE_BUFFERSIZE));
                break;
+       case ST_MEDERR:
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+                       SAM_STAT_CHECK_CONDITION;
+               set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
+                 SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
+               memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+                      min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+                            SCSI_SENSE_BUFFERSIZE));
+               break;
        default:
 #ifdef AAC_DETAILED_STATUS_INFO
                printk(KERN_WARNING "io_callback: io failed, status = %d\n",
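
The new ST_MEDERR branch above reports a MEDIUM ERROR check condition by
filling fixed-format sense data and copying it into the command's sense
buffer.  For reference, the generic midlayer helper that composes the same
fixed-format sense bytes (aacraid uses its own set_sense(); this demo_*
function is hypothetical):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>

static void demo_set_medium_error(struct scsi_cmnd *cmd)
{
	/* key MEDIUM_ERROR, asc 0x11 = UNRECOVERED READ ERROR, ascq 0 */
	scsi_build_sense_buffer(0, cmd->sense_buffer, MEDIUM_ERROR,
				0x11, 0);
	cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
		      SAM_STAT_CHECK_CONDITION;
}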
@@ -2312,7 +2693,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
 
 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 {
-       u32 cid;
+       u32 cid, bus;
        struct Scsi_Host *host = scsicmd->device->host;
        struct aac_dev *dev = (struct aac_dev *)host->hostdata;
        struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
@@ -2330,8 +2711,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                        if((cid >= dev->maximum_num_containers) ||
                                        (scsicmd->device->lun != 0)) {
                                scsicmd->result = DID_NO_CONNECT << 16;
-                               scsicmd->scsi_done(scsicmd);
-                               return 0;
+                               goto scsi_done_ret;
                        }
 
                        /*
@@ -2359,15 +2739,30 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                                }
                        }
                } else {  /* check for physical non-dasd devices */
-                       if (dev->nondasd_support || expose_physicals ||
-                                       dev->jbod) {
+                       bus = aac_logical_to_phys(scmd_channel(scsicmd));
+                       if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+                               (dev->hba_map[bus][cid].expose
+                                               == AAC_HIDE_DISK)){
+                               if (scsicmd->cmnd[0] == INQUIRY) {
+                                       scsicmd->result = DID_NO_CONNECT << 16;
+                                       goto scsi_done_ret;
+                               }
+                       }
+
+                       if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+                               dev->hba_map[bus][cid].devtype
+                                       == AAC_DEVTYPE_NATIVE_RAW) {
+                               if (dev->in_reset)
+                                       return -1;
+                               return aac_send_hba_fib(scsicmd);
+                       } else if (dev->nondasd_support || expose_physicals ||
+                               dev->jbod) {
                                if (dev->in_reset)
                                        return -1;
                                return aac_send_srb_fib(scsicmd);
                        } else {
                                scsicmd->result = DID_NO_CONNECT << 16;
-                               scsicmd->scsi_done(scsicmd);
-                               return 0;
+                               goto scsi_done_ret;
                        }
                }
        }
@@ -2385,13 +2780,34 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
                       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
                             SCSI_SENSE_BUFFERSIZE));
-               scsicmd->scsi_done(scsicmd);
-               return 0;
+               goto scsi_done_ret;
        }
 
-
-       /* Handle commands here that don't really require going out to the adapter */
        switch (scsicmd->cmnd[0]) {
+       case READ_6:
+       case READ_10:
+       case READ_12:
+       case READ_16:
+               if (dev->in_reset)
+                       return -1;
+               return aac_read(scsicmd);
+
+       case WRITE_6:
+       case WRITE_10:
+       case WRITE_12:
+       case WRITE_16:
+               if (dev->in_reset)
+                       return -1;
+               return aac_write(scsicmd);
+
+       case SYNCHRONIZE_CACHE:
+               if (((aac_cache & 6) == 6) && dev->cache_protected) {
+                       scsicmd->result = AAC_STAT_GOOD;
+                       break;
+               }
+               /* Issue FIB to tell Firmware to flush its cache */
+               if ((aac_cache & 6) != 2)
+                       return aac_synchronize(scsicmd);
        case INQUIRY:
        {
                struct inquiry_data inq_data;
@@ -2414,8 +2830,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                                arr[1] = scsicmd->cmnd[2];
                                scsi_sg_copy_from_buffer(scsicmd, &inq_data,
                                                         sizeof(inq_data));
-                               scsicmd->result = DID_OK << 16 |
-                                 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+                               scsicmd->result = AAC_STAT_GOOD;
                        } else if (scsicmd->cmnd[2] == 0x80) {
                                /* unit serial number page */
                                arr[3] = setinqserial(dev, &arr[4],
@@ -2426,8 +2841,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                                if (aac_wwn != 2)
                                        return aac_get_container_serial(
                                                scsicmd);
-                               scsicmd->result = DID_OK << 16 |
-                                 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+                               scsicmd->result = AAC_STAT_GOOD;
                        } else if (scsicmd->cmnd[2] == 0x83) {
                                /* vpd page 0x83 - Device Identification Page */
                                char *sno = (char *)&inq_data;
@@ -2436,8 +2850,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                                if (aac_wwn != 2)
                                        return aac_get_container_serial(
                                                scsicmd);
-                               scsicmd->result = DID_OK << 16 |
-                                 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+                               scsicmd->result = AAC_STAT_GOOD;
                        } else {
                                /* vpd page not implemented */
                                scsicmd->result = DID_OK << 16 |
@@ -2452,8 +2865,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                                        sizeof(dev->fsa_dev[cid].sense_data),
                                        SCSI_SENSE_BUFFERSIZE));
                        }
-                       scsicmd->scsi_done(scsicmd);
-                       return 0;
+                       break;
                }
                inq_data.inqd_ver = 2;  /* claim compliance to SCSI-2 */
                inq_data.inqd_rdf = 2;  /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
@@ -2469,9 +2881,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                        inq_data.inqd_pdt = INQD_PDT_PROC;      /* Processor device */
                        scsi_sg_copy_from_buffer(scsicmd, &inq_data,
                                                 sizeof(inq_data));
-                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-                       scsicmd->scsi_done(scsicmd);
-                       return 0;
+                       scsicmd->result = AAC_STAT_GOOD;
+                       break;
                }
                if (dev->in_reset)
                        return -1;
@@ -2519,10 +2930,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                /* Do not cache partition table for arrays */
                scsicmd->device->removable = 1;
 
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-
-               return 0;
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
        }
 
        case READ_CAPACITY:
@@ -2547,11 +2956,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
                /* Do not cache partition table for arrays */
                scsicmd->device->removable = 1;
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
-                 SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-
-               return 0;
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
        }
 
        case MODE_SENSE:
@@ -2629,10 +3035,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                scsi_sg_copy_from_buffer(scsicmd,
                                         (char *)&mpd,
                                         mode_buf_length);
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-
-               return 0;
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
        }
        case MODE_SENSE_10:
        {
@@ -2708,18 +3112,17 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                                         (char *)&mpd10,
                                         mode_buf_length);
 
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-
-               return 0;
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
        }
        case REQUEST_SENSE:
                dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
-               memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
-               memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-               return 0;
+               memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+                               sizeof(struct sense_data));
+               memset(&dev->fsa_dev[cid].sense_data, 0,
+                               sizeof(struct sense_data));
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
 
        case ALLOW_MEDIUM_REMOVAL:
                dprintk((KERN_DEBUG "LOCK command.\n"));
@@ -2728,9 +3131,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                else
                        fsa_dev_ptr[cid].locked = 0;
 
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-               return 0;
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
        /*
         *      These commands are all No-Ops
         */
@@ -2746,80 +3148,41 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
                               min_t(size_t,
                                     sizeof(dev->fsa_dev[cid].sense_data),
                                     SCSI_SENSE_BUFFERSIZE));
-                       scsicmd->scsi_done(scsicmd);
-                       return 0;
+                       break;
                }
-               /* FALLTHRU */
        case RESERVE:
        case RELEASE:
        case REZERO_UNIT:
        case REASSIGN_BLOCKS:
        case SEEK_10:
-               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-               scsicmd->scsi_done(scsicmd);
-               return 0;
+               scsicmd->result = AAC_STAT_GOOD;
+               break;
 
        case START_STOP:
                return aac_start_stop(scsicmd);
-       }
-
-       switch (scsicmd->cmnd[0])
-       {
-               case READ_6:
-               case READ_10:
-               case READ_12:
-               case READ_16:
-                       if (dev->in_reset)
-                               return -1;
-                       /*
-                        *      Hack to keep track of ordinal number of the device that
-                        *      corresponds to a container. Needed to convert
-                        *      containers to /dev/sd device names
-                        */
-
-                       if (scsicmd->request->rq_disk)
-                               strlcpy(fsa_dev_ptr[cid].devname,
-                               scsicmd->request->rq_disk->disk_name,
-                               min(sizeof(fsa_dev_ptr[cid].devname),
-                               sizeof(scsicmd->request->rq_disk->disk_name) + 1));
-
-                       return aac_read(scsicmd);
 
-               case WRITE_6:
-               case WRITE_10:
-               case WRITE_12:
-               case WRITE_16:
-                       if (dev->in_reset)
-                               return -1;
-                       return aac_write(scsicmd);
-
-               case SYNCHRONIZE_CACHE:
-                       if (((aac_cache & 6) == 6) && dev->cache_protected) {
-                               scsicmd->result = DID_OK << 16 |
-                                       COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
-                               scsicmd->scsi_done(scsicmd);
-                               return 0;
-                       }
-                       /* Issue FIB to tell Firmware to flush it's cache */
-                       if ((aac_cache & 6) != 2)
-                               return aac_synchronize(scsicmd);
-                       /* FALLTHRU */
-               default:
-                       /*
-                        *      Unhandled commands
-                        */
-                       dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
-                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
-                       set_sense(&dev->fsa_dev[cid].sense_data,
+       default:
+               /*
+                *      Unhandled commands
+                */
+               dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
+                               scsicmd->cmnd[0]));
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+                               SAM_STAT_CHECK_CONDITION;
+               set_sense(&dev->fsa_dev[cid].sense_data,
                          ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
                          ASENCODE_INVALID_COMMAND, 0, 0);
-                       memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+               memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
                                min_t(size_t,
                                      sizeof(dev->fsa_dev[cid].sense_data),
                                      SCSI_SENSE_BUFFERSIZE));
-                       scsicmd->scsi_done(scsicmd);
-                       return 0;
        }
+
+scsi_done_ret:
+       scsicmd->scsi_done(scsicmd);
+       return 0;
 }
 
 static int query_disk(struct aac_dev *dev, void __user *arg)
@@ -2954,16 +3317,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
                return;
 
        BUG_ON(fibptr == NULL);
-       dev = fibptr->dev;
 
-       scsi_dma_unmap(scsicmd);
-
-       /* expose physical device if expose_physicald flag is on */
-       if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
-         && expose_physicals > 0)
-               aac_expose_phy_device(scsicmd);
+       dev = fibptr->dev;
 
        srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
        scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
 
        if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2976,158 +3334,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
                 */
                scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
                                   - le32_to_cpu(srbreply->data_xfer_length));
-               /*
-                * First check the fib status
-                */
+       }
 
-               if (le32_to_cpu(srbreply->status) != ST_OK) {
-                       int len;
 
-                       printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
-                       len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
-                                   SCSI_SENSE_BUFFERSIZE);
-                       scsicmd->result = DID_ERROR << 16
-                                               | COMMAND_COMPLETE << 8
-                                               | SAM_STAT_CHECK_CONDITION;
-                       memcpy(scsicmd->sense_buffer,
-                                       srbreply->sense_data, len);
-               }
+       scsi_dma_unmap(scsicmd);
 
-               /*
-                * Next check the srb status
-                */
-               switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
-               case SRB_STATUS_ERROR_RECOVERY:
-               case SRB_STATUS_PENDING:
-               case SRB_STATUS_SUCCESS:
-                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
-                       break;
-               case SRB_STATUS_DATA_OVERRUN:
-                       switch (scsicmd->cmnd[0]) {
-                       case  READ_6:
-                       case  WRITE_6:
-                       case  READ_10:
-                       case  WRITE_10:
-                       case  READ_12:
-                       case  WRITE_12:
-                       case  READ_16:
-                       case  WRITE_16:
-                               if (le32_to_cpu(srbreply->data_xfer_length)
-                                                       < scsicmd->underflow)
-                                       printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
-                               else
-                                       printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
-                               scsicmd->result = DID_ERROR << 16
-                                                       | COMMAND_COMPLETE << 8;
-                               break;
-                       case INQUIRY: {
-                               scsicmd->result = DID_OK << 16
-                                                       | COMMAND_COMPLETE << 8;
-                               break;
-                       }
-                       default:
-                               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
-                               break;
-                       }
-                       break;
-               case SRB_STATUS_ABORTED:
-                       scsicmd->result = DID_ABORT << 16 | ABORT << 8;
-                       break;
-               case SRB_STATUS_ABORT_FAILED:
-                       /*
-                        * Not sure about this one - but assuming the
-                        * hba was trying to abort for some reason
-                        */
-                       scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+       /* expose physical device if expose_physicals flag is on */
+       if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+         && expose_physicals > 0)
+               aac_expose_phy_device(scsicmd);
+
+       /*
+        * First check the fib status
+        */
+
+       if (le32_to_cpu(srbreply->status) != ST_OK) {
+               int len;
+
+               pr_warn("aac_srb_callback: srb failed, status = %d\n",
+                               le32_to_cpu(srbreply->status));
+               len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+                           SCSI_SENSE_BUFFERSIZE);
+               scsicmd->result = DID_ERROR << 16
+                               | COMMAND_COMPLETE << 8
+                               | SAM_STAT_CHECK_CONDITION;
+               memcpy(scsicmd->sense_buffer,
+                               srbreply->sense_data, len);
+       }
+
+       /*
+        * Next check the srb status
+        */
+       switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+       case SRB_STATUS_ERROR_RECOVERY:
+       case SRB_STATUS_PENDING:
+       case SRB_STATUS_SUCCESS:
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+               break;
+       case SRB_STATUS_DATA_OVERRUN:
+               switch (scsicmd->cmnd[0]) {
+               case  READ_6:
+               case  WRITE_6:
+               case  READ_10:
+               case  WRITE_10:
+               case  READ_12:
+               case  WRITE_12:
+               case  READ_16:
+               case  WRITE_16:
+                       if (le32_to_cpu(srbreply->data_xfer_length)
+                                               < scsicmd->underflow)
+                               pr_warn("aacraid: SCSI CMD underflow\n");
+                       else
+                               pr_warn("aacraid: SCSI CMD Data Overrun\n");
+                       scsicmd->result = DID_ERROR << 16
+                                       | COMMAND_COMPLETE << 8;
                        break;
-               case SRB_STATUS_PARITY_ERROR:
-                       scsicmd->result = DID_PARITY << 16
-                                               | MSG_PARITY_ERROR << 8;
+               case INQUIRY:
+                       scsicmd->result = DID_OK << 16
+                                       | COMMAND_COMPLETE << 8;
                        break;
-               case SRB_STATUS_NO_DEVICE:
-               case SRB_STATUS_INVALID_PATH_ID:
-               case SRB_STATUS_INVALID_TARGET_ID:
-               case SRB_STATUS_INVALID_LUN:
-               case SRB_STATUS_SELECTION_TIMEOUT:
-                       scsicmd->result = DID_NO_CONNECT << 16
-                                               | COMMAND_COMPLETE << 8;
+               default:
+                       scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
                        break;
+               }
+               break;
+       case SRB_STATUS_ABORTED:
+               scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+               break;
+       case SRB_STATUS_ABORT_FAILED:
+               /*
+                * Not sure about this one - but assuming the
+                * hba was trying to abort for some reason
+                */
+               scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+               break;
+       case SRB_STATUS_PARITY_ERROR:
+               scsicmd->result = DID_PARITY << 16
+                               | MSG_PARITY_ERROR << 8;
+               break;
+       case SRB_STATUS_NO_DEVICE:
+       case SRB_STATUS_INVALID_PATH_ID:
+       case SRB_STATUS_INVALID_TARGET_ID:
+       case SRB_STATUS_INVALID_LUN:
+       case SRB_STATUS_SELECTION_TIMEOUT:
+               scsicmd->result = DID_NO_CONNECT << 16
+                               | COMMAND_COMPLETE << 8;
+               break;
 
-               case SRB_STATUS_COMMAND_TIMEOUT:
-               case SRB_STATUS_TIMEOUT:
-                       scsicmd->result = DID_TIME_OUT << 16
-                                               | COMMAND_COMPLETE << 8;
-                       break;
+       case SRB_STATUS_COMMAND_TIMEOUT:
+       case SRB_STATUS_TIMEOUT:
+               scsicmd->result = DID_TIME_OUT << 16
+                               | COMMAND_COMPLETE << 8;
+               break;
 
-               case SRB_STATUS_BUSY:
-                       scsicmd->result = DID_BUS_BUSY << 16
-                                               | COMMAND_COMPLETE << 8;
-                       break;
+       case SRB_STATUS_BUSY:
+               scsicmd->result = DID_BUS_BUSY << 16
+                               | COMMAND_COMPLETE << 8;
+               break;
 
-               case SRB_STATUS_BUS_RESET:
-                       scsicmd->result = DID_RESET << 16
-                                               | COMMAND_COMPLETE << 8;
-                       break;
+       case SRB_STATUS_BUS_RESET:
+               scsicmd->result = DID_RESET << 16
+                               | COMMAND_COMPLETE << 8;
+               break;
 
-               case SRB_STATUS_MESSAGE_REJECTED:
-                       scsicmd->result = DID_ERROR << 16
-                                               | MESSAGE_REJECT << 8;
-                       break;
-               case SRB_STATUS_REQUEST_FLUSHED:
-               case SRB_STATUS_ERROR:
-               case SRB_STATUS_INVALID_REQUEST:
-               case SRB_STATUS_REQUEST_SENSE_FAILED:
-               case SRB_STATUS_NO_HBA:
-               case SRB_STATUS_UNEXPECTED_BUS_FREE:
-               case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
-               case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
-               case SRB_STATUS_DELAYED_RETRY:
-               case SRB_STATUS_BAD_FUNCTION:
-               case SRB_STATUS_NOT_STARTED:
-               case SRB_STATUS_NOT_IN_USE:
-               case SRB_STATUS_FORCE_ABORT:
-               case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
-               default:
+       case SRB_STATUS_MESSAGE_REJECTED:
+               scsicmd->result = DID_ERROR << 16
+                               | MESSAGE_REJECT << 8;
+               break;
+       case SRB_STATUS_REQUEST_FLUSHED:
+       case SRB_STATUS_ERROR:
+       case SRB_STATUS_INVALID_REQUEST:
+       case SRB_STATUS_REQUEST_SENSE_FAILED:
+       case SRB_STATUS_NO_HBA:
+       case SRB_STATUS_UNEXPECTED_BUS_FREE:
+       case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+       case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+       case SRB_STATUS_DELAYED_RETRY:
+       case SRB_STATUS_BAD_FUNCTION:
+       case SRB_STATUS_NOT_STARTED:
+       case SRB_STATUS_NOT_IN_USE:
+       case SRB_STATUS_FORCE_ABORT:
+       case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+       default:
 #ifdef AAC_DETAILED_STATUS_INFO
-                       printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
-                               le32_to_cpu(srbreply->srb_status) & 0x3F,
-                               aac_get_status_string(
-                                       le32_to_cpu(srbreply->srb_status) & 0x3F),
-                               scsicmd->cmnd[0],
-                               le32_to_cpu(srbreply->scsi_status));
+               pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
+                       le32_to_cpu(srbreply->srb_status) & 0x3F,
+                       aac_get_status_string(
+                               le32_to_cpu(srbreply->srb_status) & 0x3F),
+                       scsicmd->cmnd[0],
+                       le32_to_cpu(srbreply->scsi_status));
 #endif
-                       if ((scsicmd->cmnd[0] == ATA_12)
-                               || (scsicmd->cmnd[0] == ATA_16)) {
-                                       if (scsicmd->cmnd[2] & (0x01 << 5)) {
-                                               scsicmd->result = DID_OK << 16
-                                                       | COMMAND_COMPLETE << 8;
-                               break;
-                               } else {
-                                       scsicmd->result = DID_ERROR << 16
-                                               | COMMAND_COMPLETE << 8;
-                                       break;
-                               }
+               /*
+                * When the CC bit is SET by the host in an ATA pass-through
+                * CDB, the driver is supposed to return DID_OK.
+                *
+                * When the CC bit is RESET by the host, the driver should
+                * return DID_ERROR.
+                */
+               if ((scsicmd->cmnd[0] == ATA_12)
+                       || (scsicmd->cmnd[0] == ATA_16)) {
+
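+                       /*
+                        * Note: cmnd[2] bit 5 is the CK_COND (CC) bit of
+                        * the ATA PASS-THROUGH(12/16) CDB.
+                        */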
+                       if (scsicmd->cmnd[2] & (0x01 << 5)) {
+                               scsicmd->result = DID_OK << 16
+                                       | COMMAND_COMPLETE << 8;
+                               break;
                        } else {
                                scsicmd->result = DID_ERROR << 16
                                        | COMMAND_COMPLETE << 8;
-                               break;
+                               break;
                        }
+               } else {
+                       scsicmd->result = DID_ERROR << 16
+                               | COMMAND_COMPLETE << 8;
+                       break;
                }
-               if (le32_to_cpu(srbreply->scsi_status)
-                               == SAM_STAT_CHECK_CONDITION) {
-                       int len;
+       }
+       if (le32_to_cpu(srbreply->scsi_status)
+                       == SAM_STAT_CHECK_CONDITION) {
+               int len;
 
-                       scsicmd->result |= SAM_STAT_CHECK_CONDITION;
-                       len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
-                                   SCSI_SENSE_BUFFERSIZE);
+               scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+               len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+                           SCSI_SENSE_BUFFERSIZE);
 #ifdef AAC_DETAILED_STATUS_INFO
-                       printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
-                                               le32_to_cpu(srbreply->status), len);
+               pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
+                                       le32_to_cpu(srbreply->status), len);
 #endif
-                       memcpy(scsicmd->sense_buffer,
-                                       srbreply->sense_data, len);
-               }
+               memcpy(scsicmd->sense_buffer,
+                               srbreply->sense_data, len);
        }
+
        /*
         * OR in the scsi status (already shifted up a bit)
         */
@@ -3137,9 +3513,152 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
        scsicmd->scsi_done(scsicmd);
 }
 
+static void hba_resp_task_complete(struct aac_dev *dev,
+                                       struct scsi_cmnd *scsicmd,
+                                       struct aac_hba_resp *err)
+{
+       scsicmd->result = err->status;
+       /* set residual count */
+       scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
+
+       switch (err->status) {
+       case SAM_STAT_GOOD:
+               scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+               break;
+       case SAM_STAT_CHECK_CONDITION:
+       {
+               int len;
+
+               len = min_t(u8, err->sense_response_data_len,
+                       SCSI_SENSE_BUFFERSIZE);
+               if (len)
+                       memcpy(scsicmd->sense_buffer,
+                               err->sense_response_buf, len);
+               scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+               break;
+       }
+       case SAM_STAT_BUSY:
+               scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+               break;
+       case SAM_STAT_TASK_ABORTED:
+               scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
+               break;
+       case SAM_STAT_RESERVATION_CONFLICT:
+       case SAM_STAT_TASK_SET_FULL:
+       default:
+               scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+               break;
+       }
+}
+
+static void hba_resp_task_failure(struct aac_dev *dev,
+                                       struct scsi_cmnd *scsicmd,
+                                       struct aac_hba_resp *err)
+{
+       switch (err->status) {
+       case HBA_RESP_STAT_HBAMODE_DISABLED:
+       {
+               u32 bus, cid;
+
+               bus = aac_logical_to_phys(scmd_channel(scsicmd));
+               cid = scmd_id(scsicmd);
+               if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+                       dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
+                       dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
+               }
+               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+               break;
+       }
+       case HBA_RESP_STAT_IO_ERROR:
+       case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
+               scsicmd->result = DID_OK << 16 |
+                       COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
+               break;
+       case HBA_RESP_STAT_IO_ABORTED:
+               scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+               break;
+       case HBA_RESP_STAT_INVALID_DEVICE:
+               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+               break;
+       case HBA_RESP_STAT_UNDERRUN:
+               /* UNDERRUN is OK */
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+               break;
+       case HBA_RESP_STAT_OVERRUN:
+       default:
+               scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+               break;
+       }
+}
+
+/**
+ * aac_hba_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the completion of a native HBA scsi command
+ */
+void aac_hba_callback(void *context, struct fib *fibptr)
+{
+       struct aac_dev *dev;
+       struct scsi_cmnd *scsicmd;
+
+       struct aac_hba_resp *err =
+                       &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
+
+       scsicmd = (struct scsi_cmnd *) context;
+
+       if (!aac_valid_context(scsicmd, fibptr))
+               return;
+
+       WARN_ON(fibptr == NULL);
+       dev = fibptr->dev;
+
+       if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
+               scsi_dma_unmap(scsicmd);
+
+       if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+               /* fast response */
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+               goto out;
+       }
+
+       switch (err->service_response) {
+       case HBA_RESP_SVCRES_TASK_COMPLETE:
+               hba_resp_task_complete(dev, scsicmd, err);
+               break;
+       case HBA_RESP_SVCRES_FAILURE:
+               hba_resp_task_failure(dev, scsicmd, err);
+               break;
+       case HBA_RESP_SVCRES_TMF_REJECTED:
+               scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+               break;
+       case HBA_RESP_SVCRES_TMF_LUN_INVALID:
+               scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+               break;
+       case HBA_RESP_SVCRES_TMF_COMPLETE:
+       case HBA_RESP_SVCRES_TMF_SUCCEEDED:
+               scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+               break;
+       default:
+               scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+               break;
+       }
+
+out:
+       aac_fib_complete(fibptr);
+
+       if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
+               scsicmd->SCp.sent_command = 1;
+       else
+               scsicmd->scsi_done(scsicmd);
+}
+
 /**
  *
- * aac_send_scb_fib
+ * aac_send_srb_fib
  * @scsicmd: the scsi command block
  *
  * This routine will form a FIB and fill in the aac_srb from the
@@ -3182,6 +3701,54 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
        return -1;
 }
 
+/**
+ * aac_send_hba_fib
+ * @scsicmd: the scsi command block
+ *
+ * This routine will form a FIB and fill in the aac_hba_cmd_req from the
+ * scsicmd passed in.
+ */
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
+{
+       struct fib *cmd_fibcontext;
+       struct aac_dev *dev;
+       int status;
+
+       dev = shost_priv(scsicmd->device->host);
+       if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
+                       scsicmd->device->lun > AAC_MAX_LUN - 1) {
+               scsicmd->result = DID_NO_CONNECT << 16;
+               scsicmd->scsi_done(scsicmd);
+               return 0;
+       }
+
+       /*
+        *      Allocate and initialize a Fib, then set up the HBA command
+        */
+       cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+       if (!cmd_fibcontext)
+               return -1;
+
+       status = aac_adapter_hba(cmd_fibcontext, scsicmd);
+
+       /*
+        *      Check that the command was queued to the controller
+        */
+       if (status == -EINPROGRESS) {
+               scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+               return 0;
+       }
+
+       pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
+               status);
+       aac_fib_complete(cmd_fibcontext);
+       aac_fib_free(cmd_fibcontext);
+
+       return -1;
+}
+
 static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
 {
        struct aac_dev *dev;
@@ -3434,6 +4001,75 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
        return 0;
 }
 
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+                       struct aac_hba_cmd_req *hbacmd,
+                       int sg_max,
+                       u64 sg_address)
+{
+       unsigned long byte_count = 0;
+       int nseg;
+       struct scatterlist *sg;
+       int i;
+       u32 cur_size;
+       struct aac_hba_sgl *sge;
+
+       nseg = scsi_dma_map(scsicmd);
+       if (nseg <= 0) {
+               byte_count = nseg;
+               goto out;
+       }
+
+       if (nseg > HBA_MAX_SG_EMBEDDED)
+               sge = &hbacmd->sge[2];
+       else
+               sge = &hbacmd->sge[0];
+
+       scsi_for_each_sg(scsicmd, sg, nseg, i) {
+               int count = sg_dma_len(sg);
+               u64 addr = sg_dma_address(sg);
+
+               WARN_ON(i >= sg_max);
+               sge->addr_hi = cpu_to_le32((u32)(addr>>32));
+               sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
+               cur_size = cpu_to_le32(count);
+               sge->len = cur_size;
+               sge->flags = 0;
+               byte_count += count;
+               sge++;
+       }
+
+       sge--;
+       /* hba wants the size to be exact */
+       if (byte_count > scsi_bufflen(scsicmd)) {
+               u32 temp;
+
+               temp = le32_to_cpu(sge->len) - byte_count
+                                               + scsi_bufflen(scsicmd);
+               sge->len = cpu_to_le32(temp);
+               byte_count = scsi_bufflen(scsicmd);
+       }
+
+       if (nseg <= HBA_MAX_SG_EMBEDDED) {
+               hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
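+               /* 0x40000000 presumably marks the final embedded element */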
+               sge->flags = cpu_to_le32(0x40000000);
+       } else {
+               /* not embedded */
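+               /* 0x80000000 matches HBA_SGL_FLAGS_EXT defined in aacraid.h:
+                * the SG list is kept in a separate buffer, and sge[0] below
+                * is pointed at it */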
+               hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
+               hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
+               hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
+               hbacmd->sge[0].addr_lo =
+                       cpu_to_le32((u32)(sg_address & 0xffffffff));
+       }
+
+       /* Check for command underflow */
+       if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+               pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
+                               byte_count, scsicmd->underflow);
+       }
+out:
+       return byte_count;
+}
+
 #ifdef AAC_DETAILED_STATUS_INFO
 
 struct aac_srb_status_info {
index f059c14..f234497 100644 (file)
@@ -1,3 +1,37 @@
+/*
+ *     Adaptec AAC series RAID controller driver
+ *     (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  aacraid.h
+ *
+ * Abstract: Contains all routines for control of the aacraid driver
+ *
+ */
+
+#ifndef _AACRAID_H_
+#define _AACRAID_H_
 #ifndef dprintk
 # define dprintk(x)
 #endif
@@ -63,8 +97,8 @@ enum {
 #define        PMC_GLOBAL_INT_BIT0             0x00000001
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41066
-# define AAC_DRIVER_BRANCH "-ms"
+# define AAC_DRIVER_BUILD 50740
+# define AAC_DRIVER_BRANCH "-custom"
 #endif
 #define MAXIMUM_NUM_CONTAINERS 32
 
@@ -72,13 +106,311 @@ enum {
 #define AAC_NUM_IO_FIB         (1024 - AAC_NUM_MGT_FIB)
 #define AAC_NUM_FIB            (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
 
-#define AAC_MAX_LUN            (8)
+#define AAC_MAX_LUN            256
 
 #define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
 #define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256)
 
 #define AAC_DEBUG_INSTRUMENT_AIF_DELETE
 
+#define AAC_MAX_NATIVE_TARGETS         1024
+/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
+#define AAC_MAX_BUSES                  5
+#define AAC_MAX_TARGETS                256
+#define AAC_MAX_NATIVE_SIZE            2048
+#define FW_ERROR_BUFFER_SIZE           512
+
+/* Thor AIF events */
+#define SA_AIF_HOTPLUG                 (1<<1)
+#define SA_AIF_HARDWARE                (1<<2)
+#define SA_AIF_PDEV_CHANGE             (1<<4)
+#define SA_AIF_LDEV_CHANGE             (1<<5)
+#define SA_AIF_BPSTAT_CHANGE           (1<<30)
+#define SA_AIF_BPCFG_CHANGE            (1<<31)
+
+#define HBA_MAX_SG_EMBEDDED            28
+#define HBA_MAX_SG_SEPARATE            90
+#define HBA_SENSE_DATA_LEN_MAX         32
+#define HBA_REQUEST_TAG_ERROR_FLAG     0x00000002
+#define HBA_SGL_FLAGS_EXT              0x80000000UL
+
+struct aac_hba_sgl {
+       u32             addr_lo; /* Lower 32-bits of SGL element address */
+       u32             addr_hi; /* Upper 32-bits of SGL element address */
+       u32             len;    /* Length of SGL element in bytes */
+       u32             flags;  /* SGL element flags */
+};
+
+enum {
+       HBA_IU_TYPE_SCSI_CMD_REQ                = 0x40,
+       HBA_IU_TYPE_SCSI_TM_REQ                 = 0x41,
+       HBA_IU_TYPE_SATA_REQ                    = 0x42,
+       HBA_IU_TYPE_RESP                        = 0x60,
+       HBA_IU_TYPE_COALESCED_RESP              = 0x61,
+       HBA_IU_TYPE_INT_COALESCING_CFG_REQ      = 0x70
+};
+
+enum {
+       HBA_CMD_BYTE1_DATA_DIR_IN               = 0x1,
+       HBA_CMD_BYTE1_DATA_DIR_OUT              = 0x2,
+       HBA_CMD_BYTE1_DATA_TYPE_DDR             = 0x4,
+       HBA_CMD_BYTE1_CRYPTO_ENABLE             = 0x8
+};
+
+enum {
+       HBA_CMD_BYTE1_BITOFF_DATA_DIR_IN        = 0x0,
+       HBA_CMD_BYTE1_BITOFF_DATA_DIR_OUT,
+       HBA_CMD_BYTE1_BITOFF_DATA_TYPE_DDR,
+       HBA_CMD_BYTE1_BITOFF_CRYPTO_ENABLE
+};
+
+enum {
+       HBA_RESP_DATAPRES_NO_DATA               = 0x0,
+       HBA_RESP_DATAPRES_RESPONSE_DATA,
+       HBA_RESP_DATAPRES_SENSE_DATA
+};
+
+enum {
+       HBA_RESP_SVCRES_TASK_COMPLETE           = 0x0,
+       HBA_RESP_SVCRES_FAILURE,
+       HBA_RESP_SVCRES_TMF_COMPLETE,
+       HBA_RESP_SVCRES_TMF_SUCCEEDED,
+       HBA_RESP_SVCRES_TMF_REJECTED,
+       HBA_RESP_SVCRES_TMF_LUN_INVALID
+};
+
+enum {
+       HBA_RESP_STAT_IO_ERROR                  = 0x1,
+       HBA_RESP_STAT_IO_ABORTED,
+       HBA_RESP_STAT_NO_PATH_TO_DEVICE,
+       HBA_RESP_STAT_INVALID_DEVICE,
+       HBA_RESP_STAT_HBAMODE_DISABLED          = 0xE,
+       HBA_RESP_STAT_UNDERRUN                  = 0x51,
+       HBA_RESP_STAT_OVERRUN                   = 0x75
+};
+
+struct aac_hba_cmd_req {
+       u8      iu_type;        /* HBA information unit type */
+       /*
+        * byte1:
+        * [1:0] DIR - 0=No data, 0x1 = IN, 0x2 = OUT
+        * [2]   TYPE - 0=PCI, 1=DDR
+        * [3]   CRYPTO_ENABLE - 0=Crypto disabled, 1=Crypto enabled
+        */
+       u8      byte1;
+       u8      reply_qid;      /* Host reply queue to post response to */
+       u8      reserved1;
+       __le32  it_nexus;       /* Device handle for the request */
+       __le32  request_id;     /* Sender context */
+       /* Lower 32-bits of tweak value for crypto enabled IOs */
+       __le32  tweak_value_lo;
+       u8      cdb[16];        /* SCSI CDB of the command */
+       u8      lun[8];         /* SCSI LUN of the command */
+
+       /* Total data length in bytes to be read/written (if any) */
+       __le32  data_length;
+
+       /* [2:0] Task Attribute, [6:3] Command Priority */
+       u8      attr_prio;
+
+       /* Number of SGL elements embedded in the HBA req */
+       u8      emb_data_desc_count;
+
+       __le16  dek_index;      /* DEK index for crypto enabled IOs */
+
+       /* Lower 32-bits of reserved error data target location on the host */
+       __le32  error_ptr_lo;
+
+       /* Upper 32-bits of reserved error data target location on the host */
+       __le32  error_ptr_hi;
+
+       /* Length of reserved error data area on the host in bytes */
+       __le32  error_length;
+
+       /* Upper 32-bits of tweak value for crypto enabled IOs */
+       __le32  tweak_value_hi;
+
+       struct aac_hba_sgl sge[HBA_MAX_SG_SEPARATE+2]; /* SG list space */
+
+       /*
+        * structure must not exceed
+        * AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE
+        */
+};
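+
+/*
+ * Editor's sketch (not part of this patch): setting the data direction in
+ * byte1 with the HBA_CMD_BYTE1_* values defined above; the helper name is
+ * hypothetical.
+ */
+static inline void aac_hba_cmd_set_dir(struct aac_hba_cmd_req *hbacmd,
+                                       int data_in)
+{
+       /* assumes the remaining byte1 bits are still clear */
+       hbacmd->byte1 = data_in ? HBA_CMD_BYTE1_DATA_DIR_IN :
+                                 HBA_CMD_BYTE1_DATA_DIR_OUT;
+}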
+
+/* Task Management Functions (TMF) */
+#define HBA_TMF_ABORT_TASK     0x01
+#define HBA_TMF_LUN_RESET      0x08
+
+struct aac_hba_tm_req {
+       u8      iu_type;        /* HBA information unit type */
+       u8      reply_qid;      /* Host reply queue to post response to */
+       u8      tmf;            /* Task management function */
+       u8      reserved1;
+
+       __le32  it_nexus;       /* Device handle for the command */
+
+       u8      lun[8];         /* SCSI LUN */
+
+       /* Used to hold sender context. */
+       __le32  request_id;     /* Sender context */
+       __le32  reserved2;
+
+       /* Request identifier of managed task */
+       __le32  managed_request_id;     /* Sender context being managed */
+       __le32  reserved3;
+
+       /* Lower 32-bits of reserved error data target location on the host */
+       __le32  error_ptr_lo;
+       /* Upper 32-bits of reserved error data target location on the host */
+       __le32  error_ptr_hi;
+       /* Length of reserved error data area on the host in bytes */
+       __le32  error_length;
+};
+
+struct aac_hba_reset_req {
+       u8      iu_type;        /* HBA information unit type */
+       /* 0 - reset specified device, 1 - reset all devices */
+       u8      reset_type;
+       u8      reply_qid;      /* Host reply queue to post response to */
+       u8      reserved1;
+
+       __le32  it_nexus;       /* Device handle for the command */
+       __le32  request_id;     /* Sender context */
+       /* Lower 32-bits of reserved error data target location on the host */
+       __le32  error_ptr_lo;
+       /* Upper 32-bits of reserved error data target location on the host */
+       __le32  error_ptr_hi;
+       /* Length of reserved error data area on the host in bytes */
+       __le32  error_length;
+};
+
+struct aac_hba_resp {
+       u8      iu_type;                /* HBA information unit type */
+       u8      reserved1[3];
+       __le32  request_identifier;     /* sender context */
+       __le32  reserved2;
+       u8      service_response;       /* SCSI service response */
+       u8      status;                 /* SCSI status */
+       u8      datapres;       /* [1:0] - data present, [7:2] - reserved */
+       u8      sense_response_data_len;        /* Sense/response data length */
+       __le32  residual_count;         /* Residual data length in bytes */
+       /* Sense/response data */
+       u8      sense_response_buf[HBA_SENSE_DATA_LEN_MAX];
+};
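+
+/*
+ * Editor's sketch (not part of this patch): per the field comment above,
+ * only bits [1:0] of datapres are meaningful and carry one of the
+ * HBA_RESP_DATAPRES_* values; the helper name is hypothetical.
+ */
+static inline u8 aac_hba_resp_datapres(const struct aac_hba_resp *err)
+{
+       return err->datapres & 0x3;
+}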
+
+struct aac_native_hba {
+       union {
+               struct aac_hba_cmd_req cmd;
+               struct aac_hba_tm_req tmr;
+               u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE];
+       } cmd;
+       union {
+               struct aac_hba_resp err;
+               u8 resp_bytes[FW_ERROR_BUFFER_SIZE];
+       } resp;
+};
+
+#define CISS_REPORT_PHYSICAL_LUNS      0xc3
+#define WRITE_HOST_WELLNESS            0xa5
+#define CISS_IDENTIFY_PHYSICAL_DEVICE  0x15
+#define BMIC_IN                        0x26
+#define BMIC_OUT                       0x27
+
+struct aac_ciss_phys_luns_resp {
+       u8      list_length[4];         /* LUN list length (N-7, big endian) */
+       u8      resp_flag;              /* extended response_flag */
+       u8      reserved[3];
+       struct _ciss_lun {
+               u8      tid[3];         /* Target ID */
+               u8      bus;            /* Bus, flag (bits 6,7) */
+               u8      level3[2];
+               u8      level2[2];
+               u8      node_ident[16]; /* phys. node identifier */
+       } lun[1];                       /* List of phys. devices */
+};
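+
+/*
+ * Editor's sketch (not part of this patch): list_length is big-endian and,
+ * per its comment, counts the list bytes that follow the 8-byte header, so
+ * the number of _ciss_lun entries can be recovered as below; the helper
+ * name is hypothetical.
+ */
+static inline u32 aac_ciss_phys_luns_count(struct aac_ciss_phys_luns_resp *r)
+{
+       u32 len = (r->list_length[0] << 24) | (r->list_length[1] << 16) |
+                 (r->list_length[2] << 8) | r->list_length[3];
+
+       return len / (u32)sizeof(struct _ciss_lun);     /* 24 bytes each */
+}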
+
+/*
+ * Interrupts
+ */
+#define AAC_MAX_HRRQ           64
+
+struct aac_ciss_identify_pd {
+       u8 scsi_bus;                    /* SCSI Bus number on controller */
+       u8 scsi_id;                     /* SCSI ID on this bus */
+       u16 block_size;                 /* sector size in bytes */
+       u32 total_blocks;               /* number of sectors on drive */
+       u32 reserved_blocks;            /* controller reserved (RIS) */
+       u8 model[40];                   /* Physical Drive Model */
+       u8 serial_number[40];           /* Drive Serial Number */
+       u8 firmware_revision[8];        /* drive firmware revision */
+       u8 scsi_inquiry_bits;           /* inquiry byte 7 bits */
+       u8 compaq_drive_stamp;          /* 0 means drive not stamped */
+       u8 last_failure_reason;
+
+       u8  flags;
+       u8  more_flags;
+       u8  scsi_lun;                   /* SCSI LUN for phys drive */
+       u8  yet_more_flags;
+       u8  even_more_flags;
+       u32 spi_speed_rules;            /* SPI Speed :Ultra disable diagnose */
+       u8  phys_connector[2];          /* connector number on controller */
+       u8  phys_box_on_bus;            /* phys enclosure this drive resides */
+       u8  phys_bay_in_box;            /* phys drv bay this drive resides */
+       u32 rpm;                        /* Drive rotational speed in rpm */
+       u8  device_type;                /* type of drive */
+       u8  sata_version;               /* only valid when drive_type is SATA */
+       u64 big_total_block_count;
+       u64 ris_starting_lba;
+       u32 ris_size;
+       u8  wwid[20];
+       u8  controller_phy_map[32];
+       u16 phy_count;
+       u8  phy_connected_dev_type[256];
+       u8  phy_to_drive_bay_num[256];
+       u16 phy_to_attached_dev_index[256];
+       u8  box_index;
+       u8  spitfire_support;
+       u16 extra_physical_drive_flags;
+       u8  negotiated_link_rate[256];
+       u8  phy_to_phy_map[256];
+       u8  redundant_path_present_map;
+       u8  redundant_path_failure_map;
+       u8  active_path_number;
+       u16 alternate_paths_phys_connector[8];
+       u8  alternate_paths_phys_box_on_port[8];
+       u8  multi_lun_device_lun_count;
+       u8  minimum_good_fw_revision[8];
+       u8  unique_inquiry_bytes[20];
+       u8  current_temperature_degreesC;
+       u8  temperature_threshold_degreesC;
+       u8  max_temperature_degreesC;
+       u8  logical_blocks_per_phys_block_exp;  /* phyblocksize = 512 * 2^exp */
+       u16 current_queue_depth_limit;
+       u8  switch_name[10];
+       u16 switch_port;
+       u8  alternate_paths_switch_name[40];
+       u8  alternate_paths_switch_port[8];
+       u16 power_on_hours;             /* valid only if gas gauge supported */
+       u16 percent_endurance_used;     /* valid only if gas gauge supported. */
+       u8  drive_authentication;
+       u8  smart_carrier_authentication;
+       u8  smart_carrier_app_fw_version;
+       u8  smart_carrier_bootloader_fw_version;
+       u8  SanitizeSecureEraseSupport;
+       u8  DriveKeyFlags;
+       u8  encryption_key_name[64];
+       u32 misc_drive_flags;
+       u16 dek_index;
+       u16 drive_encryption_flags;
+       u8  sanitize_maximum_time[6];
+       u8  connector_info_mode;
+       u8  connector_info_number[4];
+       u8  long_connector_name[64];
+       u8  device_unique_identifier[16];
+       u8  padto_2K[17];
+} __packed;
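+
+/*
+ * Editor's sketch (not part of this patch): the physical block size follows
+ * directly from the exponent field above (phyblocksize = 512 * 2^exp); the
+ * helper name is hypothetical.
+ */
+static inline u32
+aac_ciss_phys_block_size(const struct aac_ciss_identify_pd *pd)
+{
+       return 512U << pd->logical_blocks_per_phys_block_exp;
+}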
+
 /*
  * These macros convert from physical channels to virtual channels
  */
@@ -86,6 +418,7 @@ enum {
 #define CONTAINER_TO_CHANNEL(cont)     (CONTAINER_CHANNEL)
 #define CONTAINER_TO_ID(cont)          (cont)
 #define CONTAINER_TO_LUN(cont)         (0)
+#define ENCLOSURE_CHANNEL              (3)
 
 #define PMC_DEVICE_S6  0x28b
 #define PMC_DEVICE_S7  0x28c
@@ -351,10 +684,10 @@ enum aac_queue_types {
 
 /* transport FIB header (PMC) */
 struct aac_fib_xporthdr {
-       u64     HostAddress;    /* FIB host address w/o xport header */
-       u32     Size;           /* FIB size excluding xport header */
-       u32     Handle;         /* driver handle to reference the FIB */
-       u64     Reserved[2];
+       __le64  HostAddress;    /* FIB host address w/o xport header */
+       __le32  Size;           /* FIB size excluding xport header */
+       __le32  Handle;         /* driver handle to reference the FIB */
+       __le64  Reserved[2];
 };
 
 #define                ALIGN32         32
@@ -379,7 +712,7 @@ struct aac_fibhdr {
                __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
                __le32 TimeStamp;       /* otherwise timestamp for FW internal use */
        } u;
-       u32 Handle;             /* FIB handle used for MSGU commnunication */
+       __le32 Handle;          /* FIB handle used for MSGU communication */
        u32 Previous;           /* FW internal use */
        u32 Next;               /* FW internal use */
 };
@@ -489,41 +822,64 @@ enum fib_xfer_state {
 #define ADAPTER_INIT_STRUCT_REVISION_4         4 // rocket science
 #define ADAPTER_INIT_STRUCT_REVISION_6         6 /* PMC src */
 #define ADAPTER_INIT_STRUCT_REVISION_7         7 /* Denali */
+#define ADAPTER_INIT_STRUCT_REVISION_8         8 // Thor
 
-struct aac_init
+union aac_init
 {
-       __le32  InitStructRevision;
-       __le32  Sa_MSIXVectors;
-       __le32  fsrev;
-       __le32  CommHeaderAddress;
-       __le32  FastIoCommAreaAddress;
-       __le32  AdapterFibsPhysicalAddress;
-       __le32  AdapterFibsVirtualAddress;
-       __le32  AdapterFibsSize;
-       __le32  AdapterFibAlign;
-       __le32  printfbuf;
-       __le32  printfbufsiz;
-       __le32  HostPhysMemPages;   /* number of 4k pages of host
-                                      physical memory */
-       __le32  HostElapsedSeconds; /* number of seconds since 1970. */
-       /*
-        * ADAPTER_INIT_STRUCT_REVISION_4 begins here
-        */
-       __le32  InitFlags;      /* flags for supported features */
+       struct _r7 {
+               __le32  init_struct_revision;
+               __le32  no_of_msix_vectors;
+               __le32  fsrev;
+               __le32  comm_header_address;
+               __le32  fast_io_comm_area_address;
+               __le32  adapter_fibs_physical_address;
+               __le32  adapter_fibs_virtual_address;
+               __le32  adapter_fibs_size;
+               __le32  adapter_fib_align;
+               __le32  printfbuf;
+               __le32  printfbufsiz;
+               /* number of 4k pages of host phys. mem. */
+               __le32  host_phys_mem_pages;
+               /* number of seconds since 1970. */
+               __le32  host_elapsed_seconds;
+               /* ADAPTER_INIT_STRUCT_REVISION_4 begins here */
+               __le32  init_flags;     /* flags for supported features */
 #define INITFLAGS_NEW_COMM_SUPPORTED   0x00000001
 #define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
 #define INITFLAGS_DRIVER_SUPPORTS_PM   0x00000020
 #define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED     0x00000040
 #define INITFLAGS_FAST_JBOD_SUPPORTED  0x00000080
 #define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED     0x00000100
-       __le32  MaxIoCommands;  /* max outstanding commands */
-       __le32  MaxIoSize;      /* largest I/O command */
-       __le32  MaxFibSize;     /* largest FIB to adapter */
-       /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
-       __le32  MaxNumAif;      /* max number of aif */
-       /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
-       __le32  HostRRQ_AddrLow;
-       __le32  HostRRQ_AddrHigh;       /* Host RRQ (response queue) for SRC */
+#define INITFLAGS_DRIVER_SUPPORTS_HBA_MODE  0x00000400
+               __le32  max_io_commands;        /* max outstanding commands */
+               __le32  max_io_size;    /* largest I/O command */
+               __le32  max_fib_size;   /* largest FIB to adapter */
+               /* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+               __le32  max_num_aif;    /* max number of aif */
+               /* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+               /* Host RRQ (response queue) for SRC */
+               __le32  host_rrq_addr_low;
+               __le32  host_rrq_addr_high;
+       } r7;
+       struct _r8 {
+               /* ADAPTER_INIT_STRUCT_REVISION_8 */
+               __le32  init_struct_revision;
+               __le32  rr_queue_count;
+               __le32  host_elapsed_seconds; /* number of secs since 1970. */
+               __le32  init_flags;
+               __le32  max_io_size;    /* largest I/O command */
+               __le32  max_num_aif;    /* max number of aif */
+               __le32  reserved1;
+               __le32  reserved2;
+               struct _rrq {
+                       __le32  host_addr_low;
+                       __le32  host_addr_high;
+                       __le16  msix_id;
+                       __le16  element_count;
+                       __le16  comp_thresh;
+                       __le16  unused;
+               } rrq[1];               /* up to 64 RRQ addresses */
+       } r8;
 };
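+
+/*
+ * Editor's sketch (not part of this patch): populating one slot of the
+ * rev-8 RRQ table from struct _r8 above; lower_32_bits()/upper_32_bits()
+ * are the usual kernel helpers, the function name is hypothetical.
+ */
+static inline void aac_init_r8_rrq(union aac_init *init, int idx,
+                                  dma_addr_t pa, u16 msix_id, u16 count)
+{
+       init->r8.rrq[idx].host_addr_low = cpu_to_le32(lower_32_bits(pa));
+       init->r8.rrq[idx].host_addr_high = cpu_to_le32(upper_32_bits(pa));
+       init->r8.rrq[idx].msix_id = cpu_to_le16(msix_id);
+       init->r8.rrq[idx].element_count = cpu_to_le16(count);
+}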
 
 enum aac_log_level {
@@ -554,7 +910,7 @@ struct adapter_ops
        void (*adapter_enable_int)(struct aac_dev *dev);
        int  (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
        int  (*adapter_check_health)(struct aac_dev *dev);
-       int  (*adapter_restart)(struct aac_dev *dev, int bled);
+       int  (*adapter_restart)(struct aac_dev *dev, int bled, u8 reset_type);
        void (*adapter_start)(struct aac_dev *dev);
        /* Transport operations */
        int  (*adapter_ioremap)(struct aac_dev * dev, u32 size);
@@ -727,6 +1083,7 @@ struct sa_registers {
 
 
 #define SA_INIT_NUM_MSIXVECTORS                1
+#define SA_MINIPORT_REVISION           SA_INIT_NUM_MSIXVECTORS
 
 #define sa_readw(AEP, CSR)             readl(&((AEP)->regs.sa->CSR))
 #define sa_readl(AEP, CSR)             readl(&((AEP)->regs.sa->CSR))
@@ -820,32 +1177,37 @@ struct rkt_registers {
 #define src_inbound rx_inbound
 
 struct src_mu_registers {
-                               /*      PCI*| Name */
-       __le32  reserved0[6];   /*      00h | Reserved */
-       __le32  IOAR[2];        /*      18h | IOA->host interrupt register */
-       __le32  IDR;            /*      20h | Inbound Doorbell Register */
-       __le32  IISR;           /*      24h | Inbound Int. Status Register */
-       __le32  reserved1[3];   /*      28h | Reserved */
-       __le32  OIMR;           /*      34h | Outbound Int. Mask Register */
-       __le32  reserved2[25];  /*      38h | Reserved */
-       __le32  ODR_R;          /*      9ch | Outbound Doorbell Read */
-       __le32  ODR_C;          /*      a0h | Outbound Doorbell Clear */
-       __le32  reserved3[6];   /*      a4h | Reserved */
-       __le32  OMR;            /*      bch | Outbound Message Register */
+                               /*  PCI*| Name */
+       __le32  reserved0[6];   /*  00h | Reserved */
+       __le32  IOAR[2];        /*  18h | IOA->host interrupt register */
+       __le32  IDR;            /*  20h | Inbound Doorbell Register */
+       __le32  IISR;           /*  24h | Inbound Int. Status Register */
+       __le32  reserved1[3];   /*  28h | Reserved */
+       __le32  OIMR;           /*  34h | Outbound Int. Mask Register */
+       __le32  reserved2[25];  /*  38h | Reserved */
+       __le32  ODR_R;          /*  9ch | Outbound Doorbell Read */
+       __le32  ODR_C;          /*  a0h | Outbound Doorbell Clear */
+       __le32  reserved3[3];   /*  a4h | Reserved */
+       __le32  SCR0;           /*  b0h | Scratchpad 0 */
+       __le32  reserved4[2];   /*  b4h | Reserved */
+       __le32  OMR;            /*  bch | Outbound Message Register */
        __le32  IQ_L;           /*  c0h | Inbound Queue (Low address) */
        __le32  IQ_H;           /*  c4h | Inbound Queue (High address) */
        __le32  ODR_MSI;        /*  c8h | MSI register for sync./AIF */
+       __le32  reserved5;      /*  cch | Reserved */
+       __le32  IQN_L;          /*  d0h | Inbound (native cmd) low  */
+       __le32  IQN_H;          /*  d4h | Inbound (native cmd) high */
 };
 
 struct src_registers {
        struct src_mu_registers MUnit;  /* 00h - cbh */
        union {
                struct {
-                       __le32 reserved1[130789];       /* cch - 7fc5fh */
+                       __le32 reserved1[130786];       /* d8h - 7fc5fh */
                        struct src_inbound IndexRegs;   /* 7fc60h */
                } tupelo;
                struct {
-                       __le32 reserved1[973];          /* cch - fffh */
+                       __le32 reserved1[970];          /* d8h - fffh */
                        struct src_inbound IndexRegs;   /* 1000h */
                } denali;
        } u;
@@ -930,6 +1292,7 @@ struct fsa_dev_info {
        char            devname[8];
        struct sense_data sense_data;
        u32             block_size;
+       u8              identifier[16];
 };
 
 struct fib {
@@ -958,8 +1321,30 @@ struct fib {
        struct list_head        fiblink;
        void                    *data;
        u32                     vector_no;
-       struct hw_fib           *hw_fib_va;             /* Actual shared object */
-       dma_addr_t              hw_fib_pa;              /* physical address of hw_fib*/
+       struct hw_fib           *hw_fib_va;     /* also used for native */
+       dma_addr_t              hw_fib_pa;      /* physical address of hw_fib*/
+       dma_addr_t              hw_sgl_pa;      /* extra sgl for native */
+       dma_addr_t              hw_error_pa;    /* error buffer for native */
+       u32                     hbacmd_size;    /* cmd size for native */
+};
+
+#define AAC_INIT                       0
+#define AAC_RESCAN                     1
+
+#define AAC_DEVTYPE_RAID_MEMBER        1
+#define AAC_DEVTYPE_ARC_RAW            2
+#define AAC_DEVTYPE_NATIVE_RAW         3
+#define AAC_EXPOSE_DISK                0
+#define AAC_HIDE_DISK                  3
+
+struct aac_hba_map_info {
+       __le32  rmw_nexus;              /* nexus for native HBA devices */
+       u8              devtype;        /* device type */
+       u8              new_devtype;
+       u8              reset_state;    /* 0 - no reset, 1..x - */
+                                       /* after xth TM LUN reset */
+       u16             qd_limit;
+       u8              expose;         /* whether to expose the device or not */
 };
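+
+/*
+ * Editor's sketch (not part of this patch): the bus/target lookup pattern
+ * used by hba_resp_task_failure() in aachba.c, expressed as a helper with
+ * a hypothetical name; the map argument stands in for dev->hba_map.
+ */
+static inline struct aac_hba_map_info *
+aac_map_entry(struct aac_hba_map_info (*map)[AAC_MAX_TARGETS],
+             struct scsi_cmnd *scsicmd)
+{
+       u32 bus = aac_logical_to_phys(scmd_channel(scsicmd));
+
+       return &map[bus][scmd_id(scsicmd)];
+}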
 
 /*
@@ -1025,7 +1410,28 @@ struct aac_supplement_adapter_info
        /* StructExpansion == 1 */
        __le32  FeatureBits3;
        __le32  SupportedPerformanceModes;
-       __le32  ReservedForFutureGrowth[80];
+       u8      HostBusType;            /* uses HOST_BUS_TYPE_xxx defines */
+       u8      HostBusWidth;           /* actual width in bits or links */
+       u16     HostBusSpeed;           /* actual bus speed/link rate in MHz */
+       u8      MaxRRCDrives;           /* max. number of ITP-RRC drives/pool */
+       u8      MaxDiskXtasks;          /* max. possible num of DiskX Tasks */
+
+       u8      CpldVerLoaded;
+       u8      CpldVerInFlash;
+
+       __le64  MaxRRCCapacity;
+       __le32  CompiledMaxHistLogLevel;
+       u8      CustomBoardName[12];
+       u16     SupportedCntlrMode;     /* identify supported controller mode */
+       u16     ReservedForFuture16;
+       __le32  SupportedOptions3;      /* reserved for future options */
+
+       __le16  VirtDeviceBus;          /* virt. SCSI device for Thor */
+       __le16  VirtDeviceTarget;
+       __le16  VirtDeviceLUN;
+       __le16  Unused;
+       __le32  ReservedForFutureGrowth[68];
 };
 #define AAC_FEATURE_FALCON     cpu_to_le32(0x00000010)
 #define AAC_FEATURE_JBOD       cpu_to_le32(0x08000000)
@@ -1099,11 +1505,21 @@ struct aac_bus_info_response {
 #define AAC_OPT_SUPPLEMENT_ADAPTER_INFO        cpu_to_le32(1<<16)
 #define AAC_OPT_NEW_COMM               cpu_to_le32(1<<17)
 #define AAC_OPT_NEW_COMM_64            cpu_to_le32(1<<18)
+#define AAC_OPT_EXTENDED               cpu_to_le32(1<<23)
+#define AAC_OPT_NATIVE_HBA             cpu_to_le32(1<<25)
 #define AAC_OPT_NEW_COMM_TYPE1         cpu_to_le32(1<<28)
 #define AAC_OPT_NEW_COMM_TYPE2         cpu_to_le32(1<<29)
 #define AAC_OPT_NEW_COMM_TYPE3         cpu_to_le32(1<<30)
 #define AAC_OPT_NEW_COMM_TYPE4         cpu_to_le32(1<<31)
 
+#define AAC_COMM_PRODUCER              0
+#define AAC_COMM_MESSAGE               1
+#define AAC_COMM_MESSAGE_TYPE1         3
+#define AAC_COMM_MESSAGE_TYPE2         4
+#define AAC_COMM_MESSAGE_TYPE3         5
+
+#define AAC_EXTOPT_SA_FIRMWARE         cpu_to_le32(1<<1)
+
 /* MSIX context */
 struct aac_msix_ctx {
        int             vector_no;
@@ -1119,15 +1535,17 @@ struct aac_dev
        /*
         *      negotiated FIB settings
         */
-       unsigned                max_fib_size;
-       unsigned                sg_tablesize;
-       unsigned                max_num_aif;
+       unsigned int            max_fib_size;
+       unsigned int            sg_tablesize;
+       unsigned int            max_num_aif;
+
+       unsigned int            max_cmd_size;   /* max_fib_size or MAX_NATIVE */
 
        /*
         *      Map for 128 fib objects (64k)
         */
-       dma_addr_t              hw_fib_pa;
-       struct hw_fib           *hw_fib_va;
+       dma_addr_t              hw_fib_pa;      /* also used for native cmd */
+       struct hw_fib           *hw_fib_va;     /* also used for native cmd */
        struct hw_fib           *aif_base_va;
        /*
         *      Fib Headers
@@ -1157,21 +1575,23 @@ struct aac_dev
 
        resource_size_t         base_size, dbg_size;    /* Size of
                                                         *  mapped in region */
-
-       struct aac_init         *init;          /* Holds initialization info to communicate with adapter */
+       /*
+        * Holds initialization info
+        * to communicate with adapter
+        */
+       union aac_init          *init;
        dma_addr_t              init_pa;        /* Holds physical address of the init struct */
-
-       u32                     *host_rrq;      /* response queue
-                                                * if AAC_COMM_MESSAGE_TYPE1 */
-
+       /* response queue (if AAC_COMM_MESSAGE_TYPE1) */
+       __le32                  *host_rrq;
        dma_addr_t              host_rrq_pa;    /* phys. address */
        /* index into rrq buffer */
        u32                     host_rrq_idx[AAC_MAX_MSIX];
        atomic_t                rrq_outstanding[AAC_MAX_MSIX];
        u32                     fibs_pushed_no;
        struct pci_dev          *pdev;          /* Our PCI interface */
-       void *                  printfbuf;      /* pointer to buffer used for printf's from the adapter */
-       void *                  comm_addr;      /* Base address of Comm area */
+       /* pointer to buffer used for printf's from the adapter */
+       void                    *printfbuf;
+       void                    *comm_addr;     /* Base address of Comm area */
        dma_addr_t              comm_phys;      /* Physical Address of Comm area */
        size_t                  comm_size;
 
@@ -1227,15 +1647,12 @@ struct aac_dev
        u8                      needs_dac;
        u8                      raid_scsi_mode;
        u8                      comm_interface;
-#      define AAC_COMM_PRODUCER 0
-#      define AAC_COMM_MESSAGE  1
-#      define AAC_COMM_MESSAGE_TYPE1   3
-#      define AAC_COMM_MESSAGE_TYPE2   4
        u8                      raw_io_interface;
        u8                      raw_io_64;
        u8                      printf_enabled;
        u8                      in_reset;
        u8                      msi;
+       u8                      sa_firmware;
        int                     management_fib_count;
        spinlock_t              manage_lock;
        spinlock_t              sync_lock;
@@ -1246,7 +1663,10 @@ struct aac_dev
        u32                     max_msix;       /* max. MSI-X vectors */
        u32                     vector_cap;     /* MSI-X vector capab.*/
        int                     msi_enabled;    /* MSI/MSI-X enabled */
+       atomic_t                msix_counter;
+       struct msix_entry       msixentry[AAC_MAX_MSIX];
        struct aac_msix_ctx     aac_msix[AAC_MAX_MSIX]; /* context */
+       struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
        u8                      adapter_shutdown;
        u32                     handle_pci_error;
 };
@@ -1269,8 +1689,8 @@ struct aac_dev
 #define aac_adapter_check_health(dev) \
        (dev)->a_ops.adapter_check_health(dev)
 
-#define aac_adapter_restart(dev,bled) \
-       (dev)->a_ops.adapter_restart(dev,bled)
+#define aac_adapter_restart(dev, bled, reset_type) \
+       ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
 
 #define aac_adapter_start(dev) \
        ((dev)->a_ops.adapter_start(dev))
@@ -1300,6 +1720,8 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG                       (0x00000002)
 #define FIB_CONTEXT_FLAG_WAIT                  (0x00000004)
 #define FIB_CONTEXT_FLAG_FASTRESP              (0x00000008)
+#define FIB_CONTEXT_FLAG_NATIVE_HBA            (0x00000010)
+#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF        (0x00000020)
 
 /*
  *     Define the command values
@@ -1358,6 +1780,7 @@ struct aac_dev
 #define                ST_IO           5
 #define                ST_NXIO         6
 #define                ST_E2BIG        7
+#define                ST_MEDERR       8
 #define                ST_ACCES        13
 #define                ST_EXIST        17
 #define                ST_XDEV         18
@@ -1715,6 +2138,8 @@ struct aac_fsinfo {
 
 struct  aac_blockdevinfo {
        __le32  block_size;
+       __le32  logical_phys_map;
+       u8      identifier[16];
 };
 
 union aac_contentinfo {
@@ -1940,6 +2365,15 @@ struct revision
 #define FSACTL_FORCE_DELETE_DISK               CTL_CODE(2120, METHOD_NEITHER)
 #define FSACTL_GET_CONTAINERS                  2131
 #define FSACTL_SEND_LARGE_FIB                  CTL_CODE(2138, METHOD_BUFFERED)
+#define FSACTL_RESET_IOP                       CTL_CODE(2140, METHOD_BUFFERED)
+#define FSACTL_GET_HBA_INFO                    CTL_CODE(2150, METHOD_BUFFERED)
+/* flags defined for IOP & HW SOFT RESET */
+#define HW_IOP_RESET                           0x01
+#define HW_SOFT_RESET                          0x02
+#define IOP_HWSOFT_RESET                       (HW_IOP_RESET | HW_SOFT_RESET)
+/* HW Soft Reset register offset */
+#define IBW_SWR_OFFSET                         0x4000
+#define SOFT_RESET_TIME                        60
 
 
 struct aac_common
@@ -1958,6 +2392,8 @@ struct aac_common
 #ifdef DBG
        u32 FibsSent;
        u32 FibRecved;
+       u32 NativeSent;
+       u32 NativeRecved;
        u32 NoResponseSent;
        u32 NoResponseRecved;
        u32 AsyncSent;
@@ -1969,6 +2405,56 @@ struct aac_common
 
 extern struct aac_common aac_config;
 
+/*
+ * This is for management ioctl purposes only.
+ */
+struct aac_hba_info {
+
+       u8      driver_name[50];
+       u8      adapter_number;
+       u8      system_io_bus_number;
+       u8      device_number;
+       u32     function_number;
+       u32     vendor_id;
+       u32     device_id;
+       u32     sub_vendor_id;
+       u32     sub_system_id;
+       u32     mapped_base_address_size;
+       u32     base_physical_address_high_part;
+       u32     base_physical_address_low_part;
+
+       u32     max_command_size;
+       u32     max_fib_size;
+       u32     max_scatter_gather_from_os;
+       u32     max_scatter_gather_to_fw;
+       u32     max_outstanding_fibs;
+
+       u32     queue_start_threshold;
+       u32     queue_dump_threshold;
+       u32     max_io_size_queued;
+       u32     outstanding_io;
+
+       u32     firmware_build_number;
+       u32     bios_build_number;
+       u32     driver_build_number;
+       u32     serial_number_high_part;
+       u32     serial_number_low_part;
+       u32     supported_options;
+       u32     feature_bits;
+       u32     currentnumber_ports;
+
+       u8      new_comm_interface:1;
+       u8      new_commands_supported:1;
+       u8      disable_passthrough:1;
+       u8      expose_non_dasd:1;
+       u8      queue_allowed:1;
+       u8      bled_check_enabled:1;
+       u8      reserved1:1;
+       u8      reserved2:1;
+
+       u32     reserved3[10];
+
+};
 
 /*
  *     The following macro is used when sending and receiving FIBs. It is
@@ -2096,9 +2582,10 @@ extern struct aac_common aac_config;
 
 /* PMC NEW COMM: Request the event data */
 #define                AifReqEvent             200
+#define                AifRawDeviceRemove      203     /* RAW device deleted */
+#define                AifNativeDeviceAdd      204     /* native HBA device added */
+#define                AifNativeDeviceRemove   205     /* native HBA device removed */
 
-/* RAW device deleted */
-#define                AifRawDeviceRemove      203
 
 /*
  *     Adapter Initiated FIB command structures. Start with the adapter
@@ -2131,6 +2618,8 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 
 int aac_acquire_irq(struct aac_dev *dev);
 void aac_free_irq(struct aac_dev *dev);
+int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
+int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
 const char *aac_driverinfo(struct Scsi_Host *);
 void aac_fib_vector_assign(struct aac_dev *dev);
 struct fib *aac_fib_alloc(struct aac_dev *dev);
@@ -2141,9 +2630,12 @@ void aac_fib_free(struct fib * context);
 void aac_fib_init(struct fib * context);
 void aac_printf(struct aac_dev *dev, u32 val);
 int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_hba_send(u8 command, struct fib *context,
+               fib_callback callback, void *ctxt);
 int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
 void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
 int aac_fib_complete(struct fib * context);
+void aac_hba_callback(void *context, struct fib *fibptr);
 #define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
 struct aac_dev *aac_init_adapter(struct aac_dev *dev);
 void aac_src_access_devreg(struct aac_dev *dev, int mode);
@@ -2169,7 +2661,7 @@ unsigned int aac_command_normal(struct aac_queue * q);
 unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
                        int isAif, int isFastResponse,
                        struct hw_fib *aif_fib);
-int aac_reset_adapter(struct aac_dev * dev, int forced);
+int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type);
 int aac_check_health(struct aac_dev * dev);
 int aac_command_thread(void *data);
 int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
@@ -2183,7 +2675,6 @@ int aac_rx_select_comm(struct aac_dev *dev, int comm);
 int aac_rx_deliver_producer(struct fib * fib);
 char * get_container_type(unsigned type);
 extern int numacb;
-extern int acbsize;
 extern char aac_driver_version[];
 extern int startup_timeout;
 extern int aif_timeout;
@@ -2194,3 +2685,4 @@ extern int aac_commit;
 extern int update_interval;
 extern int check_interval;
 extern int aac_check_reset;
+#endif
index e1daff2..614842a 100644
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -477,20 +478,24 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
        struct fib* srbfib;
        int status;
        struct aac_srb *srbcmd = NULL;
+       struct aac_hba_cmd_req *hbacmd = NULL;
        struct user_aac_srb *user_srbcmd = NULL;
        struct user_aac_srb __user *user_srb = arg;
        struct aac_srb_reply __user *user_reply;
-       struct aac_srb_reply* reply;
+       u32 chn;
        u32 fibsize = 0;
        u32 flags = 0;
        s32 rcode = 0;
        u32 data_dir;
-       void __user *sg_user[32];
-       void *sg_list[32];
+       void __user *sg_user[HBA_MAX_SG_EMBEDDED];
+       void *sg_list[HBA_MAX_SG_EMBEDDED];
+       u32 sg_count[HBA_MAX_SG_EMBEDDED];
        u32 sg_indx = 0;
        u32 byte_count = 0;
        u32 actual_fibsize64, actual_fibsize = 0;
        int i;
+       int is_native_device;
+       u64 address;
 
 
        if (dev->in_reset) {
@@ -507,11 +512,6 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
        if (!(srbfib = aac_fib_alloc(dev))) {
                return -ENOMEM;
        }
-       aac_fib_init(srbfib);
-       /* raw_srb FIB is not FastResponseCapable */
-       srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
-
-       srbcmd = (struct aac_srb*) fib_data(srbfib);
 
        memset(sg_list, 0, sizeof(sg_list)); /* so cleanup can't trip on uninitialized entries */
        if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
@@ -538,21 +538,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                goto cleanup;
        }
 
-       user_reply = arg+fibsize;
-
        flags = user_srbcmd->flags; /* from user in cpu order */
-       // Fix up srb for endian and force some values
-
-       srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);       // Force this
-       srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
-       srbcmd->id       = cpu_to_le32(user_srbcmd->id);
-       srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
-       srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
-       srbcmd->flags    = cpu_to_le32(flags);
-       srbcmd->retry_limit = 0; // Obsolete parameter
-       srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
-       memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
-
        switch (flags & (SRB_DataIn | SRB_DataOut)) {
        case SRB_DataOut:
                data_dir = DMA_TO_DEVICE;
@@ -568,7 +554,12 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
        }
        if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
                dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
-                 le32_to_cpu(srbcmd->sg.count)));
+                       user_srbcmd->sg.count));
+               rcode = -EINVAL;
+               goto cleanup;
+       }
+       if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
+               dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
                rcode = -EINVAL;
                goto cleanup;
        }
@@ -588,13 +579,136 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                rcode = -EINVAL;
                goto cleanup;
        }
-       if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
-               dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
-               rcode = -EINVAL;
-               goto cleanup;
+
+       chn = aac_logical_to_phys(user_srbcmd->channel);
+       if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
+               dev->hba_map[chn][user_srbcmd->id].devtype ==
+               AAC_DEVTYPE_NATIVE_RAW) {
+               is_native_device = 1;
+               hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
+               memset(hbacmd, 0, 96);  /* clearing the full sizeof(*hbacmd) is not necessary */
+
+               /* iu_type is a parameter of aac_hba_send */
+               switch (data_dir) {
+               case DMA_TO_DEVICE:
+                       hbacmd->byte1 = 2;
+                       break;
+               case DMA_FROM_DEVICE:
+               case DMA_BIDIRECTIONAL:
+                       hbacmd->byte1 = 1;
+                       break;
+               case DMA_NONE:
+               default:
+                       break;
+               }
+               hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
+               hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
+
+               /*
+                * we fill in reply_qid later in aac_src_deliver_message
+                * we fill in iu_type, request_id later in aac_hba_send
+                * we fill in emb_data_desc_count, data_length later
+                * in sg list build
+                */
+
+               memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));
+
+               address = (u64)srbfib->hw_error_pa;
+               hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+               hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+               hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+               hbacmd->emb_data_desc_count =
+                                       cpu_to_le32(user_srbcmd->sg.count);
+               srbfib->hbacmd_size = 64 +
+                       user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);
+
+       } else {
+               is_native_device = 0;
+               aac_fib_init(srbfib);
+
+               /* raw_srb FIB is not FastResponseCapable */
+               srbfib->hw_fib_va->header.XferState &=
+                       ~cpu_to_le32(FastResponseCapable);
+
+               srbcmd = (struct aac_srb *) fib_data(srbfib);
+
+               // Fix up srb for endian and force some values
+
+               srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
+               srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
+               srbcmd->id       = cpu_to_le32(user_srbcmd->id);
+               srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
+               srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
+               srbcmd->flags    = cpu_to_le32(flags);
+               srbcmd->retry_limit = 0; // Obsolete parameter
+               srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
+               memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
        }
+
        byte_count = 0;
-       if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
+       if (is_native_device) {
+               struct user_sgmap *usg32 = &user_srbcmd->sg;
+               struct user_sgmap64 *usg64 =
+                       (struct user_sgmap64 *)&user_srbcmd->sg;
+
+               for (i = 0; i < usg32->count; i++) {
+                       void *p;
+                       u64 addr;
+
+                       sg_count[i] = (actual_fibsize64 == fibsize) ?
+                               usg64->sg[i].count : usg32->sg[i].count;
+                       if (sg_count[i] >
+                               (dev->scsi_host_ptr->max_sectors << 9)) {
+                               pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
+                                       i, sg_count[i],
+                                       dev->scsi_host_ptr->max_sectors << 9);
+                               rcode = -EINVAL;
+                               goto cleanup;
+                       }
+
+                       p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+                       if (!p) {
+                               rcode = -ENOMEM;
+                               goto cleanup;
+                       }
+
+                       if (actual_fibsize64 == fibsize) {
+                               addr = (u64)usg64->sg[i].addr[0];
+                               addr += ((u64)usg64->sg[i].addr[1]) << 32;
+                       } else {
+                               addr = (u64)usg32->sg[i].addr;
+                       }
+
+                       sg_user[i] = (void __user *)(uintptr_t)addr;
+                       sg_list[i] = p; // save so we can clean up later
+                       sg_indx = i;
+
+                       if (flags & SRB_DataOut) {
+                               if (copy_from_user(p, sg_user[i],
+                                       sg_count[i])) {
+                                       rcode = -EFAULT;
+                                       goto cleanup;
+                               }
+                       }
+                       addr = pci_map_single(dev->pdev, p, sg_count[i],
+                                               data_dir);
+                       hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
+                       hbacmd->sge[i].addr_lo = cpu_to_le32(
+                                               (u32)(addr & 0xffffffff));
+                       hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
+                       hbacmd->sge[i].flags = 0;
+                       byte_count += sg_count[i];
+               }
+
+               if (usg32->count > 0)   /* embedded sglist */
+                       hbacmd->sge[usg32->count-1].flags =
+                               cpu_to_le32(0x40000000);
+               hbacmd->data_length = cpu_to_le32(byte_count);
+
+               status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
+                                       NULL, NULL);
+
+       } else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
                struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
                struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
 
@@ -606,7 +720,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < upsg->count; i++) {
                                u64 addr;
                                void* p;
-                               if (upsg->sg[i].count >
+
+                               sg_count[i] = upsg->sg[i].count;
+                               if (sg_count[i] >
                                    ((dev->adapter_info.options &
                                     AAC_OPT_NEW_COMM) ?
                                      (dev->scsi_host_ptr->max_sectors << 9) :
@@ -615,10 +731,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                        goto cleanup;
                                }
                                /* Does this really need to be GFP_DMA? */
-                               p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+                               p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
                                if(!p) {
                                        dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
-                                         upsg->sg[i].count,i,upsg->count));
+                                         sg_count[i], i, upsg->count));
                                        rcode = -ENOMEM;
                                        goto cleanup;
                                }
@@ -629,18 +745,20 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                sg_indx = i;
 
                                if (flags & SRB_DataOut) {
-                                       if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+                                       if (copy_from_user(p, sg_user[i],
+                                               sg_count[i])){
                                                dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
                                                rcode = -EFAULT;
                                                goto cleanup;
                                        }
                                }
-                               addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
+                               addr = pci_map_single(dev->pdev, p,
+                                                       sg_count[i], data_dir);
 
                                psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
                                psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
-                               byte_count += upsg->sg[i].count;
-                               psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+                               byte_count += sg_count[i];
+                               psg->sg[i].count = cpu_to_le32(sg_count[i]);
                        }
                } else {
                        struct user_sgmap* usg;
@@ -657,7 +775,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < usg->count; i++) {
                                u64 addr;
                                void* p;
-                               if (usg->sg[i].count >
+
+                               sg_count[i] = usg->sg[i].count;
+                               if (sg_count[i] >
                                    ((dev->adapter_info.options &
                                     AAC_OPT_NEW_COMM) ?
                                      (dev->scsi_host_ptr->max_sectors << 9) :
@@ -667,10 +787,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                        goto cleanup;
                                }
                                /* Does this really need to be GFP_DMA? */
-                               p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
+                               p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
                                if(!p) {
                                        dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
-                                         usg->sg[i].count,i,usg->count));
+                                               sg_count[i], i, usg->count));
                                        kfree(usg);
                                        rcode = -ENOMEM;
                                        goto cleanup;
@@ -680,19 +800,21 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                sg_indx = i;
 
                                if (flags & SRB_DataOut) {
-                                       if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
+                                       if (copy_from_user(p, sg_user[i],
+                                               sg_count[i])) {
                                                kfree (usg);
                                                dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
                                                rcode = -EFAULT;
                                                goto cleanup;
                                        }
                                }
-                               addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+                               addr = pci_map_single(dev->pdev, p,
+                                                       sg_count[i], data_dir);
 
                                psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
                                psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
-                               byte_count += usg->sg[i].count;
-                               psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+                               byte_count += sg_count[i];
+                               psg->sg[i].count = cpu_to_le32(sg_count[i]);
                        }
                        kfree (usg);
                }
@@ -711,7 +833,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        for (i = 0; i < upsg->count; i++) {
                                uintptr_t addr;
                                void* p;
-                               if (usg->sg[i].count >
+
+                               sg_count[i] = usg->sg[i].count;
+                               if (sg_count[i] >
                                    ((dev->adapter_info.options &
                                     AAC_OPT_NEW_COMM) ?
                                      (dev->scsi_host_ptr->max_sectors << 9) :
@@ -720,10 +844,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                        goto cleanup;
                                }
                                /* Does this really need to be GFP_DMA? */
-                               p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
-                               if(!p) {
+                               p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+                               if (!p) {
                                        dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
-                                         usg->sg[i].count,i,usg->count));
+                                               sg_count[i], i, usg->count));
                                        rcode = -ENOMEM;
                                        goto cleanup;
                                }
@@ -734,7 +858,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                sg_indx = i;
 
                                if (flags & SRB_DataOut) {
-                                       if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
+                                       if (copy_from_user(p, sg_user[i],
+                                               sg_count[i])){
                                                dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
                                                rcode = -EFAULT;
                                                goto cleanup;
@@ -744,13 +869,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 
                                psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
                                byte_count += usg->sg[i].count;
-                               psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
+                               psg->sg[i].count = cpu_to_le32(sg_count[i]);
                        }
                } else {
                        for (i = 0; i < upsg->count; i++) {
                                dma_addr_t addr;
                                void* p;
-                               if (upsg->sg[i].count >
+
+                               sg_count[i] = upsg->sg[i].count;
+                               if (sg_count[i] >
                                    ((dev->adapter_info.options &
                                     AAC_OPT_NEW_COMM) ?
                                      (dev->scsi_host_ptr->max_sectors << 9) :
@@ -758,10 +885,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                        rcode = -EINVAL;
                                        goto cleanup;
                                }
-                               p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
+                               p = kmalloc(sg_count[i], GFP_KERNEL);
                                if (!p) {
                                        dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
-                                         upsg->sg[i].count, i, upsg->count));
+                                         sg_count[i], i, upsg->count));
                                        rcode = -ENOMEM;
                                        goto cleanup;
                                }
@@ -770,19 +897,19 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                                sg_indx = i;
 
                                if (flags & SRB_DataOut) {
-                                       if(copy_from_user(p, sg_user[i],
-                                                       upsg->sg[i].count)) {
+                                       if (copy_from_user(p, sg_user[i],
+                                               sg_count[i])) {
                                                dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
                                                rcode = -EFAULT;
                                                goto cleanup;
                                        }
                                }
                                addr = pci_map_single(dev->pdev, p,
-                                       upsg->sg[i].count, data_dir);
+                                       sg_count[i], data_dir);
 
                                psg->sg[i].addr = cpu_to_le32(addr);
-                               byte_count += upsg->sg[i].count;
-                               psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
+                               byte_count += sg_count[i];
+                               psg->sg[i].count = cpu_to_le32(sg_count[i]);
                        }
                }
                srbcmd->count = cpu_to_le32(byte_count);
@@ -792,12 +919,13 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                        psg->count = 0;
                status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
        }
+
        if (status == -ERESTARTSYS) {
                rcode = -ERESTARTSYS;
                goto cleanup;
        }
 
-       if (status != 0){
+       if (status != 0) {
                dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
                rcode = -ENXIO;
                goto cleanup;
@@ -805,11 +933,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 
        if (flags & SRB_DataIn) {
                for(i = 0 ; i <= sg_indx; i++){
-                       byte_count = le32_to_cpu(
-                         (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
-                             ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
-                             : srbcmd->sg.sg[i].count);
-                       if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
+                       if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
                                dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
                                rcode = -EFAULT;
                                goto cleanup;
@@ -818,19 +942,50 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
                }
        }
 
-       reply = (struct aac_srb_reply *) fib_data(srbfib);
-       if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
-               dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
-               rcode = -EFAULT;
-               goto cleanup;
+       user_reply = arg + fibsize;
+       if (is_native_device) {
+               struct aac_hba_resp *err =
+                       &((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
+               struct aac_srb_reply reply;
+
+               reply.status = ST_OK;
+               if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+                       /* fast response */
+                       reply.srb_status = SRB_STATUS_SUCCESS;
+                       reply.scsi_status = 0;
+                       reply.data_xfer_length = byte_count;
+               } else {
+                       reply.srb_status = err->service_response;
+                       reply.scsi_status = err->status;
+                       reply.data_xfer_length = byte_count -
+                               le32_to_cpu(err->residual_count);
+                       reply.sense_data_size = err->sense_response_data_len;
+                       memcpy(reply.sense_data, err->sense_response_buf,
+                               AAC_SENSE_BUFFERSIZE);
+               }
+               if (copy_to_user(user_reply, &reply,
+                       sizeof(struct aac_srb_reply))) {
+                       dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
+                       rcode = -EFAULT;
+                       goto cleanup;
+               }
+       } else {
+               struct aac_srb_reply *reply;
+
+               reply = (struct aac_srb_reply *) fib_data(srbfib);
+               if (copy_to_user(user_reply, reply,
+                       sizeof(struct aac_srb_reply))) {
+                       dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
+                       rcode = -EFAULT;
+                       goto cleanup;
+               }
        }
 
 cleanup:
        kfree(user_srbcmd);
-       for(i=0; i <= sg_indx; i++){
-               kfree(sg_list[i]);
-       }
        if (rcode != -ERESTARTSYS) {
+               for (i = 0; i <= sg_indx; i++)
+                       kfree(sg_list[i]);
                aac_fib_complete(srbfib);
                aac_fib_free(srbfib);
        }
@@ -858,6 +1013,46 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
        return 0;
 }
 
+static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
+{
+       struct aac_hba_info hbainfo;
+
+       /* clear reserved/unfilled fields so no stack data reaches user space */
+       memset(&hbainfo, 0, sizeof(hbainfo));
+
+       hbainfo.adapter_number          = (u8) dev->id;
+       hbainfo.system_io_bus_number    = dev->pdev->bus->number;
+       hbainfo.device_number           = (dev->pdev->devfn >> 3);
+       hbainfo.function_number         = (dev->pdev->devfn & 0x0007);
+
+       hbainfo.vendor_id               = dev->pdev->vendor;
+       hbainfo.device_id               = dev->pdev->device;
+       hbainfo.sub_vendor_id           = dev->pdev->subsystem_vendor;
+       hbainfo.sub_system_id           = dev->pdev->subsystem_device;
+
+       if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
+               dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
+               return -EFAULT;
+       }
+
+       return 0;
+}
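
The bus/device/function split above is the standard PCI devfn encoding; the two shifts are exactly what the PCI_SLOT() and PCI_FUNC() helpers from <linux/pci.h> expand to, so the same assignments could equivalently be written as:

	/* PCI_SLOT(devfn) == (devfn >> 3) & 0x1f, PCI_FUNC(devfn) == devfn & 0x07 */
	hbainfo.device_number   = PCI_SLOT(dev->pdev->devfn);
	hbainfo.function_number = PCI_FUNC(dev->pdev->devfn);
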
+
+struct aac_reset_iop {
+       u8      reset_type;
+};
+
+static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
+{
+       struct aac_reset_iop reset;
+       int retval;
+
+       if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
+               return -EFAULT;
+
+       retval = aac_reset_adapter(dev, 0, reset.reset_type);
+       return retval;
+
+}
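
For illustration, a hypothetical user-space caller of the new reset ioctl might look like the sketch below. The device node name and the visibility of struct aac_reset_iop, FSACTL_RESET_IOP, and IOP_HWSOFT_RESET are assumptions (they would come from a copied driver header), not something this patch establishes:

	/* Hypothetical sketch: request a combined IOP + soft reset through
	 * the management ioctl. Assumes /dev/aac0 is the adapter's char
	 * node and the definitions above are available from a copied header.
	 */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	static int aac_request_reset(const char *node)
	{
		struct aac_reset_iop reset = { .reset_type = IOP_HWSOFT_RESET };
		int fd, rc;

		fd = open(node, O_RDWR);
		if (fd < 0)
			return -1;
		rc = ioctl(fd, FSACTL_RESET_IOP, &reset);
		close(fd);
		return rc;
	}
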
 
 int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
 {
@@ -901,6 +1094,13 @@ int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
        case FSACTL_GET_PCI_INFO:
                status = aac_get_pci_info(dev,arg);
                break;
+       case FSACTL_GET_HBA_INFO:
+               status = aac_get_hba_info(dev, arg);
+               break;
+       case FSACTL_RESET_IOP:
+               status = aac_send_reset_adapter(dev, arg);
+               break;
+
        default:
                status = -ENOTTY;
                break;
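
The one change repeated through every SG branch above deserves a note: each loop now snapshots the user-supplied element size into the local sg_count[] array and uses only that snapshot for the bounds check, the allocation, the copy-in, the DMA mapping, and the final copy_to_user(). Re-reading the size from the user buffer (or from the reply FIB, as the old SRB_DataIn path did) after validating it would let the value change between check and use. A condensed sketch of the single-fetch pattern (the helper name and max_bytes limit are illustrative):

	/* Sketch of the single-fetch pattern applied to every SG loop:
	 * read the user-controlled length once, validate the snapshot,
	 * then use only the snapshot from that point on.
	 */
	static int fetch_one_sg(struct user_sgmap *upsg, int i, u32 max_bytes,
				u32 *sg_count, void **buf)
	{
		sg_count[i] = upsg->sg[i].count;	/* exactly one fetch */
		if (sg_count[i] > max_bytes)		/* validate the snapshot... */
			return -EINVAL;
		*buf = kmalloc(sg_count[i], GFP_KERNEL); /* ...use only the snapshot */
		return *buf ? 0 : -ENOMEM;
	}
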
index 5b48bed..40bfc57 100644
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -72,104 +73,175 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
        unsigned long size, align;
        const unsigned long fibsize = dev->max_fib_size;
        const unsigned long printfbufsiz = 256;
-       unsigned long host_rrq_size = 0;
-       struct aac_init *init;
+       unsigned long host_rrq_size, aac_init_size;
+       union aac_init *init;
        dma_addr_t phys;
        unsigned long aac_max_hostphysmempages;
 
-       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
-           dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
+       if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
+               (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
+               (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
+               !dev->sa_firmware)) {
+               host_rrq_size =
+                       (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
+                               * sizeof(u32);
+               aac_init_size = sizeof(union aac_init);
+       } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
+               dev->sa_firmware) {
                host_rrq_size = (dev->scsi_host_ptr->can_queue
-                       + AAC_NUM_MGT_FIB) * sizeof(u32);
-       size = fibsize + sizeof(struct aac_init) + commsize +
-                       commalign + printfbufsiz + host_rrq_size;
+                       + AAC_NUM_MGT_FIB) * sizeof(u32)  * AAC_MAX_MSIX;
+               aac_init_size = sizeof(union aac_init) +
+                       (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq);
+       } else {
+               host_rrq_size = 0;
+               aac_init_size = sizeof(union aac_init);
+       }
+       size = fibsize + aac_init_size + commsize + commalign +
+                       printfbufsiz + host_rrq_size;
+
        base = pci_alloc_consistent(dev->pdev, size, &phys);
 
-       if(base == NULL)
-       {
+       if (base == NULL) {
                printk(KERN_ERR "aacraid: unable to create mapping.\n");
                return 0;
        }
+
        dev->comm_addr = (void *)base;
        dev->comm_phys = phys;
        dev->comm_size = size;
-       
-       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
-           dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+
+       if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
+           (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
+           (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) {
                dev->host_rrq = (u32 *)(base + fibsize);
                dev->host_rrq_pa = phys + fibsize;
                memset(dev->host_rrq, 0, host_rrq_size);
        }
 
-       dev->init = (struct aac_init *)(base + fibsize + host_rrq_size);
+       dev->init = (union aac_init *)(base + fibsize + host_rrq_size);
        dev->init_pa = phys + fibsize + host_rrq_size;
 
        init = dev->init;
 
-       init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
-       if (dev->max_fib_size != sizeof(struct hw_fib))
-               init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
-       init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS);
-       init->fsrev = cpu_to_le32(dev->fsrev);
+       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+               int i;
+               u64 addr;
+
+               init->r8.init_struct_revision =
+                       cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8);
+               init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+                                       INITFLAGS_DRIVER_USES_UTC_TIME |
+                                       INITFLAGS_DRIVER_SUPPORTS_PM);
+               init->r8.init_flags |=
+                               cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE);
+               init->r8.rr_queue_count = cpu_to_le32(dev->max_msix);
+               init->r8.max_io_size =
+                       cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+               init->r8.max_num_aif = init->r8.reserved1 =
+                       init->r8.reserved2 = 0;
+
+               for (i = 0; i < dev->max_msix; i++) {
+                       addr = (u64)dev->host_rrq_pa + dev->vector_cap * i *
+                                       sizeof(u32);
+                       init->r8.rrq[i].host_addr_high = cpu_to_le32(
+                                               upper_32_bits(addr));
+                       init->r8.rrq[i].host_addr_low = cpu_to_le32(
+                                               lower_32_bits(addr));
+                       init->r8.rrq[i].msix_id = i;
+                       init->r8.rrq[i].element_count = cpu_to_le16(
+                                       (u16)dev->vector_cap);
+                       init->r8.rrq[i].comp_thresh =
+                                       init->r8.rrq[i].unused = 0;
+               }
 
-       /*
-        *      Adapter Fibs are the first thing allocated so that they
-        *      start page aligned
-        */
-       dev->aif_base_va = (struct hw_fib *)base;
-       
-       init->AdapterFibsVirtualAddress = 0;
-       init->AdapterFibsPhysicalAddress = cpu_to_le32((u32)phys);
-       init->AdapterFibsSize = cpu_to_le32(fibsize);
-       init->AdapterFibAlign = cpu_to_le32(sizeof(struct hw_fib));
-       /*
-        * number of 4k pages of host physical memory. The aacraid fw needs
-        * this number to be less than 4gb worth of pages. New firmware doesn't
-        * have any issues with the mapping system, but older Firmware did, and
-        * had *troubles* dealing with the math overloading past 32 bits, thus
-        * we must limit this field.
-        */
-       aac_max_hostphysmempages = dma_get_required_mask(&dev->pdev->dev) >> 12;
-       if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
-               init->HostPhysMemPages = cpu_to_le32(aac_max_hostphysmempages);
-       else
-               init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
-
-       init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
-               INITFLAGS_DRIVER_SUPPORTS_PM);
-       init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
-       init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
-       init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
-       init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
-
-       if (dev->comm_interface == AAC_COMM_MESSAGE) {
-               init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
-               dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
-       } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
-               init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
-               init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
-                       INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
-               init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
-               init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
-               dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n"));
-       } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
-               init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
-               init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
-                       INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
-               init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
-               init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
-               /* number of MSI-X */
-               init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
-               dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
+               pr_warn("aacraid: Comm Interface type3 enabled\n");
+       } else {
+               init->r7.init_struct_revision =
+                       cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
+               if (dev->max_fib_size != sizeof(struct hw_fib))
+                       init->r7.init_struct_revision =
+                               cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
+               init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION);
+               init->r7.fsrev = cpu_to_le32(dev->fsrev);
+
+               /*
+                *      Adapter Fibs are the first thing allocated so that they
+                *      start page aligned
+                */
+               dev->aif_base_va = (struct hw_fib *)base;
+
+               init->r7.adapter_fibs_virtual_address = 0;
+               init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys);
+               init->r7.adapter_fibs_size = cpu_to_le32(fibsize);
+               init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib));
+
+               /*
+                * number of 4k pages of host physical memory. The aacraid fw
+                * needs this number to be less than 4gb worth of pages. New
+                * firmware doesn't have any issues with the mapping system, but
+                * older Firmware did, and had *troubles* dealing with the math
+                * overloading past 32 bits, thus we must limit this field.
+                */
+               aac_max_hostphysmempages =
+                               dma_get_required_mask(&dev->pdev->dev) >> 12;
+               if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
+                       init->r7.host_phys_mem_pages =
+                                       cpu_to_le32(aac_max_hostphysmempages);
+               else
+                       init->r7.host_phys_mem_pages =
+                                       cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
+
+               init->r7.init_flags =
+                       cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+                       INITFLAGS_DRIVER_SUPPORTS_PM);
+               init->r7.max_io_commands =
+                       cpu_to_le32(dev->scsi_host_ptr->can_queue +
+                                       AAC_NUM_MGT_FIB);
+               init->r7.max_io_size =
+                       cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+               init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size);
+               init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif);
+
+               if (dev->comm_interface == AAC_COMM_MESSAGE) {
+                       init->r7.init_flags |=
+                               cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+                       pr_warn("aacraid: Comm Interface enabled\n");
+               } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+                       init->r7.init_struct_revision =
+                               cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+                       init->r7.init_flags |=
+                               cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+                               INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
+                               INITFLAGS_FAST_JBOD_SUPPORTED);
+                       init->r7.host_rrq_addr_high =
+                               cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
+                       init->r7.host_rrq_addr_low =
+                               cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
+                       pr_warn("aacraid: Comm Interface type1 enabled\n");
+               } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+                       init->r7.init_struct_revision =
+                               cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+                       init->r7.init_flags |=
+                               cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+                               INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
+                               INITFLAGS_FAST_JBOD_SUPPORTED);
+                       init->r7.host_rrq_addr_high =
+                               cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
+                       init->r7.host_rrq_addr_low =
+                               cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
+                       init->r7.no_of_msix_vectors =
+                               cpu_to_le32(dev->max_msix);
+                       /* must be the COMM_PREFERRED_SETTINGS values */
+                       pr_warn("aacraid: Comm Interface type2 enabled\n");
+               }
        }
 
        /*
         * Increment the base address by the amount already used
         */
-       base = base + fibsize + host_rrq_size + sizeof(struct aac_init);
+       base = base + fibsize + host_rrq_size + aac_init_size;
        phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
-               sizeof(struct aac_init));
+                       aac_init_size);
 
        /*
         *      Align the beginning of Headers to commalign
@@ -181,7 +253,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
         *      Fill in addresses of the Comm Area Headers and Queues
         */
        *commaddr = base;
-       init->CommHeaderAddress = cpu_to_le32((u32)phys);
+       if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
+               init->r7.comm_header_address = cpu_to_le32((u32)phys);
        /*
         *      Increment the base address by the size of the CommArea
         */
@@ -191,12 +264,14 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
         *       Place the Printf buffer area after the Fast I/O comm area.
         */
        dev->printfbuf = (void *)base;
-       init->printfbuf = cpu_to_le32(phys);
-       init->printfbufsiz = cpu_to_le32(printfbufsiz);
+       if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) {
+               init->r7.printfbuf = cpu_to_le32(phys);
+               init->r7.printfbufsiz = cpu_to_le32(printfbufsiz);
+       }
        memset(base, 0, printfbufsiz);
        return 1;
 }
-    
+
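
With sa_firmware, the single host_rrq allocation above is carved into one response ring per MSI-X vector: vector i's slice begins vector_cap 32-bit entries into the buffer, and the r8.rrq[] loop splits that 64-bit address with upper_32_bits()/lower_32_bits(). A sketch of the slice computation (the helper name is ours):

	/* Sketch only: start address of MSI-X vector i's response ring
	 * inside the shared host_rrq buffer, as set up in the r8.rrq[]
	 * loop of aac_alloc_comm() above.
	 */
	static inline dma_addr_t aac_rrq_slice_pa(struct aac_dev *dev, int i)
	{
		return dev->host_rrq_pa +
		       (dma_addr_t)dev->vector_cap * i * sizeof(u32);
	}
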
 static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
 {
        atomic_set(&q->numpending, 0);
@@ -404,9 +479,13 @@ void aac_define_int_mode(struct aac_dev *dev)
                if (dev->max_msix > msi_count)
                        dev->max_msix = msi_count;
        }
-       dev->vector_cap =
-               (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) /
-               msi_count;
+       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware)
+               dev->vector_cap = dev->scsi_host_ptr->can_queue +
+                               AAC_NUM_MGT_FIB;
+       else
+               dev->vector_cap = (dev->scsi_host_ptr->can_queue +
+                               AAC_NUM_MGT_FIB) / msi_count;
+
 }
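
A worked example makes the new vector_cap split concrete; the numbers below are hypothetical, chosen only to divide evenly:

	/*
	 * Hypothetical numbers: can_queue = 504, AAC_NUM_MGT_FIB = 8,
	 * msi_count = 8.
	 *
	 *   legacy firmware:      vector_cap = (504 + 8) / 8 = 64
	 *   TYPE3 + sa_firmware:  vector_cap =  504 + 8      = 512
	 *
	 * With sa_firmware every vector's ring holds the full queue depth,
	 * which is why aac_alloc_comm() sizes the host RRQ buffer as
	 * (can_queue + AAC_NUM_MGT_FIB) * sizeof(u32) * AAC_MAX_MSIX in
	 * that mode.
	 */
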
 struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 {
@@ -440,30 +519,37 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 
        if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
                0, 0, 0, 0, 0, 0,
-               status+0, status+1, status+2, status+3, NULL)) &&
-                       (status[0] == 0x00000001)) {
+               status+0, status+1, status+2, status+3, status+4)) &&
+               (status[0] == 0x00000001)) {
                dev->doorbell_mask = status[3];
-               if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
+               if (status[1] & AAC_OPT_NEW_COMM_64)
                        dev->raw_io_64 = 1;
                dev->sync_mode = aac_sync_mode;
                if (dev->a_ops.adapter_comm &&
-                       (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
+                       (status[1] & AAC_OPT_NEW_COMM)) {
                                dev->comm_interface = AAC_COMM_MESSAGE;
                                dev->raw_io_interface = 1;
-                       if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+                       if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
                                /* driver supports TYPE1 (Tupelo) */
                                dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
-                       } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
-                               /* driver supports TYPE2 (Denali) */
+                       } else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) {
+                               /* driver supports TYPE2 (Denali, Yosemite) */
                                dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
-                       } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
-                                 (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
-                               /* driver doesn't TYPE3 and TYPE4 */
-                               /* switch to sync. mode */
+                       } else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) {
+                               /* driver supports TYPE3 (Yosemite, Thor) */
+                               dev->comm_interface = AAC_COMM_MESSAGE_TYPE3;
+                       } else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) {
+                               /* not supported TYPE - switch to sync. mode */
                                dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
                                dev->sync_mode = 1;
                        }
                }
+               if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
+                       (status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
+                       dev->sa_firmware = 1;
+               else
+                       dev->sa_firmware = 0;
+
                if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
                    (status[2] > dev->base_size)) {
                        aac_adapter_ioremap(dev, 0);
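
The cascade this hunk rewrites picks the newest message interface the firmware advertises and degrades the unsupported TYPE4 to TYPE2 in sync mode. A hedged sketch with made-up option-bit values:

#include <stdint.h>

/* Option bits are illustrative values, not the driver's constants. */
#define OPT_NEW_COMM        (1u << 0)
#define OPT_COMM_TYPE1      (1u << 1)
#define OPT_COMM_TYPE2      (1u << 2)
#define OPT_COMM_TYPE3      (1u << 3)
#define OPT_COMM_TYPE4      (1u << 4)

enum comm_if { COMM_PRODUCER, COMM_MSG, COMM_MSG1, COMM_MSG2, COMM_MSG3 };

static enum comm_if pick_comm(uint32_t opts, int *sync_mode)
{
        *sync_mode = 0;
        if (!(opts & OPT_NEW_COMM))
                return COMM_PRODUCER;           /* legacy interface */
        if (opts & OPT_COMM_TYPE1)
                return COMM_MSG1;               /* Tupelo */
        if (opts & OPT_COMM_TYPE2)
                return COMM_MSG2;               /* Denali, Yosemite */
        if (opts & OPT_COMM_TYPE3)
                return COMM_MSG3;               /* Yosemite, Thor */
        if (opts & OPT_COMM_TYPE4) {
                *sync_mode = 1;                 /* unsupported: sync TYPE2 */
                return COMM_MSG2;
        }
        return COMM_MSG;
}
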
@@ -500,61 +586,25 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
                dev->sg_tablesize = status[2] & 0xFFFF;
                if (dev->pdev->device == PMC_DEVICE_S7 ||
                    dev->pdev->device == PMC_DEVICE_S8 ||
-                   dev->pdev->device == PMC_DEVICE_S9)
-                       host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
-                               (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
-               else
-                       host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+                   dev->pdev->device == PMC_DEVICE_S9) {
+                       if (host->can_queue > (status[3] >> 16) -
+                                       AAC_NUM_MGT_FIB)
+                               host->can_queue = (status[3] >> 16) -
+                                       AAC_NUM_MGT_FIB;
+               } else if (host->can_queue > (status[3] & 0xFFFF) -
+                               AAC_NUM_MGT_FIB)
+                       host->can_queue = (status[3] & 0xFFFF) -
+                               AAC_NUM_MGT_FIB;
+
                dev->max_num_aif = status[4] & 0xFFFF;
-               /*
-                *      NOTE:
-                *      All these overrides are based on a fixed internal
-                *      knowledge and understanding of existing adapters,
-                *      acbsize should be set with caution.
-                */
-               if (acbsize == 512) {
-                       host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
-                       dev->max_fib_size = 512;
-                       dev->sg_tablesize = host->sg_tablesize
-                         = (512 - sizeof(struct aac_fibhdr)
-                           - sizeof(struct aac_write) + sizeof(struct sgentry))
-                            / sizeof(struct sgentry);
-                       host->can_queue = AAC_NUM_IO_FIB;
-               } else if (acbsize == 2048) {
-                       host->max_sectors = 512;
-                       dev->max_fib_size = 2048;
-                       host->sg_tablesize = 65;
-                       dev->sg_tablesize = 81;
-                       host->can_queue = 512 - AAC_NUM_MGT_FIB;
-               } else if (acbsize == 4096) {
-                       host->max_sectors = 1024;
-                       dev->max_fib_size = 4096;
-                       host->sg_tablesize = 129;
-                       dev->sg_tablesize = 166;
-                       host->can_queue = 256 - AAC_NUM_MGT_FIB;
-               } else if (acbsize == 8192) {
-                       host->max_sectors = 2048;
-                       dev->max_fib_size = 8192;
-                       host->sg_tablesize = 257;
-                       dev->sg_tablesize = 337;
-                       host->can_queue = 128 - AAC_NUM_MGT_FIB;
-               } else if (acbsize > 0) {
-                       printk("Illegal acbsize=%d ignored\n", acbsize);
-               }
        }
-       {
-
-               if (numacb > 0) {
-                       if (numacb < host->can_queue)
-                               host->can_queue = numacb;
-                       else
-                               printk("numacb=%d ignored\n", numacb);
-               }
+       if (numacb > 0) {
+               if (numacb < host->can_queue)
+                       host->can_queue = numacb;
+               else
+                       pr_warn("numacb=%d ignored\n", numacb);
        }
 
-       if (host->can_queue > AAC_NUM_IO_FIB)
-               host->can_queue = AAC_NUM_IO_FIB;
-
        if (dev->pdev->device == PMC_DEVICE_S6 ||
            dev->pdev->device == PMC_DEVICE_S7 ||
            dev->pdev->device == PMC_DEVICE_S8 ||
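
A sketch of the can_queue clamp above; the packing of status[3] (extended count in the high 16 bits for S7/S8/S9, legacy count in the low 16 bits) is inferred from this hunk, and the management-FIB count is an assumed value:

#include <stdint.h>

#define NUM_MGT_FIB 8   /* assumed value, for illustration only */

/* can_queue is only ever lowered toward the firmware limit, never raised. */
static unsigned int clamp_can_queue(unsigned int can_queue,
                                    uint32_t status3, int is_s7_s8_s9)
{
        unsigned int fw_max = is_s7_s8_s9 ? (status3 >> 16)
                                          : (status3 & 0xFFFF);

        if (can_queue > fw_max - NUM_MGT_FIB)
                can_queue = fw_max - NUM_MGT_FIB;
        return can_queue;
}
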
index 9e7551f..969727b 100644
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -43,6 +44,7 @@
 #include <linux/kthread.h>
 #include <linux/interrupt.h>
 #include <linux/semaphore.h>
+#include <linux/bcd.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 
 static int fib_map_alloc(struct aac_dev *dev)
 {
+       if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
+               dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
+       else
+               dev->max_cmd_size = dev->max_fib_size;
+
        dprintk((KERN_INFO
          "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
-         dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
+         dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
          AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
        dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
-               (dev->max_fib_size + sizeof(struct aac_fib_xporthdr))
+               (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
                * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
                &dev->hw_fib_pa);
        if (dev->hw_fib_va == NULL)
@@ -83,9 +95,9 @@ static int fib_map_alloc(struct aac_dev *dev)
 
 void aac_fib_map_free(struct aac_dev *dev)
 {
-       if (dev->hw_fib_va && dev->max_fib_size) {
+       if (dev->hw_fib_va && dev->max_cmd_size) {
                pci_free_consistent(dev->pdev,
-               (dev->max_fib_size *
+               (dev->max_cmd_size *
                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
                dev->hw_fib_va, dev->hw_fib_pa);
        }
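
Both the allocation and the free above now size the region from max_cmd_size; a sketch of the arithmetic, with struct sizes passed in rather than taken from the driver's headers:

#include <stddef.h>

/* One command slot = one max_cmd_size FIB plus one transport header;
 * the pool covers every I/O and management slot, padded so the base
 * can be rounded up to a 32-byte boundary. */
static size_t fib_pool_bytes(size_t max_cmd_size, size_t xport_hdr_size,
                             unsigned int can_queue, unsigned int mgt_fibs,
                             size_t align)
{
        return (max_cmd_size + xport_hdr_size)
                * (can_queue + mgt_fibs) + (align - 1);
}
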
@@ -129,11 +141,14 @@ int aac_fib_setup(struct aac_dev * dev)
        struct hw_fib *hw_fib;
        dma_addr_t hw_fib_pa;
        int i;
+       u32 max_cmds;
 
        while (((i = fib_map_alloc(dev)) == -ENOMEM)
         && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
-               dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
-               dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
+               max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
+               dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
+               if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
+                       dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
        }
        if (i<0)
                return -ENOMEM;
@@ -144,7 +159,7 @@ int aac_fib_setup(struct aac_dev * dev)
                (hw_fib_pa - dev->hw_fib_pa));
        dev->hw_fib_pa = hw_fib_pa;
        memset(dev->hw_fib_va, 0,
-               (dev->max_fib_size + sizeof(struct aac_fib_xporthdr)) *
+               (dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
                (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
 
        /* add Xport header */
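
aac_fib_setup retries fib_map_alloc with a progressively halved budget; a sketch of that back-off policy, with alloc_pool() as a hypothetical stand-in for the allocator:

#include <errno.h>

#define NUM_MGT_FIB 8   /* assumed value, for illustration only */

extern int alloc_pool(unsigned int can_queue);  /* hypothetical */

static int setup_with_backoff(unsigned int *can_queue)
{
        int rc;

        while ((rc = alloc_pool(*can_queue)) == -ENOMEM &&
               *can_queue > 64 - NUM_MGT_FIB) {
                /* halve the total budget, then carve the mgt FIBs out */
                unsigned int max_cmds = (*can_queue + NUM_MGT_FIB) >> 1;

                *can_queue = max_cmds - NUM_MGT_FIB;
        }
        return rc;
}
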
@@ -170,12 +185,22 @@ int aac_fib_setup(struct aac_dev * dev)
                sema_init(&fibptr->event_wait, 0);
                spin_lock_init(&fibptr->event_lock);
                hw_fib->header.XferState = cpu_to_le32(0xffffffff);
-               hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);
+               hw_fib->header.SenderSize =
+                       cpu_to_le16(dev->max_fib_size); /* ?? max_cmd_size */
                fibptr->hw_fib_pa = hw_fib_pa;
+               fibptr->hw_sgl_pa = hw_fib_pa +
+                       offsetof(struct aac_hba_cmd_req, sge[2]);
+               /*
+                * one element is for the ptr to the separate sg list,
+                * second element for 32 byte alignment
+                */
+               fibptr->hw_error_pa = hw_fib_pa +
+                       offsetof(struct aac_native_hba, resp.resp_bytes[0]);
+
                hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
-                       dev->max_fib_size + sizeof(struct aac_fib_xporthdr));
+                       dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
                hw_fib_pa = hw_fib_pa +
-                       dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+                       dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
        }
 
        /*
@@ -273,7 +298,8 @@ void aac_fib_free(struct fib *fibptr)
        spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
        if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
                aac_config.fib_timeouts++;
-       if (fibptr->hw_fib_va->header.XferState != 0) {
+       if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
+               fibptr->hw_fib_va->header.XferState != 0) {
                printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
                         (void*)fibptr,
                         le32_to_cpu(fibptr->hw_fib_va->header.XferState));
@@ -501,8 +527,15 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
         *      Map the fib into 32bits by using the fib number
         */
 
-       hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
-       hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
+       hw_fib->header.SenderFibAddress =
+               cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
+
+       /* use the same shifted value for handle to be compatible
+        * with the new native hba command handle
+        */
+       hw_fib->header.Handle =
+               cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+
        /*
         *      Set FIB state to indicate where it came from and if we want a
         *      response from the adapter. Also load the command from the
@@ -670,6 +703,82 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                return 0;
 }
 
+int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+               void *callback_data)
+{
+       struct aac_dev *dev = fibptr->dev;
+       int wait;
+       unsigned long flags = 0;
+       unsigned long mflags = 0;
+
+       fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
+       if (callback) {
+               wait = 0;
+               fibptr->callback = callback;
+               fibptr->callback_data = callback_data;
+       } else
+               wait = 1;
+
+
+       if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+               struct aac_hba_cmd_req *hbacmd =
+                       (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+
+               hbacmd->iu_type = command;
+               /* bit1 of request_id must be 0 */
+               hbacmd->request_id =
+                       cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+       } else
+               return -EINVAL;
+
+
+       if (wait) {
+               spin_lock_irqsave(&dev->manage_lock, mflags);
+               if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+                       spin_unlock_irqrestore(&dev->manage_lock, mflags);
+                       return -EBUSY;
+               }
+               dev->management_fib_count++;
+               spin_unlock_irqrestore(&dev->manage_lock, mflags);
+               spin_lock_irqsave(&fibptr->event_lock, flags);
+       }
+
+       if (aac_adapter_deliver(fibptr) != 0) {
+               if (wait) {
+                       spin_unlock_irqrestore(&fibptr->event_lock, flags);
+                       spin_lock_irqsave(&dev->manage_lock, mflags);
+                       dev->management_fib_count--;
+                       spin_unlock_irqrestore(&dev->manage_lock, mflags);
+               }
+               return -EBUSY;
+       }
+       FIB_COUNTER_INCREMENT(aac_config.NativeSent);
+
+       if (wait) {
+               spin_unlock_irqrestore(&fibptr->event_lock, flags);
+               /* Only set for first known interruptible command */
+               if (down_interruptible(&fibptr->event_wait)) {
+                       fibptr->done = 2;
+                       up(&fibptr->event_wait);
+               }
+               spin_lock_irqsave(&fibptr->event_lock, flags);
+               if ((fibptr->done == 0) || (fibptr->done == 2)) {
+                       fibptr->done = 2; /* Tell interrupt we aborted */
+                       spin_unlock_irqrestore(&fibptr->event_lock, flags);
+                       return -ERESTARTSYS;
+               }
+               spin_unlock_irqrestore(&fibptr->event_lock, flags);
+               WARN_ON(fibptr->done == 0);
+
+               if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+                       return -ETIMEDOUT;
+
+               return 0;
+       }
+
+       return -EINPROGRESS;
+}
+
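
Both aac_fib_send and the new aac_hba_send now derive the completion handle from the FIB's array index as (index << 2) + 1: bit 0 marks the handle valid, bit 1 stays clear as the native HBA IU requires, and the index survives the round trip. A quick self-check of that encoding:

#include <assert.h>
#include <stdint.h>

static uint32_t fib_handle(uint32_t index) { return (index << 2) + 1; }
static uint32_t fib_index(uint32_t handle) { return handle >> 2; }

int main(void)
{
        for (uint32_t i = 0; i < 1024; i++) {
                assert((fib_handle(i) & 0x2) == 0);     /* bit 1 clear */
                assert(fib_index(fib_handle(i)) == i);  /* round trip */
        }
        return 0;
}
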
 /**
  *     aac_consumer_get        -       get the top of the queue
  *     @dev: Adapter
@@ -761,7 +870,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
        unsigned long qflags;
 
        if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
-           dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+               dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+               dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
                kfree(hw_fib);
                return 0;
        }
@@ -827,11 +937,17 @@ int aac_fib_complete(struct fib *fibptr)
 {
        struct hw_fib * hw_fib = fibptr->hw_fib_va;
 
+       if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+               fib_dealloc(fibptr);
+               return 0;
+       }
+
        /*
-        *      Check for a fib which has already been completed
+        *      Check for a fib which has already been completed or with a
+        *      status wait timeout
         */
 
-       if (hw_fib->header.XferState == 0)
+       if (hw_fib->header.XferState == 0 || fibptr->done == 2)
                return 0;
        /*
         *      If we plan to do anything check the structure type first.
@@ -984,20 +1100,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
                        lun = (container >> 16) & 0xFF;
                        container = (u32)-1;
                        channel = aac_phys_to_logical(channel);
-                       device_config_needed =
-                         (((__le32 *)aifcmd->data)[0] ==
-                           cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD;
-
-                       if (device_config_needed == ADD) {
-                               device = scsi_device_lookup(
-                                       dev->scsi_host_ptr,
-                                       channel, id, lun);
-                               if (device) {
-                                       scsi_remove_device(device);
-                                       scsi_device_put(device);
-                               }
-                       }
+                       device_config_needed = DELETE;
                        break;
+
                /*
                 *      Morph or Expand complete
                 */
@@ -1351,7 +1456,7 @@ retry_next:
        }
 }
 
-static int _aac_reset_adapter(struct aac_dev *aac, int forced)
+static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 {
        int index, quirks;
        int retval;
@@ -1360,6 +1465,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
        struct scsi_cmnd *command;
        struct scsi_cmnd *command_list;
        int jafo = 0;
+       int bled;
 
        /*
         * Assumptions:
@@ -1384,7 +1490,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced)
         *      If a positive health, means in a known DEAD PANIC
         * state and the adapter could be reset to `try again'.
         */
-       retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
+       bled = forced ? 0 : aac_adapter_check_health(aac);
+       retval = aac_adapter_restart(aac, bled, reset_type);
 
        if (retval)
                goto out;
@@ -1494,11 +1601,12 @@ out:
        return retval;
 }
 
-int aac_reset_adapter(struct aac_dev * aac, int forced)
+int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 {
        unsigned long flagv = 0;
        int retval;
        struct Scsi_Host * host;
+       int bled;
 
        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
                return -EBUSY;
@@ -1547,7 +1655,9 @@ int aac_reset_adapter(struct aac_dev * aac, int forced)
        if (forced < 2)
                aac_send_shutdown(aac);
        spin_lock_irqsave(host->host_lock, flagv);
-       retval = _aac_reset_adapter(aac, forced ? forced : ((aac_check_reset != 0) && (aac_check_reset != 1)));
+       bled = forced ? forced :
+                       (aac_check_reset != 0 && aac_check_reset != 1);
+       retval = _aac_reset_adapter(aac, bled, reset_type);
        spin_unlock_irqrestore(host->host_lock, flagv);
 
        if ((forced < 2) && (retval == -ENODEV)) {
@@ -1593,6 +1703,7 @@ int aac_check_health(struct aac_dev * aac)
        unsigned long time_now, flagv = 0;
        struct list_head * entry;
        struct Scsi_Host * host;
+       int bled;
 
        /* Extending the scope of fib_lock slightly to protect aac->in_reset */
        if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1710,7 +1821,8 @@ int aac_check_health(struct aac_dev * aac)
        host = aac->scsi_host_ptr;
        if (aac->thread->pid != current->pid)
                spin_lock_irqsave(host->host_lock, flagv);
-       BlinkLED = _aac_reset_adapter(aac, aac_check_reset != 1);
+       bled = aac_check_reset != 1 ? 1 : 0;
+       _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
        if (aac->thread->pid != current->pid)
                spin_unlock_irqrestore(host->host_lock, flagv);
        return BlinkLED;
@@ -1721,6 +1833,552 @@ out:
 }
 
 
+static void aac_resolve_luns(struct aac_dev *dev)
+{
+       int bus, target, channel;
+       struct scsi_device *sdev;
+       u8 devtype;
+       u8 new_devtype;
+
+       for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+               for (target = 0; target < AAC_MAX_TARGETS; target++) {
+
+                       if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL)
+                               continue;
+
+                       if (bus == CONTAINER_CHANNEL)
+                               channel = CONTAINER_CHANNEL;
+                       else
+                               channel = aac_phys_to_logical(bus);
+
+                       devtype = dev->hba_map[bus][target].devtype;
+                       new_devtype = dev->hba_map[bus][target].new_devtype;
+
+                       sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
+                                       target, 0);
+
+                       if (!sdev && devtype)
+                               scsi_add_device(dev->scsi_host_ptr, channel,
+                                               target, 0);
+                       else if (sdev && new_devtype != devtype)
+                               scsi_remove_device(sdev);
+                       else if (sdev && new_devtype == devtype)
+                               scsi_rescan_device(&sdev->sdev_gendev);
+
+                       if (sdev)
+                               scsi_device_put(sdev);
+
+                       dev->hba_map[bus][target].devtype = new_devtype;
+               }
+       }
+}
+
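
The per-target decision in aac_resolve_luns reduces to a small rule over the cached and freshly reported device types; a sketch with illustrative names:

enum lun_action { LUN_NONE, LUN_ADD, LUN_REMOVE, LUN_RESCAN };

/* known_to_scsi: a scsi_device already exists for the target;
 * devtype/new_devtype: device type before and after the rescan. */
static enum lun_action resolve(int known_to_scsi, unsigned char devtype,
                               unsigned char new_devtype)
{
        if (!known_to_scsi)
                return devtype ? LUN_ADD : LUN_NONE;
        if (new_devtype != devtype)
                return LUN_REMOVE;
        return LUN_RESCAN;      /* same type: just revalidate */
}
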
+/**
+ *     aac_handle_sa_aif       Handle a message from the firmware
+ *     @dev: Which adapter this fib is from
+ *     @fibptr: Pointer to fibptr from adapter
+ *
+ *     This routine handles a driver notify fib from the adapter and
+ *     dispatches it to the appropriate routine for handling.
+ */
+static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
+{
+       int i, bus, target, container, rcode = 0;
+       u32 events = 0;
+       struct fib *fib;
+       struct scsi_device *sdev;
+
+       if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
+               events = SA_AIF_HOTPLUG;
+       else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
+               events = SA_AIF_HARDWARE;
+       else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
+               events = SA_AIF_PDEV_CHANGE;
+       else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
+               events = SA_AIF_LDEV_CHANGE;
+       else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
+               events = SA_AIF_BPSTAT_CHANGE;
+       else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
+               events = SA_AIF_BPCFG_CHANGE;
+
+       switch (events) {
+       case SA_AIF_HOTPLUG:
+       case SA_AIF_HARDWARE:
+       case SA_AIF_PDEV_CHANGE:
+       case SA_AIF_LDEV_CHANGE:
+       case SA_AIF_BPCFG_CHANGE:
+
+               fib = aac_fib_alloc(dev);
+               if (!fib) {
+                       pr_err("aac_handle_sa_aif: out of memory\n");
+                       return;
+               }
+               for (bus = 0; bus < AAC_MAX_BUSES; bus++)
+                       for (target = 0; target < AAC_MAX_TARGETS; target++)
+                               dev->hba_map[bus][target].new_devtype = 0;
+
+               rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
+
+               if (rcode != -ERESTARTSYS)
+                       aac_fib_free(fib);
+
+               aac_resolve_luns(dev);
+
+               if (events == SA_AIF_LDEV_CHANGE ||
+                   events == SA_AIF_BPCFG_CHANGE) {
+                       aac_get_containers(dev);
+                       for (container = 0; container <
+                       dev->maximum_num_containers; ++container) {
+                               sdev = scsi_device_lookup(dev->scsi_host_ptr,
+                                               CONTAINER_CHANNEL,
+                                               container, 0);
+                               if (dev->fsa_dev[container].valid && !sdev) {
+                                       scsi_add_device(dev->scsi_host_ptr,
+                                               CONTAINER_CHANNEL,
+                                               container, 0);
+                               } else if (!dev->fsa_dev[container].valid &&
+                                       sdev) {
+                                       scsi_remove_device(sdev);
+                                       scsi_device_put(sdev);
+                               } else if (sdev) {
+                                       scsi_rescan_device(&sdev->sdev_gendev);
+                                       scsi_device_put(sdev);
+                               }
+                       }
+               }
+               break;
+
+       case SA_AIF_BPSTAT_CHANGE:
+               /* currently do nothing */
+               break;
+       }
+
+       for (i = 1; i <= 10; ++i) {
+               events = src_readl(dev, MUnit.IDR);
+               if (events & (1<<23)) {
+                       pr_warn(" AIF not cleared by firmware - %d/%d)\n",
+                               i, 10);
+                       ssleep(1);
+               }
+       }
+}
+
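
aac_handle_sa_aif services a single event class per AIF, chosen by the fixed priority order of its if/else chain; the same selection, written generically (bit values illustrative):

#include <stdint.h>

static uint32_t highest_priority_event(uint32_t pending,
                                       const uint32_t *prio, int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (pending & prio[i])
                        return prio[i];
        return 0;       /* nothing recognised */
}
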
+static int get_fib_count(struct aac_dev *dev)
+{
+       unsigned int num = 0;
+       struct list_head *entry;
+       unsigned long flagv;
+
+       /*
+        * Warning: no sleep allowed while
+        * holding spinlock. We take the estimate
+        * and pre-allocate a set of fibs outside the
+        * lock.
+        */
+       num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
+                       / sizeof(struct hw_fib); /* some extra */
+       spin_lock_irqsave(&dev->fib_lock, flagv);
+       entry = dev->fib_list.next;
+       while (entry != &dev->fib_list) {
+               entry = entry->next;
+               ++num;
+       }
+       spin_unlock_irqrestore(&dev->fib_lock, flagv);
+
+       return num;
+}
+
+static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
+                                               struct fib **fib_pool,
+                                               unsigned int num)
+{
+       struct hw_fib **hw_fib_p;
+       struct fib **fib_p;
+       int rcode = 1;
+
+       hw_fib_p = hw_fib_pool;
+       fib_p = fib_pool;
+       while (hw_fib_p < &hw_fib_pool[num]) {
+               *(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
+               if (!(*(hw_fib_p++))) {
+                       --hw_fib_p;
+                       break;
+               }
+
+               *(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
+               if (!(*(fib_p++))) {
+                       kfree(*(--hw_fib_p));
+                       break;
+               }
+       }
+
+       num = hw_fib_p - hw_fib_pool;
+       if (!num)
+               rcode = 0;
+
+       return rcode;
+}
+
+static void wakeup_fibctx_threads(struct aac_dev *dev,
+                                               struct hw_fib **hw_fib_pool,
+                                               struct fib **fib_pool,
+                                               struct fib *fib,
+                                               struct hw_fib *hw_fib,
+                                               unsigned int num)
+{
+       unsigned long flagv;
+       struct list_head *entry;
+       struct hw_fib **hw_fib_p;
+       struct fib **fib_p;
+       u32 time_now, time_last;
+       struct hw_fib *hw_newfib;
+       struct fib *newfib;
+       struct aac_fib_context *fibctx;
+
+       time_now = jiffies/HZ;
+       spin_lock_irqsave(&dev->fib_lock, flagv);
+       entry = dev->fib_list.next;
+       /*
+        * For each Context that is on the
+        * fibctxList, make a copy of the
+        * fib, and then set the event to wake up the
+        * thread that is waiting for it.
+        */
+
+       hw_fib_p = hw_fib_pool;
+       fib_p = fib_pool;
+       while (entry != &dev->fib_list) {
+               /*
+                * Extract the fibctx
+                */
+               fibctx = list_entry(entry, struct aac_fib_context,
+                               next);
+               /*
+                * Check if the queue is getting
+                * backlogged
+                */
+               if (fibctx->count > 20) {
+                       /*
+                        * It's *not* jiffies folks,
+                        * but jiffies / HZ so do not
+                        * panic ...
+                        */
+                       time_last = fibctx->jiffies;
+                       /*
+                        * Has it been > 2 minutes
+                        * since the last read off
+                        * the queue?
+                        */
+                       if ((time_now - time_last) > aif_timeout) {
+                               entry = entry->next;
+                               aac_close_fib_context(dev, fibctx);
+                               continue;
+                       }
+               }
+               /*
+                * Warning: no sleep allowed while
+                * holding spinlock
+                */
+               if (hw_fib_p >= &hw_fib_pool[num]) {
+                       pr_warn("aifd: didn't allocate NewFib\n");
+                       entry = entry->next;
+                       continue;
+               }
+
+               hw_newfib = *hw_fib_p;
+               *(hw_fib_p++) = NULL;
+               newfib = *fib_p;
+               *(fib_p++) = NULL;
+               /*
+                * Make the copy of the FIB
+                */
+               memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
+               memcpy(newfib, fib, sizeof(struct fib));
+               newfib->hw_fib_va = hw_newfib;
+               /*
+                * Put the FIB onto the
+                * fibctx's fibs
+                */
+               list_add_tail(&newfib->fiblink, &fibctx->fib_list);
+               fibctx->count++;
+               /*
+                * Set the event to wake up the
+                * thread that is waiting.
+                */
+               up(&fibctx->wait_sem);
+
+               entry = entry->next;
+       }
+       /*
+        *      Set the status of this FIB
+        */
+       *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+       aac_fib_adapter_complete(fib, sizeof(u32));
+       spin_unlock_irqrestore(&dev->fib_lock, flagv);
+
+}
+
+static void aac_process_events(struct aac_dev *dev)
+{
+       struct hw_fib *hw_fib;
+       struct fib *fib;
+       unsigned long flags;
+       spinlock_t *t_lock;
+       unsigned int rcode;
+
+       t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+       spin_lock_irqsave(t_lock, flags);
+
+       while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
+               struct list_head *entry;
+               struct aac_aifcmd *aifcmd;
+               unsigned int  num;
+               struct hw_fib **hw_fib_pool, **hw_fib_p;
+               struct fib **fib_pool, **fib_p;
+
+               set_current_state(TASK_RUNNING);
+
+               entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
+               list_del(entry);
+
+               t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+               spin_unlock_irqrestore(t_lock, flags);
+
+               fib = list_entry(entry, struct fib, fiblink);
+               hw_fib = fib->hw_fib_va;
+               if (dev->sa_firmware) {
+                       /* Thor AIF */
+                       aac_handle_sa_aif(dev, fib);
+                       aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+                       continue;
+               }
+               /*
+                *      We will process the FIB here or pass it to a
+                *      worker thread that is TBD. We Really can't
+                *      do anything at this point since we don't have
+                *      anything defined for this thread to do.
+                */
+               memset(fib, 0, sizeof(struct fib));
+               fib->type = FSAFS_NTC_FIB_CONTEXT;
+               fib->size = sizeof(struct fib);
+               fib->hw_fib_va = hw_fib;
+               fib->data = hw_fib->data;
+               fib->dev = dev;
+               /*
+                *      We only handle AifRequest fibs from the adapter.
+                */
+
+               aifcmd = (struct aac_aifcmd *) hw_fib->data;
+               if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
+                       /* Handle Driver Notify Events */
+                       aac_handle_aif(dev, fib);
+                       *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+                       aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+                       goto free_fib;
+               }
+               /*
+                * The u32 here is important and intended. We are using
+                * 32bit wrapping time to fit the adapter field
+                */
+
+               /* Sniff events */
+               if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
+                || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
+                       aac_handle_aif(dev, fib);
+               }
+
+               /*
+                * get number of fibs to process
+                */
+               num = get_fib_count(dev);
+               if (!num)
+                       goto free_fib;
+
+               hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
+                                               GFP_KERNEL);
+               if (!hw_fib_pool)
+                       goto free_fib;
+
+               fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
+               if (!fib_pool)
+                       goto free_hw_fib_pool;
+
+               /*
+                * Fill up fib pointer pools with actual fibs
+                * and hw_fibs
+                */
+               rcode = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+               if (!rcode)
+                       goto free_mem;
+
+               /*
+                * wakeup the thread that is waiting for
+                * the response from fw (ioctl)
+                */
+               wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
+                                                           fib, hw_fib, num);
+
+free_mem:
+               /* Free up the remaining resources */
+               hw_fib_p = hw_fib_pool;
+               fib_p = fib_pool;
+               while (hw_fib_p < &hw_fib_pool[num]) {
+                       kfree(*hw_fib_p);
+                       kfree(*fib_p);
+                       ++fib_p;
+                       ++hw_fib_p;
+               }
+               kfree(fib_pool);
+free_hw_fib_pool:
+               kfree(hw_fib_pool);
+free_fib:
+               kfree(fib);
+               t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+               spin_lock_irqsave(t_lock, flags);
+       }
+       /*
+        *      There are no more AIF's
+        */
+       t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+       spin_unlock_irqrestore(t_lock, flags);
+}
+
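
aac_process_events unwinds failures through ordered goto labels: each label releases only what was acquired after the previous one. A generic user-space sketch of the same pattern (pool contents and the waiter hand-off elided):

#include <stdlib.h>

static int process_one(unsigned int num)
{
        void **pool_a, **pool_b;
        unsigned int i;
        int rc = -1;

        pool_a = calloc(num, sizeof(*pool_a));
        if (!pool_a)
                goto out;

        pool_b = calloc(num, sizeof(*pool_b));
        if (!pool_b)
                goto free_pool_a;

        /* ... fill both pools, hand entries off to waiters ... */
        rc = 0;

        for (i = 0; i < num; i++) {
                free(pool_a[i]);        /* free(NULL) is a no-op */
                free(pool_b[i]);
        }
        free(pool_b);
free_pool_a:
        free(pool_a);
out:
        return rc;
}
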
+static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
+                                                       u32 datasize)
+{
+       struct aac_srb *srbcmd;
+       struct sgmap64 *sg64;
+       dma_addr_t addr;
+       char *dma_buf;
+       struct fib *fibptr;
+       int ret = -ENOMEM;
+       u32 vbus, vid;
+
+       fibptr = aac_fib_alloc(dev);
+       if (!fibptr)
+               goto out;
+
+       dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr);
+       if (!dma_buf)
+               goto fib_free_out;
+
+       aac_fib_init(fibptr);
+
+       vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
+       vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+
+       srbcmd = (struct aac_srb *)fib_data(fibptr);
+
+       srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+       srbcmd->channel = cpu_to_le32(vbus);
+       srbcmd->id = cpu_to_le32(vid);
+       srbcmd->lun = 0;
+       srbcmd->flags = cpu_to_le32(SRB_DataOut);
+       srbcmd->timeout = cpu_to_le32(10);
+       srbcmd->retry_limit = 0;
+       srbcmd->cdb_size = cpu_to_le32(12);
+       srbcmd->count = cpu_to_le32(datasize);
+
+       memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+       srbcmd->cdb[0] = BMIC_OUT;
+       srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
+       memcpy(dma_buf, (char *)wellness_str, datasize);
+
+       sg64 = (struct sgmap64 *)&srbcmd->sg;
+       sg64->count = cpu_to_le32(1);
+       sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
+       sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+       sg64->sg[0].count = cpu_to_le32(datasize);
+
+       ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
+                               FsaNormal, 1, 1, NULL, NULL);
+
+       pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr);
+
+       /*
+        * Do not set XferState to zero unless we
+        * receive a response from the F/W
+        */
+       if (ret >= 0)
+               aac_fib_complete(fibptr);
+
+       /*
+        * FIB should be freed only after
+        * getting the response from the F/W
+        */
+       if (ret != -ERESTARTSYS)
+               goto fib_free_out;
+
+out:
+       return ret;
+fib_free_out:
+       aac_fib_free(fibptr);
+       goto out;
+}
+
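
The sg64 setup above splits the bus address with a double 16-bit shift rather than a single >> 32; the idiom stays well-defined even when dma_addr_t is a 32-bit type. A sketch (cpu_to_le32 byte-swapping omitted):

#include <stdint.h>

/* Mirrors the driver's ((addr >> 16) >> 16) split, which avoids
 * undefined behaviour when the source type is only 32 bits wide. */
static void sg64_set_addr(uint32_t out[2], uint64_t addr)
{
        out[1] = (uint32_t)((addr >> 16) >> 16);        /* high word */
        out[0] = (uint32_t)(addr & 0xffffffff);         /* low word */
}
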
+int aac_send_safw_hostttime(struct aac_dev *dev, struct timeval *now)
+{
+       struct tm cur_tm;
+       char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
+       u32 datasize = sizeof(wellness_str);
+       unsigned long local_time;
+       int ret = -ENODEV;
+
+       if (!dev->sa_firmware)
+               goto out;
+
+       local_time = (u32)(now->tv_sec - (sys_tz.tz_minuteswest * 60));
+       time_to_tm(local_time, 0, &cur_tm);
+       cur_tm.tm_mon += 1;
+       cur_tm.tm_year += 1900;
+       wellness_str[8] = bin2bcd(cur_tm.tm_hour);
+       wellness_str[9] = bin2bcd(cur_tm.tm_min);
+       wellness_str[10] = bin2bcd(cur_tm.tm_sec);
+       wellness_str[12] = bin2bcd(cur_tm.tm_mon);
+       wellness_str[13] = bin2bcd(cur_tm.tm_mday);
+       wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
+       wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
+
+       ret = aac_send_wellness_command(dev, wellness_str, datasize);
+
+out:
+       return ret;
+}
+
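
The wellness string carries the local time as packed BCD at fixed byte offsets; a user-space sketch of the same layout, taking the offsets from the function above and leaving the "<HW>TD...DW..ZZ" framing opaque:

#include <stdint.h>
#include <time.h>

static uint8_t bcd(int v)
{
        return (uint8_t)(((v / 10) << 4) | (v % 10));
}

static void fill_bcd_time(char *ws, const struct tm *tm)
{
        int year = tm->tm_year + 1900;

        ws[8]  = bcd(tm->tm_hour);
        ws[9]  = bcd(tm->tm_min);
        ws[10] = bcd(tm->tm_sec);
        ws[12] = bcd(tm->tm_mon + 1);   /* tm_mon is 0-based */
        ws[13] = bcd(tm->tm_mday);
        ws[14] = bcd(year / 100);       /* century */
        ws[15] = bcd(year % 100);       /* year within century */
}
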
+int aac_send_hosttime(struct aac_dev *dev, struct timeval *now)
+{
+       int ret = -ENOMEM;
+       struct fib *fibptr;
+       __le32 *info;
+
+       fibptr = aac_fib_alloc(dev);
+       if (!fibptr)
+               goto out;
+
+       aac_fib_init(fibptr);
+       info = (__le32 *)fib_data(fibptr);
+       *info = cpu_to_le32(now->tv_sec);
+       ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
+                                       1, 1, NULL, NULL);
+
+       /*
+        * Do not set XferState to zero unless we
+        * receive a response from the F/W
+        */
+       if (ret >= 0)
+               aac_fib_complete(fibptr);
+
+       /*
+        * FIB should be freed only after
+        * getting the response from the F/W
+        */
+       if (ret != -ERESTARTSYS)
+               aac_fib_free(fibptr);
+
+out:
+       return ret;
+}
+
 /**
  *     aac_command_thread      -       command processing thread
  *     @dev: Adapter to monitor
@@ -1734,10 +2392,6 @@ out:
 int aac_command_thread(void *data)
 {
        struct aac_dev *dev = data;
-       struct hw_fib *hw_fib, *hw_newfib;
-       struct fib *fib, *newfib;
-       struct aac_fib_context *fibctx;
-       unsigned long flags;
        DECLARE_WAITQUEUE(wait, current);
        unsigned long next_jiffies = jiffies + HZ;
        unsigned long next_check_jiffies = next_jiffies;
@@ -1757,196 +2411,8 @@ int aac_command_thread(void *data)
        set_current_state(TASK_INTERRUPTIBLE);
        dprintk ((KERN_INFO "aac_command_thread start\n"));
        while (1) {
-               spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
-               while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
-                       struct list_head *entry;
-                       struct aac_aifcmd * aifcmd;
-
-                       set_current_state(TASK_RUNNING);
 
-                       entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
-                       list_del(entry);
-
-                       spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
-                       fib = list_entry(entry, struct fib, fiblink);
-                       /*
-                        *      We will process the FIB here or pass it to a
-                        *      worker thread that is TBD. We Really can't
-                        *      do anything at this point since we don't have
-                        *      anything defined for this thread to do.
-                        */
-                       hw_fib = fib->hw_fib_va;
-                       memset(fib, 0, sizeof(struct fib));
-                       fib->type = FSAFS_NTC_FIB_CONTEXT;
-                       fib->size = sizeof(struct fib);
-                       fib->hw_fib_va = hw_fib;
-                       fib->data = hw_fib->data;
-                       fib->dev = dev;
-                       /*
-                        *      We only handle AifRequest fibs from the adapter.
-                        */
-                       aifcmd = (struct aac_aifcmd *) hw_fib->data;
-                       if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
-                               /* Handle Driver Notify Events */
-                               aac_handle_aif(dev, fib);
-                               *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
-                               aac_fib_adapter_complete(fib, (u16)sizeof(u32));
-                       } else {
-                               /* The u32 here is important and intended. We are using
-                                  32bit wrapping time to fit the adapter field */
-
-                               u32 time_now, time_last;
-                               unsigned long flagv;
-                               unsigned num;
-                               struct hw_fib ** hw_fib_pool, ** hw_fib_p;
-                               struct fib ** fib_pool, ** fib_p;
-
-                               /* Sniff events */
-                               if ((aifcmd->command ==
-                                    cpu_to_le32(AifCmdEventNotify)) ||
-                                   (aifcmd->command ==
-                                    cpu_to_le32(AifCmdJobProgress))) {
-                                       aac_handle_aif(dev, fib);
-                               }
-
-                               time_now = jiffies/HZ;
-
-                               /*
-                                * Warning: no sleep allowed while
-                                * holding spinlock. We take the estimate
-                                * and pre-allocate a set of fibs outside the
-                                * lock.
-                                */
-                               num = le32_to_cpu(dev->init->AdapterFibsSize)
-                                   / sizeof(struct hw_fib); /* some extra */
-                               spin_lock_irqsave(&dev->fib_lock, flagv);
-                               entry = dev->fib_list.next;
-                               while (entry != &dev->fib_list) {
-                                       entry = entry->next;
-                                       ++num;
-                               }
-                               spin_unlock_irqrestore(&dev->fib_lock, flagv);
-                               hw_fib_pool = NULL;
-                               fib_pool = NULL;
-                               if (num
-                                && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
-                                && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
-                                       hw_fib_p = hw_fib_pool;
-                                       fib_p = fib_pool;
-                                       while (hw_fib_p < &hw_fib_pool[num]) {
-                                               if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
-                                                       --hw_fib_p;
-                                                       break;
-                                               }
-                                               if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
-                                                       kfree(*(--hw_fib_p));
-                                                       break;
-                                               }
-                                       }
-                                       if ((num = hw_fib_p - hw_fib_pool) == 0) {
-                                               kfree(fib_pool);
-                                               fib_pool = NULL;
-                                               kfree(hw_fib_pool);
-                                               hw_fib_pool = NULL;
-                                       }
-                               } else {
-                                       kfree(hw_fib_pool);
-                                       hw_fib_pool = NULL;
-                               }
-                               spin_lock_irqsave(&dev->fib_lock, flagv);
-                               entry = dev->fib_list.next;
-                               /*
-                                * For each Context that is on the
-                                * fibctxList, make a copy of the
-                                * fib, and then set the event to wake up the
-                                * thread that is waiting for it.
-                                */
-                               hw_fib_p = hw_fib_pool;
-                               fib_p = fib_pool;
-                               while (entry != &dev->fib_list) {
-                                       /*
-                                        * Extract the fibctx
-                                        */
-                                       fibctx = list_entry(entry, struct aac_fib_context, next);
-                                       /*
-                                        * Check if the queue is getting
-                                        * backlogged
-                                        */
-                                       if (fibctx->count > 20)
-                                       {
-                                               /*
-                                                * It's *not* jiffies folks,
-                                                * but jiffies / HZ so do not
-                                                * panic ...
-                                                */
-                                               time_last = fibctx->jiffies;
-                                               /*
-                                                * Has it been > 2 minutes
-                                                * since the last read off
-                                                * the queue?
-                                                */
-                                               if ((time_now - time_last) > aif_timeout) {
-                                                       entry = entry->next;
-                                                       aac_close_fib_context(dev, fibctx);
-                                                       continue;
-                                               }
-                                       }
-                                       /*
-                                        * Warning: no sleep allowed while
-                                        * holding spinlock
-                                        */
-                                       if (hw_fib_p < &hw_fib_pool[num]) {
-                                               hw_newfib = *hw_fib_p;
-                                               *(hw_fib_p++) = NULL;
-                                               newfib = *fib_p;
-                                               *(fib_p++) = NULL;
-                                               /*
-                                                * Make the copy of the FIB
-                                                */
-                                               memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
-                                               memcpy(newfib, fib, sizeof(struct fib));
-                                               newfib->hw_fib_va = hw_newfib;
-                                               /*
-                                                * Put the FIB onto the
-                                                * fibctx's fibs
-                                                */
-                                               list_add_tail(&newfib->fiblink, &fibctx->fib_list);
-                                               fibctx->count++;
-                                               /*
-                                                * Set the event to wake up the
-                                                * thread that is waiting.
-                                                */
-                                               up(&fibctx->wait_sem);
-                                       } else {
-                                               printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
-                                       }
-                                       entry = entry->next;
-                               }
-                               /*
-                                *      Set the status of this FIB
-                                */
-                               *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
-                               aac_fib_adapter_complete(fib, sizeof(u32));
-                               spin_unlock_irqrestore(&dev->fib_lock, flagv);
-                               /* Free up the remaining resources */
-                               hw_fib_p = hw_fib_pool;
-                               fib_p = fib_pool;
-                               while (hw_fib_p < &hw_fib_pool[num]) {
-                                       kfree(*hw_fib_p);
-                                       kfree(*fib_p);
-                                       ++fib_p;
-                                       ++hw_fib_p;
-                               }
-                               kfree(hw_fib_pool);
-                               kfree(fib_pool);
-                       }
-                       kfree(fib);
-                       spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
-               }
-               /*
-                *      There are no more AIF's
-                */
-               spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
+               aac_process_events(dev);
 
                /*
                 *      Background activity
@@ -1968,7 +2434,7 @@ int aac_command_thread(void *data)
 
                        /* Don't even try to talk to adapter if its sick */
                        ret = aac_check_health(dev);
-                       if (!ret && !dev->queues)
+                       if (!dev->queues)
                                break;
                        next_check_jiffies = jiffies
                                           + ((long)(unsigned)check_interval)
@@ -1981,36 +2447,16 @@ int aac_command_thread(void *data)
                                difference = (((1000000 - now.tv_usec) * HZ)
                                  + 500000) / 1000000;
                        else if (ret == 0) {
-                               struct fib *fibptr;
-
-                               if ((fibptr = aac_fib_alloc(dev))) {
-                                       int status;
-                                       __le32 *info;
-
-                                       aac_fib_init(fibptr);
-
-                                       info = (__le32 *) fib_data(fibptr);
-                                       if (now.tv_usec > 500000)
-                                               ++now.tv_sec;
-
-                                       *info = cpu_to_le32(now.tv_sec);
-
-                                       status = aac_fib_send(SendHostTime,
-                                               fibptr,
-                                               sizeof(*info),
-                                               FsaNormal,
-                                               1, 1,
-                                               NULL,
-                                               NULL);
-                                       /* Do not set XferState to zero unless
-                                        * receives a response from F/W */
-                                       if (status >= 0)
-                                               aac_fib_complete(fibptr);
-                                       /* FIB should be freed only after
-                                        * getting the response from the F/W */
-                                       if (status != -ERESTARTSYS)
-                                               aac_fib_free(fibptr);
-                               }
+
+                               if (now.tv_usec > 500000)
+                                       ++now.tv_sec;
+
+                               if (dev->sa_firmware)
+                                       ret =
+                                       aac_send_safw_hostttime(dev, &now);
+                               else
+                                       ret = aac_send_hosttime(dev, &now);
+
                                difference = (long)(unsigned)update_interval*HZ;
                        } else {
                                /* retry shortly */
index 7e83620..417ba34 100644
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -122,7 +123,6 @@ unsigned int aac_response_normal(struct aac_queue * q)
                         *      NOTE:  we cannot touch the fib after this
                         *          call, because it may have been deallocated.
                         */
-                       fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
                        fib->callback(fib->callback_data, fib);
                } else {
                        unsigned long flagv;
@@ -251,8 +251,9 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
        BUG_ON(fibptr == NULL);
        dev = fibptr->dev;
 
-       if (fibptr->hw_fib_va->header.XferState &
-           cpu_to_le32(NoMoreAifDataAvailable)) {
+       if ((fibptr->hw_fib_va->header.XferState &
+           cpu_to_le32(NoMoreAifDataAvailable)) ||
+               dev->sa_firmware) {
                aac_fib_complete(fibptr);
                aac_fib_free(fibptr);
                return;
@@ -282,8 +283,8 @@ static void aac_aif_callback(void *context, struct fib * fibptr)
  *     know there is a response on our normal priority queue. We will pull off
  *     all QE there are and wake up all the waiters before exiting.
  */
-unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
-                       int isAif, int isFastResponse, struct hw_fib *aif_fib)
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
+       int isFastResponse, struct hw_fib *aif_fib)
 {
        unsigned long mflags;
        dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
@@ -305,12 +306,14 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                        kfree (fib);
                        return 1;
                }
-               if (aif_fib != NULL) {
+               if (dev->sa_firmware) {
+                       fib->hbacmd_size = index;       /* store event type */
+               } else if (aif_fib != NULL) {
                        memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
                } else {
-                       memcpy(hw_fib,
-                               (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
-                               index), sizeof(struct hw_fib));
+                       memcpy(hw_fib, (struct hw_fib *)
+                               (((uintptr_t)(dev->regs.sa)) + index),
+                               sizeof(struct hw_fib));
                }
                INIT_LIST_HEAD(&fib->fiblink);
                fib->type = FSAFS_NTC_FIB_CONTEXT;
@@ -344,7 +347,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                        (fib_callback)aac_aif_callback, fibctx);
        } else {
                struct fib *fib = &dev->fibs[index];
-               struct hw_fib * hwfib = fib->hw_fib_va;
+               int start_callback = 0;
 
                /*
                 *      Remove this fib from the Outstanding I/O queue.
@@ -362,60 +365,104 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
                        return 0;
                }
 
-               if (isFastResponse) {
-                       /*
-                        *      Doctor the fib
-                        */
-                       *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
-                       hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
-                       fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
-               }
-
                FIB_COUNTER_INCREMENT(aac_config.FibRecved);
 
-               if (hwfib->header.Command == cpu_to_le16(NuFileSystem))
-               {
-                       __le32 *pstatus = (__le32 *)hwfib->data;
-                       if (*pstatus & cpu_to_le32(0xffff0000))
-                               *pstatus = cpu_to_le32(ST_OK);
-               }
-               if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) 
-               {
-                       if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
-                               FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
-                       else 
-                               FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
-                       /*
-                        *      NOTE:  we cannot touch the fib after this
-                        *          call, because it may have been deallocated.
-                        */
-                       if (likely(fib->callback && fib->callback_data)) {
-                               fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
-                               fib->callback(fib->callback_data, fib);
-                       } else
-                               dev_info(&dev->pdev->dev,
-                               "Invalid callback_fib[%d] (*%p)(%p)\n",
-                               index, fib->callback, fib->callback_data);
+               if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+
+                       if (isFastResponse)
+                               fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+
+                       if (fib->callback) {
+                               start_callback = 1;
+                       } else {
+                               unsigned long flagv;
+                               int complete = 0;
+
+                               dprintk((KERN_INFO "event_wait up\n"));
+                               spin_lock_irqsave(&fib->event_lock, flagv);
+                               if (fib->done == 2) {
+                                       fib->done = 1;
+                                       complete = 1;
+                               } else {
+                                       fib->done = 1;
+                                       up(&fib->event_wait);
+                               }
+                               spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+                               spin_lock_irqsave(&dev->manage_lock, mflags);
+                               dev->management_fib_count--;
+                               spin_unlock_irqrestore(&dev->manage_lock,
+                                       mflags);
+
+                               FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
+                               if (complete)
+                                       aac_fib_complete(fib);
+                       }
                } else {
-                       unsigned long flagv;
-                       dprintk((KERN_INFO "event_wait up\n"));
-                       spin_lock_irqsave(&fib->event_lock, flagv);
-                       if (!fib->done) {
-                               fib->done = 1;
-                               up(&fib->event_wait);
+                       struct hw_fib *hwfib = fib->hw_fib_va;
+
+                       if (isFastResponse) {
+                               /* Doctor the fib */
+                               *(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+                               hwfib->header.XferState |=
+                                       cpu_to_le32(AdapterProcessed);
+                               fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
                        }
-                       spin_unlock_irqrestore(&fib->event_lock, flagv);
 
-                       spin_lock_irqsave(&dev->manage_lock, mflags);
-                       dev->management_fib_count--;
-                       spin_unlock_irqrestore(&dev->manage_lock, mflags);
+                       if (hwfib->header.Command ==
+                               cpu_to_le16(NuFileSystem)) {
+                               __le32 *pstatus = (__le32 *)hwfib->data;
 
-                       FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
-                       if (fib->done == 2) {
+                               if (*pstatus & cpu_to_le32(0xffff0000))
+                                       *pstatus = cpu_to_le32(ST_OK);
+                       }
+                       if (hwfib->header.XferState &
+                               cpu_to_le32(NoResponseExpected | Async)) {
+                               if (hwfib->header.XferState & cpu_to_le32(
+                                       NoResponseExpected))
+                                       FIB_COUNTER_INCREMENT(
+                                               aac_config.NoResponseRecved);
+                               else
+                                       FIB_COUNTER_INCREMENT(
+                                               aac_config.AsyncRecved);
+                               start_callback = 1;
+                       } else {
+                               unsigned long flagv;
+                               int complete = 0;
+
+                               dprintk((KERN_INFO "event_wait up\n"));
                                spin_lock_irqsave(&fib->event_lock, flagv);
-                               fib->done = 0;
+                               if (fib->done == 2) {
+                                       fib->done = 1;
+                                       complete = 1;
+                               } else {
+                                       fib->done = 1;
+                                       up(&fib->event_wait);
+                               }
                                spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+                               spin_lock_irqsave(&dev->manage_lock, mflags);
+                               dev->management_fib_count--;
+                               spin_unlock_irqrestore(&dev->manage_lock,
+                                       mflags);
+
+                               FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+                               if (complete)
+                                       aac_fib_complete(fib);
+                       }
+               }
+
+
+               if (start_callback) {
+                       /*
+                        * NOTE:  we cannot touch the fib after this
+                        *  call, because it may have been deallocated.
+                        */
+                       if (likely(fib->callback && fib->callback_data)) {
+                               fib->callback(fib->callback_data, fib);
+                       } else {
                                aac_fib_complete(fib);
+                               aac_fib_free(fib);
                        }
 
                }
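
The restructured completion path above hinges on the three-way fib->done handshake: 0 means a waiter is still blocked on event_wait, and 2 means the waiter already abandoned the request, so the interrupt side must complete the fib itself rather than up() a semaphore nobody holds. A minimal user-space model of that decision, with illustrative names rather than the driver's API:

    #include <stdio.h>

    enum finisher { WAKE_WAITER, COMPLETE_IN_IRQ };

    /* Models the fib->done handshake: 2 = waiter abandoned the request. */
    static enum finisher finish_fib(int *done)
    {
            if (*done == 2) {
                    *done = 1;
                    return COMPLETE_IN_IRQ; /* irq path calls aac_fib_complete() */
            }
            *done = 1;
            return WAKE_WAITER;             /* driver does up(&fib->event_wait) */
    }

    int main(void)
    {
            int abandoned = 2, waiting = 0;

            printf("%d %d\n", finish_fib(&abandoned), finish_fib(&waiting));
            return 0;                       /* prints "1 0" */
    }
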
index 3ecbf20..137d22d 100644 (file)
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -57,7 +58,7 @@
 
 #include "aacraid.h"
 
-#define AAC_DRIVER_VERSION             "1.2-1"
+#define AAC_DRIVER_VERSION             "1.2.1"
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH              ""
 #endif
@@ -401,61 +402,89 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
 static int aac_slave_configure(struct scsi_device *sdev)
 {
        struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+       int chn, tid;
+       unsigned int depth = 0;
+       unsigned int set_timeout = 0;
+
+       chn = aac_logical_to_phys(sdev_channel(sdev));
+       tid = sdev_id(sdev);
+       if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+               aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+               depth = aac->hba_map[chn][tid].qd_limit;
+               set_timeout = 1;
+               goto common_config;
+       }
+
+
        if (aac->jbod && (sdev->type == TYPE_DISK))
                sdev->removable = 1;
-       if ((sdev->type == TYPE_DISK) &&
-                       (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
-                       (!aac->jbod || sdev->inq_periph_qual) &&
-                       (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
+       if (sdev->type == TYPE_DISK
+        && sdev_channel(sdev) != CONTAINER_CHANNEL
+        && (!aac->jbod || sdev->inq_periph_qual)
+        && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
                if (expose_physicals == 0)
                        return -ENXIO;
+
                if (expose_physicals < 0)
                        sdev->no_uld_attach = 1;
        }
-       if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
-                       (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
-                       !sdev->no_uld_attach) {
+
+       if (sdev->tagged_supported
+        &&  sdev->type == TYPE_DISK
+        &&  (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+        && !sdev->no_uld_attach) {
+
                struct scsi_device * dev;
                struct Scsi_Host *host = sdev->host;
                unsigned num_lsu = 0;
                unsigned num_one = 0;
-               unsigned depth;
                unsigned cid;
 
-               /*
-                * Firmware has an individual device recovery time typically
-                * of 35 seconds, give us a margin.
-                */
-               if (sdev->request_queue->rq_timeout < (45 * HZ))
-                       blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+               set_timeout = 1;
+
                for (cid = 0; cid < aac->maximum_num_containers; ++cid)
                        if (aac->fsa_dev[cid].valid)
                                ++num_lsu;
+
                __shost_for_each_device(dev, host) {
-                       if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
-                                       (!aac->raid_scsi_mode ||
-                                               (sdev_channel(sdev) != 2)) &&
-                                       !dev->no_uld_attach) {
+                       if (dev->tagged_supported
+                        && dev->type == TYPE_DISK
+                        && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+                        && !dev->no_uld_attach) {
                                if ((sdev_channel(dev) != CONTAINER_CHANNEL)
-                                || !aac->fsa_dev[sdev_id(dev)].valid)
+                                || !aac->fsa_dev[sdev_id(dev)].valid) {
                                        ++num_lsu;
-                       } else
+                               }
+                       } else {
                                ++num_one;
+                       }
                }
+
                if (num_lsu == 0)
                        ++num_lsu;
-               depth = (host->can_queue - num_one) / num_lsu;
-               if (depth > 256)
-                       depth = 256;
-               else if (depth < 2)
-                       depth = 2;
-               scsi_change_queue_depth(sdev, depth);
-       } else {
-               scsi_change_queue_depth(sdev, 1);
 
-               sdev->tagged_supported = 1;
+               depth = (host->can_queue - num_one) / num_lsu;
        }
 
+common_config:
+       /*
+        * Firmware has an individual device recovery time typically
+        * of 35 seconds, so give us a margin.
+        */
+       if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
+               blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+
+       if (depth > 256)
+               depth = 256;
+       else if (depth < 1)
+               depth = 1;
+
+       scsi_change_queue_depth(sdev, depth);
+
+       sdev->tagged_supported = 1;
+
        return 0;
 }
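
The depth arithmetic consolidated under the new common_config label spreads the host's queue slots across logical storage units, reserves one slot per untagged device, and clamps the result; note the lower bound dropped from 2 to 1. A standalone sketch of just that calculation (numbers made up):

    #include <stdio.h>

    static unsigned int aac_depth(unsigned int can_queue,
                                  unsigned int num_one, unsigned int num_lsu)
    {
            unsigned int depth;

            if (num_lsu == 0)
                    num_lsu = 1;            /* same guard as the driver */
            depth = (can_queue - num_one) / num_lsu;
            if (depth > 256)
                    depth = 256;
            else if (depth < 1)
                    depth = 1;              /* lower bound used to be 2 */
            return depth;
    }

    int main(void)
    {
            printf("%u\n", aac_depth(512, 4, 8));   /* prints "63" */
            return 0;
    }
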
 
@@ -470,6 +499,15 @@ static int aac_slave_configure(struct scsi_device *sdev)
 
 static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
 {
+       struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+       int chn, tid, is_native_device = 0;
+
+       chn = aac_logical_to_phys(sdev_channel(sdev));
+       tid = sdev_id(sdev);
+       if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+               aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
+               is_native_device = 1;
+
        if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
            (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
                struct scsi_device * dev;
@@ -491,9 +529,12 @@ static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
                else if (depth < 2)
                        depth = 2;
                return scsi_change_queue_depth(sdev, depth);
+       } else if (is_native_device) {
+               scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
+       } else {
+               scsi_change_queue_depth(sdev, 1);
        }
-
-       return scsi_change_queue_depth(sdev, 1);
+       return sdev->queue_depth;
 }
 
 static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
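
Both aac_slave_configure and aac_change_queue_depth now detect devices driven through the native HBA path the same way: translate the logical SCSI channel to a physical bus, bounds-check it, and look the target up in hba_map. A user-space sketch of that test; the channel mapping and the constant values here are assumptions for illustration, not the driver's definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define AAC_MAX_BUSES   5               /* assumed sizes, for illustration */
    #define AAC_MAX_TARGETS 256
    #define AAC_DEVTYPE_NATIVE_RAW 3        /* illustrative value */

    static int logical_to_phys(int chan)    /* assumed mapping */
    {
            return chan ? chan - 1 : 0;
    }

    static bool is_native(unsigned char map[AAC_MAX_BUSES][AAC_MAX_TARGETS],
                          int channel, int target)
    {
            int bus = logical_to_phys(channel);

            return bus < AAC_MAX_BUSES && target < AAC_MAX_TARGETS &&
                   map[bus][target] == AAC_DEVTYPE_NATIVE_RAW;
    }

    int main(void)
    {
            static unsigned char map[AAC_MAX_BUSES][AAC_MAX_TARGETS];

            map[1][7] = AAC_DEVTYPE_NATIVE_RAW;
            printf("%d %d\n", is_native(map, 2, 7), is_native(map, 0, 7));
            return 0;                       /* prints "1 0" */
    }
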
@@ -516,8 +557,39 @@ static struct device_attribute aac_raid_level_attr = {
        .show = aac_show_raid_level
 };
 
+static ssize_t aac_show_unique_id(struct device *dev,
+            struct device_attribute *attr, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+       unsigned char sn[16];
+
+       memset(sn, 0, sizeof(sn));
+
+       if (sdev_channel(sdev) == CONTAINER_CHANNEL)
+               memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
+
+       return snprintf(buf, 16 * 2 + 2,
+               "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+               sn[0], sn[1], sn[2], sn[3],
+               sn[4], sn[5], sn[6], sn[7],
+               sn[8], sn[9], sn[10], sn[11],
+               sn[12], sn[13], sn[14], sn[15]);
+}
+
+static struct device_attribute aac_unique_id_attr = {
+       .attr = {
+               .name = "unique_id",
+               .mode = 0444,
+       },
+       .show = aac_show_unique_id
+};
+
+
+
 static struct device_attribute *aac_dev_attrs[] = {
        &aac_raid_level_attr,
+       &aac_unique_id_attr,
        NULL,
 };
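
The new unique_id attribute renders a 16-byte container identifier as 32 uppercase hex digits. A loop-based formatter is equivalent to the long format string used above; this is a user-space sketch, not the driver code:

    #include <stdio.h>

    /* Render a 16-byte identifier as 32 uppercase hex digits plus '\n'. */
    static int format_unique_id(const unsigned char sn[16], char *buf, size_t len)
    {
            int n = 0;

            for (int i = 0; i < 16; i++)
                    n += snprintf(buf + n, len - n, "%02X", sn[i]);
            n += snprintf(buf + n, len - n, "\n");
            return n;
    }

    int main(void)
    {
            unsigned char sn[16] = { 0xde, 0xad, 0xbe, 0xef };
            char buf[16 * 2 + 2];

            format_unique_id(sn, buf, sizeof(buf));
            fputs(buf, stdout);     /* DEADBEEF followed by 24 zeros */
            return 0;
    }
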
 
@@ -534,46 +606,136 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
        struct scsi_device * dev = cmd->device;
        struct Scsi_Host * host = dev->host;
        struct aac_dev * aac = (struct aac_dev *)host->hostdata;
-       int count;
+       int count, found;
+       u32 bus, cid;
        int ret = FAILED;
 
-       printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n",
-               AAC_DRIVERNAME,
-               host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
-       switch (cmd->cmnd[0]) {
-       case SERVICE_ACTION_IN_16:
-               if (!(aac->raw_io_interface) ||
-                   !(aac->raw_io_64) ||
-                   ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
-                       break;
-       case INQUIRY:
-       case READ_CAPACITY:
-               /* Mark associated FIB to not complete, eh handler does this */
+       bus = aac_logical_to_phys(scmd_channel(cmd));
+       cid = scmd_id(cmd);
+       if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+               struct fib *fib;
+               struct aac_hba_tm_req *tmf;
+               int status;
+               u64 address;
+               __le32 managed_request_id;
+
+               pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
+                AAC_DRIVERNAME,
+                host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);
+
+               found = 0;
                for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
-                       struct fib * fib = &aac->fibs[count];
-                       if (fib->hw_fib_va->header.XferState &&
-                         (fib->flags & FIB_CONTEXT_FLAG) &&
-                         (fib->callback_data == cmd)) {
-                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
-                               cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+                       fib = &aac->fibs[count];
+                       if (*(u8 *)fib->hw_fib_va != 0 &&
+                               (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
+                               (fib->callback_data == cmd)) {
+                               found = 1;
+                               managed_request_id = ((struct aac_hba_cmd_req *)
+                                       fib->hw_fib_va)->request_id;
+                               break;
+                       }
+               }
+               if (!found)
+                       return ret;
+
+               /* start a HBA_TMF_ABORT_TASK TMF request */
+               fib = aac_fib_alloc(aac);
+               if (!fib)
+                       return ret;
+
+               tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+               memset(tmf, 0, sizeof(*tmf));
+               tmf->tmf = HBA_TMF_ABORT_TASK;
+               tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+               tmf->lun[1] = cmd->device->lun;
+
+               address = (u64)fib->hw_error_pa;
+               tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+               tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+               tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+               fib->hbacmd_size = sizeof(*tmf);
+               cmd->SCp.sent_command = 0;
+
+               status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
+                                 (fib_callback) aac_hba_callback,
+                                 (void *) cmd);
+
+               /* Wait up to 2 minutes for completion */
+               for (count = 0; count < 120; ++count) {
+                       if (cmd->SCp.sent_command) {
                                ret = SUCCESS;
+                               break;
                        }
+                       msleep(1000);
                }
-               break;
-       case TEST_UNIT_READY:
-               /* Mark associated FIB to not complete, eh handler does this */
-               for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
-                       struct scsi_cmnd * command;
-                       struct fib * fib = &aac->fibs[count];
-                       if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
-                         (fib->flags & FIB_CONTEXT_FLAG) &&
-                         ((command = fib->callback_data)) &&
-                         (command->device == cmd->device)) {
-                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
-                               command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
-                               if (command == cmd)
+
+               if (ret != SUCCESS)
+                       pr_err("%s: Host adapter abort request timed out\n",
+                       AAC_DRIVERNAME);
+       } else {
+               pr_err(
+                       "%s: Host adapter abort request.\n"
+                       "%s: Outstanding commands on (%d,%d,%d,%d):\n",
+                       AAC_DRIVERNAME, AAC_DRIVERNAME,
+                       host->host_no, sdev_channel(dev), sdev_id(dev),
+                       (int)dev->lun);
+               switch (cmd->cmnd[0]) {
+               case SERVICE_ACTION_IN_16:
+                       if (!(aac->raw_io_interface) ||
+                           !(aac->raw_io_64) ||
+                           ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+                               break;
+               case INQUIRY:
+               case READ_CAPACITY:
+                       /*
+                        * Mark associated FIB to not complete,
+                        * eh handler does this
+                        */
+                       for (count = 0;
+                               count < (host->can_queue + AAC_NUM_MGT_FIB);
+                               ++count) {
+                               struct fib *fib = &aac->fibs[count];
+
+                               if (fib->hw_fib_va->header.XferState &&
+                               (fib->flags & FIB_CONTEXT_FLAG) &&
+                               (fib->callback_data == cmd)) {
+                                       fib->flags |=
+                                               FIB_CONTEXT_FLAG_TIMED_OUT;
+                                       cmd->SCp.phase =
+                                               AAC_OWNER_ERROR_HANDLER;
                                        ret = SUCCESS;
+                               }
+                       }
+                       break;
+               case TEST_UNIT_READY:
+                       /*
+                        * Mark associated FIB to not complete,
+                        * eh handler does this
+                        */
+                       for (count = 0;
+                               count < (host->can_queue + AAC_NUM_MGT_FIB);
+                               ++count) {
+                               struct scsi_cmnd *command;
+                               struct fib *fib = &aac->fibs[count];
+
+                               command = fib->callback_data;
+
+                               if ((fib->hw_fib_va->header.XferState &
+                                       cpu_to_le32
+                                       (Async | NoResponseExpected)) &&
+                                       (fib->flags & FIB_CONTEXT_FLAG) &&
+                                       ((command)) &&
+                                       (command->device == cmd->device)) {
+                                       fib->flags |=
+                                               FIB_CONTEXT_FLAG_TIMED_OUT;
+                                       command->SCp.phase =
+                                               AAC_OWNER_ERROR_HANDLER;
+                                       if (command == cmd)
+                                               ret = SUCCESS;
+                               }
                        }
+                       break;
                }
        }
        return ret;
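
After firing the HBA_TMF_ABORT_TASK request asynchronously, the handler above simply polls cmd->SCp.sent_command (set by aac_hba_callback) once a second for up to two minutes. The same wait loop reappears in aac_eh_reset below; distilled into a user-space helper it is just:

    #include <stdbool.h>
    #include <unistd.h>

    /* Poll a completion flag once a second, up to 'seconds' times.
     * The driver polls cmd->SCp.sent_command, set by aac_hba_callback. */
    static bool wait_polled(volatile int *flag, int seconds)
    {
            for (int i = 0; i < seconds; i++) {
                    if (*flag)
                            return true;    /* completed: return SUCCESS */
                    sleep(1);               /* msleep(1000) in the driver */
            }
            return false;                   /* timed out: ret stays FAILED */
    }

    int main(void)
    {
            volatile int done = 1;          /* pretend the callback already ran */

            return wait_polled(&done, 120) ? 0 : 1;
    }
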
@@ -588,70 +750,165 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 {
        struct scsi_device * dev = cmd->device;
        struct Scsi_Host * host = dev->host;
-       struct scsi_cmnd * command;
-       int count;
        struct aac_dev * aac = (struct aac_dev *)host->hostdata;
-       unsigned long flags;
-
-       /* Mark the associated FIB to not complete, eh handler does this */
-       for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
-               struct fib * fib = &aac->fibs[count];
-               if (fib->hw_fib_va->header.XferState &&
-                 (fib->flags & FIB_CONTEXT_FLAG) &&
-                 (fib->callback_data == cmd)) {
-                       fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
-                       cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+       int count;
+       u32 bus, cid;
+       int ret = FAILED;
+
+       bus = aac_logical_to_phys(scmd_channel(cmd));
+       cid = scmd_id(cmd);
+       if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+               aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+               struct fib *fib;
+               int status;
+               u64 address;
+               u8 command;
+
+               pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+                       AAC_DRIVERNAME);
+
+               fib = aac_fib_alloc(aac);
+               if (!fib)
+                       return ret;
+
+
+               if (aac->hba_map[bus][cid].reset_state == 0) {
+                       struct aac_hba_tm_req *tmf;
+
+                       /* start a HBA_TMF_LUN_RESET TMF request */
+                       tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+                       memset(tmf, 0, sizeof(*tmf));
+                       tmf->tmf = HBA_TMF_LUN_RESET;
+                       tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+                       tmf->lun[1] = cmd->device->lun;
+
+                       address = (u64)fib->hw_error_pa;
+                       tmf->error_ptr_hi = cpu_to_le32
+                                       ((u32)(address >> 32));
+                       tmf->error_ptr_lo = cpu_to_le32
+                                       ((u32)(address & 0xffffffff));
+                       tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+                       fib->hbacmd_size = sizeof(*tmf);
+
+                       command = HBA_IU_TYPE_SCSI_TM_REQ;
+                       aac->hba_map[bus][cid].reset_state++;
+               } else if (aac->hba_map[bus][cid].reset_state >= 1) {
+                       struct aac_hba_reset_req *rst;
+
+                       /* already tried, start a hard reset now */
+                       rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
+                       memset(rst, 0, sizeof(*rst));
+                       /* reset_type is already zero... */
+                       rst->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+
+                       address = (u64)fib->hw_error_pa;
+                       rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+                       rst->error_ptr_lo = cpu_to_le32
+                               ((u32)(address & 0xffffffff));
+                       rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+                       fib->hbacmd_size = sizeof(*rst);
+
+                       command = HBA_IU_TYPE_SATA_REQ;
+                       aac->hba_map[bus][cid].reset_state = 0;
                }
-       }
-       printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
-                                       AAC_DRIVERNAME);
+               cmd->SCp.sent_command = 0;
 
-       if ((count = aac_check_health(aac)))
-               return count;
-       /*
-        * Wait for all commands to complete to this specific
-        * target (block maximum 60 seconds).
-        */
-       for (count = 60; count; --count) {
-               int active = aac->in_reset;
+               status = aac_hba_send(command, fib,
+                                 (fib_callback) aac_hba_callback,
+                                 (void *) cmd);
 
-               if (active == 0)
-               __shost_for_each_device(dev, host) {
-                       spin_lock_irqsave(&dev->list_lock, flags);
-                       list_for_each_entry(command, &dev->cmd_list, list) {
-                               if ((command != cmd) &&
-                                   (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
-                                       active++;
-                                       break;
-                               }
-                       }
-                       spin_unlock_irqrestore(&dev->list_lock, flags);
-                       if (active)
+               /* Wait up to 2 minutes for completion */
+               for (count = 0; count < 120; ++count) {
+                       if (cmd->SCp.sent_command) {
+                               ret = SUCCESS;
                                break;
+                       }
+                       msleep(1000);
+               }
 
+               if (ret != SUCCESS)
+                       pr_err("%s: Host adapter reset request timed out\n",
+                       AAC_DRIVERNAME);
+       } else {
+               struct scsi_cmnd *command;
+               unsigned long flags;
+
+               /* Mark the assoc. FIB to not complete, eh handler does this */
+               for (count = 0;
+                       count < (host->can_queue + AAC_NUM_MGT_FIB);
+                       ++count) {
+                       struct fib *fib = &aac->fibs[count];
+
+                       if (fib->hw_fib_va->header.XferState &&
+                               (fib->flags & FIB_CONTEXT_FLAG) &&
+                               (fib->callback_data == cmd)) {
+                               fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+                               cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+                       }
                }
+
+               pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+                                       AAC_DRIVERNAME);
+
+               count = aac_check_health(aac);
+               if (count)
+                       return count;
                /*
-                * We can exit If all the commands are complete
+                * Wait for all commands to complete to this specific
+                * target (block maximum 60 seconds).
                 */
-               if (active == 0)
-                       return SUCCESS;
-               ssleep(1);
+               for (count = 60; count; --count) {
+                       int active = aac->in_reset;
+
+                       if (active == 0)
+                       __shost_for_each_device(dev, host) {
+                               spin_lock_irqsave(&dev->list_lock, flags);
+                               list_for_each_entry(command, &dev->cmd_list,
+                                       list) {
+                                       if ((command != cmd) &&
+                                       (command->SCp.phase ==
+                                       AAC_OWNER_FIRMWARE)) {
+                                               active++;
+                                               break;
+                                       }
+                               }
+                               spin_unlock_irqrestore(&dev->list_lock, flags);
+                               if (active)
+                                       break;
+
+                       }
+                       /*
+                        * We can exit if all the commands are complete
+                        */
+                       if (active == 0)
+                               return SUCCESS;
+                       ssleep(1);
+               }
+               pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+
+               /*
+                * This adapter needs a blind reset, only do so for
+                * This adapter needs a blind reset; only do so for
+                * adapters that support a register reset, instead of
+                * a commanded reset.
+               if (((aac->supplement_adapter_info.SupportedOptions2 &
+                         AAC_OPTION_MU_RESET) ||
+                         (aac->supplement_adapter_info.SupportedOptions2 &
+                         AAC_OPTION_DOORBELL_RESET)) &&
+                         aac_check_reset &&
+                         ((aac_check_reset != 1) ||
+                          !(aac->supplement_adapter_info.SupportedOptions2 &
+                           AAC_OPTION_IGNORE_RESET))) {
+                       /* Bypass wait for command quiesce */
+                       aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
+               }
+               ret = SUCCESS;
        }
-       printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
        /*
-        * This adapter needs a blind reset, only do so for Adapters that
-        * support a register, instead of a commanded, reset.
+        * Cause an immediate retry of the command with a ten second delay
+        * after a successful TUR (test unit ready)
         */
-       if (((aac->supplement_adapter_info.SupportedOptions2 &
-         AAC_OPTION_MU_RESET) ||
-         (aac->supplement_adapter_info.SupportedOptions2 &
-         AAC_OPTION_DOORBELL_RESET)) &&
-         aac_check_reset &&
-         ((aac_check_reset != 1) ||
-          !(aac->supplement_adapter_info.SupportedOptions2 &
-           AAC_OPTION_IGNORE_RESET)))
-               aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
-       return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
+       return ret;
 }
 
 /**
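
For native devices the reset handler escalates per target: the first attempt sends a LUN-level task management function and records that in reset_state; any later attempt goes straight to the harder controller-side reset IU and rearms the state. A tiny model of that two-step escalation (illustrative names):

    #include <stdio.h>

    enum reset_kind { LUN_RESET_TMF, HARD_RESET_IU };

    /* state 0: try the LUN-level TMF first; otherwise escalate and rearm. */
    static enum reset_kind next_reset(int *reset_state)
    {
            if (*reset_state == 0) {
                    (*reset_state)++;       /* remember the soft attempt */
                    return LUN_RESET_TMF;   /* HBA_TMF_LUN_RESET */
            }
            *reset_state = 0;               /* rearm for the next incident */
            return HARD_RESET_IU;           /* HBA_IU_TYPE_SATA_REQ path */
    }

    int main(void)
    {
            int state = 0;

            printf("%d %d %d\n", next_reset(&state), next_reset(&state),
                   next_reset(&state));     /* prints "0 1 0" */
            return 0;
    }
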
@@ -911,10 +1168,16 @@ static ssize_t aac_store_reset_adapter(struct device *device,
                                       const char *buf, size_t count)
 {
        int retval = -EACCES;
+       int bled = 0;
+       struct aac_dev *aac;
+
 
        if (!capable(CAP_SYS_ADMIN))
                return retval;
-       retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!');
+
+       aac = (struct aac_dev *)class_to_shost(device)->hostdata;
+       bled = buf[0] == '!' ? 1:0;
+       retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
        if (retval >= 0)
                retval = count;
        return retval;
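
The store hook now separates parsing from execution: a leading '!' in the written string requests a forced reset (bled = 1), and the reset always goes through the IOP_HWSOFT_RESET path. The parse, in isolation:

    #include <stdio.h>

    /* '!' requests a forced reset; the result becomes the 'bled' argument. */
    static int parse_reset_request(const char *buf)
    {
            return buf[0] == '!' ? 1 : 0;
    }

    int main(void)
    {
            printf("%d %d\n", parse_reset_request("!"),
                   parse_reset_request("1"));       /* prints "1 0" */
            return 0;
    }
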
@@ -1070,6 +1333,7 @@ static void __aac_shutdown(struct aac_dev * aac)
 {
        int i;
 
+       aac->adapter_shutdown = 1;
        aac_send_shutdown(aac);
 
        if (aac->aif_thread) {
@@ -1285,7 +1549,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        else
                shost->this_id = shost->max_id;
 
-       if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+       if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
                aac_intr_normal(aac, 0, 2, 0, NULL);
 
        /*
@@ -1327,35 +1591,12 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 static void aac_release_resources(struct aac_dev *aac)
 {
-       int i;
-
        aac_adapter_disable_int(aac);
-       if (aac->pdev->device == PMC_DEVICE_S6 ||
-           aac->pdev->device == PMC_DEVICE_S7 ||
-           aac->pdev->device == PMC_DEVICE_S8 ||
-           aac->pdev->device == PMC_DEVICE_S9) {
-               if (aac->max_msix > 1) {
-                       for (i = 0; i < aac->max_msix; i++)
-                               free_irq(pci_irq_vector(aac->pdev, i),
-                                       &(aac->aac_msix[i]));
-               } else {
-                       free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
-               }
-       } else {
-               free_irq(aac->pdev->irq, aac);
-       }
-       if (aac->msi)
-               pci_disable_msi(aac->pdev);
-       else if (aac->max_msix > 1)
-               pci_disable_msix(aac->pdev);
-
+       aac_free_irq(aac);
 }
 
 static int aac_acquire_resources(struct aac_dev *dev)
 {
-       int i, j;
-       int instance = dev->id;
-       const char *name = dev->name;
        unsigned long status;
        /*
         *      First clear out all interrupts.  Then enable the ones that we
@@ -1377,37 +1618,8 @@ static int aac_acquire_resources(struct aac_dev *dev)
        if (dev->msi_enabled)
                aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
 
-       if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
-               for (i = 0; i < dev->max_msix; i++) {
-                       dev->aac_msix[i].vector_no = i;
-                       dev->aac_msix[i].dev = dev;
-
-                       if (request_irq(pci_irq_vector(dev->pdev, i),
-                                       dev->a_ops.adapter_intr,
-                                       0, "aacraid", &(dev->aac_msix[i]))) {
-                               printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
-                                               name, instance, i);
-                               for (j = 0 ; j < i ; j++)
-                                       free_irq(pci_irq_vector(dev->pdev, j),
-                                                &(dev->aac_msix[j]));
-                               pci_disable_msix(dev->pdev);
-                               goto error_iounmap;
-                       }
-               }
-       } else {
-               dev->aac_msix[0].vector_no = 0;
-               dev->aac_msix[0].dev = dev;
-
-               if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
-                       IRQF_SHARED, "aacraid",
-                       &(dev->aac_msix[0])) < 0) {
-                       if (dev->msi)
-                               pci_disable_msi(dev->pdev);
-                       printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
-                                       name, instance);
-                       goto error_iounmap;
-               }
-       }
+       if (aac_acquire_irq(dev))
+               goto error_iounmap;
 
        aac_adapter_enable_int(dev);
 
@@ -1420,7 +1632,7 @@ static int aac_acquire_resources(struct aac_dev *dev)
                /* After EEH recovery or suspend resume, max_msix count
                 * may change, therefore updating in init as well.
                 */
-               dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+               dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
                aac_adapter_start(dev);
        }
        return 0;
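
Several hunks in this series replace direct struct aac_init field accesses with init->r7.* or init->r8.*: the init block has become a union with one layout per firmware interface revision, selected by the communication interface type. A compilable sketch of the pattern; the member names and layouts here are illustrative, not the driver's real definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layouts only; the real r7/r8 structs are far larger. */
    struct init_r7 { uint32_t host_elapsed_seconds; uint32_t no_of_msix_vectors; };
    struct init_r8 { uint32_t host_elapsed_seconds; uint32_t rr_queue_count; };

    union aac_init_sketch {
            struct init_r7 r7;
            struct init_r8 r8;
    };

    int main(void)
    {
            union aac_init_sketch init;
            int type3 = 1;                  /* AAC_COMM_MESSAGE_TYPE3 -> r8 */

            if (type3)
                    init.r8.host_elapsed_seconds = 12345;
            else
                    init.r7.host_elapsed_seconds = 12345;
            printf("%u\n", init.r8.host_elapsed_seconds);
            return 0;
    }
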
index 6c53b1d..c59074e 100644 (file)
@@ -5,7 +5,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 7d8013f..a1bc5bb 100644 (file)
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -60,7 +61,7 @@ static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
                 * case warrants this half baked, but convenient, check here.
                 */
                if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
-                       dev->init->MaxIoCommands =
+                       dev->init->r7.max_io_commands =
                                cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
                        dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
                }
index ac16380..0e69a80 100644 (file)
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -315,10 +316,10 @@ static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
 
 static void aac_rx_start_adapter(struct aac_dev *dev)
 {
-       struct aac_init *init;
+       union aac_init *init;
 
        init = dev->init;
-       init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+       init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
        // We can only use a 32 bit address here
        rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
          0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -470,7 +471,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
        return 0;
 }
 
-static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
+static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
 {
        u32 var = 0;
 
@@ -559,7 +560,7 @@ int _aac_rx_init(struct aac_dev *dev)
        dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
        dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
        if ((((status & 0x0c) != 0x0c) || aac_reset_devices || reset_devices) &&
-         !aac_rx_restart_adapter(dev, 0))
+         !aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
                /* Make sure the Hardware FIFO is empty */
                while ((++restart < 512) &&
                  (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
@@ -568,7 +569,8 @@ int _aac_rx_init(struct aac_dev *dev)
         */
        status = rx_readl(dev, MUnit.OMRx[0]);
        if (status & KERNEL_PANIC) {
-               if (aac_rx_restart_adapter(dev, aac_rx_check_health(dev)))
+               if (aac_rx_restart_adapter(dev,
+                       aac_rx_check_health(dev), IOP_HWSOFT_RESET))
                        goto error_iounmap;
                ++restart;
        }
@@ -606,7 +608,8 @@ int _aac_rx_init(struct aac_dev *dev)
                  ((startup_timeout > 60)
                    ? (startup_timeout - 60)
                    : (startup_timeout / 2))))) {
-                       if (likely(!aac_rx_restart_adapter(dev, aac_rx_check_health(dev))))
+                       if (likely(!aac_rx_restart_adapter(dev,
+                               aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
                                start = jiffies;
                        ++restart;
                }
index 869aea2..553922f 100644 (file)
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -245,19 +246,19 @@ static void aac_sa_interrupt_adapter (struct aac_dev *dev)
 
 static void aac_sa_start_adapter(struct aac_dev *dev)
 {
-       struct aac_init *init;
+       union aac_init *init;
        /*
         * Fill in the remaining pieces of the init.
         */
        init = dev->init;
-       init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+       init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
        /* We can only use a 32 bit address here */
        sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, 
                        (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
                        NULL, NULL, NULL, NULL, NULL);
 }
 
-static int aac_sa_restart_adapter(struct aac_dev *dev, int bled)
+static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
 {
        return -EINVAL;
 }
index 0c45388..8e4e2dd 100644 (file)
@@ -6,7 +6,8 @@
  * Adaptec aacraid device driver for Linux.
  *
  * Copyright (c) 2000-2010 Adaptec, Inc.
- *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *              2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -135,8 +136,16 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 
        if (mode & AAC_INT_MODE_AIF) {
                /* handle AIF */
-               if (dev->aif_thread && dev->fsa_dev)
-                       aac_intr_normal(dev, 0, 2, 0, NULL);
+               if (dev->sa_firmware) {
+                       u32 events = src_readl(dev, MUnit.SCR0);
+
+                       aac_intr_normal(dev, events, 1, 0, NULL);
+                       writel(events, &dev->IndexRegs->Mailbox[0]);
+                       src_writel(dev, MUnit.IDR, 1 << 23);
+               } else {
+                       if (dev->aif_thread && dev->fsa_dev)
+                               aac_intr_normal(dev, 0, 2, 0, NULL);
+               }
                if (dev->msi_enabled)
                        aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
                mode = 0;
@@ -148,17 +157,19 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
                for (;;) {
                        isFastResponse = 0;
                        /* remove toggle bit (31) */
-                       handle = (dev->host_rrq[index] & 0x7fffffff);
-                       /* check fast response bit (30) */
+                       handle = le32_to_cpu((dev->host_rrq[index])
+                               & 0x7fffffff);
+                       /* check fast response bits (30, 1) */
                        if (handle & 0x40000000)
                                isFastResponse = 1;
                        handle &= 0x0000ffff;
                        if (handle == 0)
                                break;
+                       handle >>= 2;
                        if (dev->msi_enabled && dev->max_msix > 1)
                                atomic_dec(&dev->rrq_outstanding[vector_no]);
+                       aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
                        dev->host_rrq[index++] = 0;
-                       aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
                        if (index == (vector_no + 1) * dev->vector_cap)
                                index = vector_no * dev->vector_cap;
                        dev->host_rrq_idx[vector_no] = index;
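
The rewritten response loop decodes each host RRQ entry in stages: drop the toggle bit (31), test the fast-response bit (30), mask down to the 16-bit handle, then shift right by 2 to recover the fib index, where the old code subtracted 1. A user-space sketch of that decoding with a worked value:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t decode_rrq(uint32_t entry, int *fast)
    {
            uint32_t handle = entry & 0x7fffffff;   /* drop toggle bit 31 */

            *fast = (handle & 0x40000000) != 0;     /* fast-response bit 30 */
            handle &= 0x0000ffff;                   /* 16-bit handle */
            return handle >> 2;                     /* fib index */
    }

    int main(void)
    {
            int fast;
            unsigned int index = decode_rrq(0xc0000014, &fast);

            printf("%u fast=%d\n", index, fast);    /* prints "5 fast=1" */
            return 0;
    }
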
@@ -384,7 +395,7 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
 
 static void aac_src_start_adapter(struct aac_dev *dev)
 {
-       struct aac_init *init;
+       union aac_init *init;
        int i;
 
         /* reset host_rrq_idx first */
@@ -392,14 +403,26 @@ static void aac_src_start_adapter(struct aac_dev *dev)
                dev->host_rrq_idx[i] = i * dev->vector_cap;
                atomic_set(&dev->rrq_outstanding[i], 0);
        }
+       atomic_set(&dev->msix_counter, 0);
        dev->fibs_pushed_no = 0;
 
        init = dev->init;
-       init->HostElapsedSeconds = cpu_to_le32(get_seconds());
+       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+               init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
+               src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+                       lower_32_bits(dev->init_pa),
+                       upper_32_bits(dev->init_pa),
+                       sizeof(struct _r8) +
+                       (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
+                       0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+       } else {
+               init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
+               // We can only use a 32 bit address here
+               src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+                       (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
+                       NULL, NULL, NULL, NULL, NULL);
+       }
 
-       /* We can only use a 32 bit address here */
-       src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
-         0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
 }
 
 /**
@@ -435,6 +458,11 @@ static int aac_src_check_health(struct aac_dev *dev)
        return 0;
 }
 
+static inline u32 aac_get_vector(struct aac_dev *dev)
+{
+       return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
+}
+
 /**
  *     aac_src_deliver_message
  *     @fib: fib to issue
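
On sa_firmware controllers with the TYPE3 communication interface, aac_src_deliver_message (next hunk) spreads requests across MSI-X vectors using the aac_get_vector helper added above: an atomic counter incremented per request, taken modulo max_msix. Modeled with C11 atomics, where fetch_add returns the old value, so we add one to mirror the kernel's atomic_inc_return:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint msix_counter;

    /* fetch_add returns the old value; add 1 to mirror atomic_inc_return() */
    static unsigned int get_vector(unsigned int max_msix)
    {
            return (atomic_fetch_add(&msix_counter, 1) + 1) % max_msix;
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    printf("%u ", get_vector(3));   /* 1 2 0 1 2 */
            printf("\n");
            return 0;
    }
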
@@ -448,66 +476,125 @@ static int aac_src_deliver_message(struct fib *fib)
        u32 fibsize;
        dma_addr_t address;
        struct aac_fib_xporthdr *pFibX;
+       int native_hba;
 #if !defined(writeq)
        unsigned long flags;
 #endif
 
-       u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
        u16 vector_no;
 
        atomic_inc(&q->numpending);
 
-       if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
-           dev->max_msix > 1) {
-               vector_no = fib->vector_no;
-               fib->hw_fib_va->header.Handle += (vector_no << 16);
+       native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;
+
+
+       if (dev->msi_enabled && dev->max_msix > 1 &&
+               (native_hba || fib->hw_fib_va->header.Command != AifRequest)) {
+
+               if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+                       && dev->sa_firmware)
+                       vector_no = aac_get_vector(dev);
+               else
+                       vector_no = fib->vector_no;
+
+               if (native_hba) {
+                       if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+                               struct aac_hba_tm_req *tm_req;
+
+                               tm_req = (struct aac_hba_tm_req *)
+                                               fib->hw_fib_va;
+                               if (tm_req->iu_type ==
+                                       HBA_IU_TYPE_SCSI_TM_REQ) {
+                                       ((struct aac_hba_tm_req *)
+                                               fib->hw_fib_va)->reply_qid
+                                                       = vector_no;
+                                       ((struct aac_hba_tm_req *)
+                                               fib->hw_fib_va)->request_id
+                                                       += (vector_no << 16);
+                               } else {
+                                       ((struct aac_hba_reset_req *)
+                                               fib->hw_fib_va)->reply_qid
+                                                       = vector_no;
+                                       ((struct aac_hba_reset_req *)
+                                               fib->hw_fib_va)->request_id
+                                                       += (vector_no << 16);
+                               }
+                       } else {
+                               ((struct aac_hba_cmd_req *)
+                                       fib->hw_fib_va)->reply_qid
+                                               = vector_no;
+                               ((struct aac_hba_cmd_req *)
+                                       fib->hw_fib_va)->request_id
+                                               += (vector_no << 16);
+                       }
+               } else {
+                       fib->hw_fib_va->header.Handle += (vector_no << 16);
+               }
        } else {
                vector_no = 0;
        }
 
        atomic_inc(&dev->rrq_outstanding[vector_no]);
 
-       if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
-               /* Calculate the amount to the fibsize bits */
-               fibsize = (hdr_size + 127) / 128 - 1;
-               if (fibsize > (ALIGN32 - 1))
-                       return -EMSGSIZE;
-               /* New FIB header, 32-bit */
+       if (native_hba) {
                address = fib->hw_fib_pa;
-               fib->hw_fib_va->header.StructType = FIB_MAGIC2;
-               fib->hw_fib_va->header.SenderFibAddress = (u32)address;
-               fib->hw_fib_va->header.u.TimeStamp = 0;
-               BUG_ON(upper_32_bits(address) != 0L);
+               fibsize = (fib->hbacmd_size + 127) / 128 - 1;
+               if (fibsize > 31)
+                       fibsize = 31;
                address |= fibsize;
+#if defined(writeq)
+               src_writeq(dev, MUnit.IQN_L, (u64)address);
+#else
+               spin_lock_irqsave(&fib->dev->iq_lock, flags);
+               src_writel(dev, MUnit.IQN_H,
+                       upper_32_bits(address) & 0xffffffff);
+               src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
+               spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
+#endif
        } else {
-               /* Calculate the amount to the fibsize bits */
-               fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
-               if (fibsize > (ALIGN32 - 1))
-                       return -EMSGSIZE;
-
-               /* Fill XPORT header */
-               pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
-               pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
-               pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
-               pFibX->Size = cpu_to_le32(hdr_size);
-
-               /*
-                * The xport header has been 32-byte aligned for us so that fibsize
-                * can be masked out of this address by hardware. -- BenC
-                */
-               address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
-               if (address & (ALIGN32 - 1))
-                       return -EINVAL;
+               if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+                       dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+                       /* Calculate the amount to the fibsize bits */
+                       fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
+                               + 127) / 128 - 1;
+                       /* New FIB header, 32-bit */
+                       address = fib->hw_fib_pa;
+                       fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+                       fib->hw_fib_va->header.SenderFibAddress =
+                               cpu_to_le32((u32)address);
+                       fib->hw_fib_va->header.u.TimeStamp = 0;
+                       WARN_ON(upper_32_bits(address) != 0L);
+               } else {
+                       /* Calculate the amount to the fibsize bits */
+                       fibsize = (sizeof(struct aac_fib_xporthdr) +
+                               le16_to_cpu(fib->hw_fib_va->header.Size)
+                               + 127) / 128 - 1;
+                       /* Fill XPORT header */
+                       pFibX = (struct aac_fib_xporthdr *)
+                               ((unsigned char *)fib->hw_fib_va -
+                               sizeof(struct aac_fib_xporthdr));
+                       pFibX->Handle = fib->hw_fib_va->header.Handle;
+                       pFibX->HostAddress =
+                               cpu_to_le64((u64)fib->hw_fib_pa);
+                       pFibX->Size = cpu_to_le32(
+                               le16_to_cpu(fib->hw_fib_va->header.Size));
+                       address = fib->hw_fib_pa -
+                               (u64)sizeof(struct aac_fib_xporthdr);
+               }
+               if (fibsize > 31)
+                       fibsize = 31;
                address |= fibsize;
-       }
+
 #if defined(writeq)
-       src_writeq(dev, MUnit.IQ_L, (u64)address);
+               src_writeq(dev, MUnit.IQ_L, (u64)address);
 #else
-       spin_lock_irqsave(&fib->dev->iq_lock, flags);
-       src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
-       src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
-       spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
+               spin_lock_irqsave(&fib->dev->iq_lock, flags);
+               src_writel(dev, MUnit.IQ_H,
+                       upper_32_bits(address) & 0xffffffff);
+               src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
+               spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
 #endif
+       }
        return 0;
 }
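
Both delivery paths end by packing the message size into the low bits of the FIB's DMA address before writing it to the inbound queue register: the address is 128-byte aligned, so the size in 128-byte units, minus one and capped at 31, fits in the free low bits. A sketch with a worked value:

    #include <stdint.h>
    #include <stdio.h>

    /* dma_addr must be 128-byte aligned so its low bits are free. */
    static uint64_t pack_iq_entry(uint64_t dma_addr, uint32_t bytes)
    {
            uint32_t fibsize = (bytes + 127) / 128 - 1;

            if (fibsize > 31)
                    fibsize = 31;
            return dma_addr | fibsize;
    }

    int main(void)
    {
            /* 0x1000 is aligned; 512 bytes -> fibsize 3 -> 0x1003 */
            printf("0x%llx\n", (unsigned long long)pack_iq_entry(0x1000, 512));
            return 0;
    }
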
 
@@ -553,52 +640,117 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
                dev->base = dev->regs.src.bar0 = NULL;
                return 0;
        }
+
+       dev->regs.src.bar1 =
+       ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
+       dev->base = NULL;
+       if (dev->regs.src.bar1 == NULL)
+               return -1;
        dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
-       if (dev->base == NULL)
+       if (dev->base == NULL) {
+               iounmap(dev->regs.src.bar1);
+               dev->regs.src.bar1 = NULL;
                return -1;
+       }
        dev->IndexRegs = &((struct src_registers __iomem *)
                dev->base)->u.denali.IndexRegs;
        return 0;
 }
 
-static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
+static void aac_set_intx_mode(struct aac_dev *dev)
+{
+       if (dev->msi_enabled) {
+               aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+               dev->msi_enabled = 0;
+               msleep(5000); /* Delay 5 seconds */
+       }
+}
+
+static void aac_send_iop_reset(struct aac_dev *dev, int bled)
 {
        u32 var, reset_mask;
 
-       if (bled >= 0) {
-               if (bled)
-                       printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+       bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+                                   0, 0, 0, 0, 0, 0, &var,
+                                   &reset_mask, NULL, NULL, NULL);
+
+       if ((bled || var != 0x00000001) && !dev->doorbell_mask) {
+               bled = -EINVAL;
+       } else if (dev->doorbell_mask) {
+               reset_mask = dev->doorbell_mask;
+               bled = 0;
+               var = 0x00000001;
+       }
+
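+       /* drop back to INTx before asking the IOP to reset */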
+       aac_set_intx_mode(dev);
+
+       if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
+           AAC_OPTION_DOORBELL_RESET)) {
+               src_writel(dev, MUnit.IDR, reset_mask);
+       } else {
+               src_writel(dev, MUnit.IDR, 0x100);
+       }
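+       /* give the controller time to complete the reset */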
+       msleep(30000);
+}
+
+static void aac_send_hardware_soft_reset(struct aac_dev *dev)
+{
+       u32 val;
+
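+       /* assert the soft reset bit in the inbound window register */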
+       val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
+       val |= 0x01;
+       writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
+       msleep_interruptible(20000);
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
+{
+       unsigned long status, start;
+
+       if (bled < 0)
+               goto invalid_out;
+
+       if (bled)
+               pr_err("%s%d: adapter kernel panic'd %x.\n",
                                dev->name, dev->id, bled);
-               dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
-               bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
-                       0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
-               if ((bled || (var != 0x00000001)) &&
-                   !dev->doorbell_mask)
-                       return -EINVAL;
-               else if (dev->doorbell_mask) {
-                       reset_mask = dev->doorbell_mask;
-                       bled = 0;
-                       var = 0x00000001;
-               }
 
-               if ((dev->pdev->device == PMC_DEVICE_S7 ||
-                   dev->pdev->device == PMC_DEVICE_S8 ||
-                   dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) {
-                       aac_src_access_devreg(dev, AAC_ENABLE_INTX);
-                       dev->msi_enabled = 0;
-                       msleep(5000); /* Delay 5 seconds */
-               }
+       dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
 
-               if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
-                   AAC_OPTION_DOORBELL_RESET)) {
-                       src_writel(dev, MUnit.IDR, reset_mask);
-                       ssleep(45);
-               } else {
-                       src_writel(dev, MUnit.IDR, 0x100);
-                       ssleep(45);
+       switch (reset_type) {
+       case IOP_HWSOFT_RESET:
+               aac_send_iop_reset(dev, bled);
+               /*
+                * Wait for the adapter to come up and run the kernel.
+                * If KERNEL_UP_AND_RUNNING is not set within
+                * SOFT_RESET_TIME, issue a HW soft reset and keep
+                * waiting.
+                */
+               status = src_readl(dev, MUnit.OMR);
+               if (dev->sa_firmware &&
+                   !(status & KERNEL_UP_AND_RUNNING)) {
+                       start = jiffies;
+                       do {
+                               status = src_readl(dev, MUnit.OMR);
+                               if (time_after(jiffies, start +
+                                   HZ * SOFT_RESET_TIME)) {
+                                       aac_send_hardware_soft_reset(dev);
+                                       start = jiffies;
+                               }
+                       } while (!(status & KERNEL_UP_AND_RUNNING));
                }
+               break;
+       case HW_SOFT_RESET:
+               if (dev->sa_firmware) {
+                       aac_send_hardware_soft_reset(dev);
+                       aac_set_intx_mode(dev);
+               }
+               break;
+       default:
+               aac_send_iop_reset(dev, bled);
+               break;
        }
 
+invalid_out:
+
        if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
                return -ENODEV;
 
@@ -653,14 +805,15 @@ int aac_src_init(struct aac_dev *dev)
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
        if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0))
+               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
                ++restart;
        /*
         *      Check to see if the board panic'd while booting.
         */
        status = src_readl(dev, MUnit.OMR);
        if (status & KERNEL_PANIC) {
-               if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+               if (aac_src_restart_adapter(dev,
+                       aac_src_check_health(dev), IOP_HWSOFT_RESET))
                        goto error_iounmap;
                ++restart;
        }
@@ -701,7 +854,7 @@ int aac_src_init(struct aac_dev *dev)
                    ? (startup_timeout - 60)
                    : (startup_timeout / 2))))) {
                        if (likely(!aac_src_restart_adapter(dev,
-                           aac_src_check_health(dev))))
+                               aac_src_check_health(dev), IOP_HWSOFT_RESET)))
                                start = jiffies;
                        ++restart;
                }
@@ -798,7 +951,7 @@ int aac_srcv_init(struct aac_dev *dev)
        dev->a_ops.adapter_sync_cmd = src_sync_cmd;
        dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
        if ((aac_reset_devices || reset_devices) &&
-               !aac_src_restart_adapter(dev, 0))
+               !aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
                ++restart;
        /*
         *      Check to see if flash update is running.
@@ -827,7 +980,8 @@ int aac_srcv_init(struct aac_dev *dev)
         */
        status = src_readl(dev, MUnit.OMR);
        if (status & KERNEL_PANIC) {
-               if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+               if (aac_src_restart_adapter(dev,
+                       aac_src_check_health(dev), IOP_HWSOFT_RESET))
                        goto error_iounmap;
                ++restart;
        }
@@ -866,7 +1020,8 @@ int aac_srcv_init(struct aac_dev *dev)
                  ((startup_timeout > 60)
                    ? (startup_timeout - 60)
                    : (startup_timeout / 2))))) {
-                       if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+                       if (likely(!aac_src_restart_adapter(dev,
+                               aac_src_check_health(dev), IOP_HWSOFT_RESET)))
                                start = jiffies;
                        ++restart;
                }
@@ -897,7 +1052,8 @@ int aac_srcv_init(struct aac_dev *dev)
 
        if (aac_init_adapter(dev) == NULL)
                goto error_iounmap;
-       if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
+       if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
+               (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
                goto error_iounmap;
        if (dev->msi_enabled)
                aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
@@ -905,9 +1061,9 @@ int aac_srcv_init(struct aac_dev *dev)
        if (aac_acquire_irq(dev))
                goto error_iounmap;
 
-       dev->dbg_base = dev->base_start;
-       dev->dbg_base_mapped = dev->base;
-       dev->dbg_size = dev->base_size;
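+       /* debug registers are reached through the BAR1 mapping above */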
+       dev->dbg_base = pci_resource_start(dev->pdev, 2);
+       dev->dbg_base_mapped = dev->regs.src.bar1;
+       dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
        dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
 
        aac_adapter_enable_int(dev);
index 105b353..f792420 100644 (file)
@@ -178,37 +178,6 @@ static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
 }
 
 
-#if 0
-/* Dead code... wasn't called anyway :-) and causes some trouble, because at
- * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has
- * to clear the DMA int pending bit before it allows other level 6 interrupts.
- */
-static void scsi_dma_buserr(int irq, void *dummy)
-{
-       unsigned char dma_stat = tt_scsi_dma.dma_ctrl;
-
-       /* Don't do anything if a NCR interrupt is pending. Probably it's just
-        * masked... */
-       if (atari_irq_pending(IRQ_TT_MFP_SCSI))
-               return;
-
-       printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n",
-              SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt));
-       if (dma_stat & 0x80) {
-               if (!scsi_dma_is_ignored_buserr(dma_stat))
-                       printk("SCSI DMA bus error -- bad DMA programming!\n");
-       } else {
-               /* Under normal circumstances we never should get to this point,
-                * since both interrupts are triggered simultaneously and the 5380
-                * int has higher priority. When this irq is handled, that DMA
-                * interrupt is cleared. So a warning message is printed here.
-                */
-               printk("SCSI DMA intr ?? -- this shouldn't happen!\n");
-       }
-}
-#endif
-
-
 static irqreturn_t scsi_tt_intr(int irq, void *dev)
 {
        struct Scsi_Host *instance = dev;
@@ -713,7 +682,8 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
        if (IS_A_TT()) {
                tt_scsi_dma.dma_ctrl = 0;
        } else {
-               st_dma.dma_mode_status = 0x90;
+               if (stdma_is_locked_by(scsi_falcon_intr))
+                       st_dma.dma_mode_status = 0x90;
                atari_dma_active = 0;
                atari_dma_orig_addr = NULL;
        }
@@ -813,7 +783,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
                        return -ENOMEM;
                }
                atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
-               atari_dma_orig_addr = 0;
+               atari_dma_orig_addr = NULL;
        }
 
        instance = scsi_host_alloc(&atari_scsi_template,
index b1d0fdc..ca9440f 100644 (file)
@@ -84,7 +84,6 @@ static inline void queue_tail_inc(struct be_queue_info *q)
 /*ISCSI */
 
 struct be_aic_obj {            /* Adaptive interrupt coalescing (AIC) info */
-       bool enable;
        u32 min_eqd;            /* in usecs */
        u32 max_eqd;            /* in usecs */
        u32 prev_eqd;           /* in usecs */
@@ -94,8 +93,6 @@ struct be_aic_obj {           /* Adaptive interrupt coalescing (AIC) info */
 };
 
 struct be_eq_obj {
-       bool todo_mcc_cq;
-       bool todo_cq;
        u32 cq_count;
        struct be_queue_info q;
        struct beiscsi_hba *phba;
index be65da2..5d59e26 100644 (file)
@@ -676,10 +676,10 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt)
 {
        if (embedded)
-               wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+               wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
        else
-               wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
-                                               MCC_WRB_SGE_CNT_SHIFT;
+               wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
+                                          MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 8);
 }
@@ -1599,7 +1599,7 @@ int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+       struct be_post_sgl_pages_req *req;
        int status;
 
        mutex_lock(&ctrl->mbox_lock);
@@ -1700,31 +1700,34 @@ int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct iscsi_cleanup_req_v1 *req_v1;
        struct iscsi_cleanup_req *req;
+       u16 hdr_ring_id, data_ring_id;
        struct be_mcc_wrb *wrb;
        int status;
 
        mutex_lock(&ctrl->mbox_lock);
        wrb = wrb_from_mbox(&ctrl->mbox_mem);
-       req = embedded_payload(wrb);
-       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-                          OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
 
-       /**
-       * TODO: Check with FW folks the chute value to be set.
-       * For now, use the ULP_MASK as the chute value.
-       */
+       hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
+       data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
        if (is_chip_be2_be3r(phba)) {
+               req = embedded_payload(wrb);
+               be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+               be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+                                  OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
                req->chute = (1 << ulp);
-               req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
-               req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
+               /* BE2/BE3 FW creates 8-bit ring id */
+               req->hdr_ring_id = hdr_ring_id;
+               req->data_ring_id = data_ring_id;
        } else {
-               req_v1 = (struct iscsi_cleanup_req_v1 *)req;
+               req_v1 = embedded_payload(wrb);
+               be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
+               be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
+                                  OPCODE_COMMON_ISCSI_CLEANUP,
+                                  sizeof(*req_v1));
                req_v1->hdr.version = 1;
-               req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba,
-                                                                     ulp));
-               req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba,
-                                                                      ulp));
+               req_v1->chute = (1 << ulp);
+               req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
+               req_v1->data_ring_id = cpu_to_le16(data_ring_id);
        }
 
        status = be_mbox_notify(ctrl);
index 328fb5b..1d40e83 100644 (file)
@@ -31,10 +31,16 @@ struct be_sge {
        __le32 len;
 };
 
-#define MCC_WRB_SGE_CNT_SHIFT 3        /* bits 3 - 7 of dword 0 */
-#define MCC_WRB_SGE_CNT_MASK 0x1F      /* bits 3 - 7 of dword 0 */
 struct be_mcc_wrb {
-       u32 embedded;           /* dword 0 */
+       u32 emb_sgecnt_special; /* dword 0 */
+       /* bits 0 - embedded    */
+       /* bits 1 - 2 reserved  */
+       /* bits 3 - 7 sge count */
+       /* bits 8 - 23 reserved */
+       /* bits 24 - 31 special */
+#define MCC_WRB_EMBEDDED_MASK 1
+#define MCC_WRB_SGE_CNT_SHIFT 3
+#define MCC_WRB_SGE_CNT_MASK 0x1F
        u32 payload_length;     /* dword 1 */
        u32 tag0;               /* dword 2 */
        u32 tag1;               /* dword 3 */
@@ -1133,11 +1139,6 @@ struct tcp_connect_and_offload_out {
 
 } __packed;
 
-struct be_mcc_wrb_context {
-       struct MCC_WRB *wrb;
-       int *users_final_status;
-} __packed;
-
 #define DB_DEF_PDU_RING_ID_MASK        0x3FFF  /* bits 0 - 13 */
 #define DB_DEF_PDU_CQPROC_MASK         0x3FFF  /* bits 16 - 29 */
 #define DB_DEF_PDU_REARM_SHIFT         14
index ba25821..a484457 100644 (file)
@@ -165,33 +165,6 @@ beiscsi_conn_create(struct iscsi_cls_session *cls_session, u32 cid)
        return cls_conn;
 }
 
-/**
- * beiscsi_bindconn_cid - Bind the beiscsi_conn with phba connection table
- * @beiscsi_conn: The pointer to  beiscsi_conn structure
- * @phba: The phba instance
- * @cid: The cid to free
- */
-static int beiscsi_bindconn_cid(struct beiscsi_hba *phba,
-                               struct beiscsi_conn *beiscsi_conn,
-                               unsigned int cid)
-{
-       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
-
-       if (phba->conn_table[cri_index]) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Connection table already occupied. Detected clash\n");
-
-               return -EINVAL;
-       } else {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                           "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n",
-                           cri_index, beiscsi_conn);
-
-               phba->conn_table[cri_index] = beiscsi_conn;
-       }
-       return 0;
-}
-
 /**
  * beiscsi_conn_bind - Binds iscsi session/connection with TCP connection
  * @cls_session: pointer to iscsi cls session
@@ -212,6 +185,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
        struct hwi_wrb_context *pwrb_context;
        struct beiscsi_endpoint *beiscsi_ep;
        struct iscsi_endpoint *ep;
+       uint16_t cri_index;
 
        ep = iscsi_lookup_endpoint(transport_fd);
        if (!ep)
@@ -229,20 +203,34 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 
                return -EEXIST;
        }
-
-       pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(
-                                               beiscsi_ep->ep_cid)];
+       cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
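+       /* an occupied slot is only OK if it is the same conn and ep */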
+       if (phba->conn_table[cri_index]) {
+               if (beiscsi_conn != phba->conn_table[cri_index] ||
+                   beiscsi_ep != phba->conn_table[cri_index]->ep) {
+                       __beiscsi_log(phba, KERN_ERR,
+                                     "BS_%d : conn_table not empty at %u: cid %u conn %p:%p\n",
+                                     cri_index,
+                                     beiscsi_ep->ep_cid,
+                                     beiscsi_conn,
+                                     phba->conn_table[cri_index]);
+                       return -EINVAL;
+               }
+       }
 
        beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;
        beiscsi_conn->ep = beiscsi_ep;
        beiscsi_ep->conn = beiscsi_conn;
+       /**
+        * Each connection is associated with a WRBQ kept in wrb_context.
+        * Store doorbell offset for transmit path.
+        */
+       pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;
-
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                   "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n",
-                   beiscsi_conn, conn, beiscsi_ep->ep_cid);
-
-       return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
+                   "BS_%d : cid %d phba->conn_table[%u]=%p\n",
+                   beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
+       phba->conn_table[cri_index] = beiscsi_conn;
+       return 0;
 }
 
 static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
@@ -973,9 +961,9 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
  */
 static int beiscsi_get_cid(struct beiscsi_hba *phba)
 {
-       unsigned short cid = 0xFFFF, cid_from_ulp;
-       struct ulp_cid_info *cid_info = NULL;
        uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1;
+       unsigned short cid, cid_from_ulp;
+       struct ulp_cid_info *cid_info;
 
        /* Find the ULP which has more CID available */
        cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ?
@@ -984,20 +972,27 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
                          BEISCSI_ULP1_AVLBL_CID(phba) : 0;
        cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ?
                        BEISCSI_ULP0 : BEISCSI_ULP1;
-
-       if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) {
-               cid_info = phba->cid_array_info[cid_from_ulp];
-               if (!cid_info->avlbl_cids)
-                       return cid;
-
-               cid = cid_info->cid_array[cid_info->cid_alloc++];
-
-               if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(
-                                          phba, cid_from_ulp))
-                       cid_info->cid_alloc = 0;
-
-               cid_info->avlbl_cids--;
+       /**
+        * If the iSCSI protocol is loaded only on ULP 0 and the
+        * available CID count is zero for both ULPs, cid_from_ulp
+        * ends up as ULP 1. Check that the chosen ULP is actually
+        * loaded before handing out a new CID.
+        */
+       if (!test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported))
+               return BE_INVALID_CID;
+
+       cid_info = phba->cid_array_info[cid_from_ulp];
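+       /* a slot not yet refilled by beiscsi_put_cid holds BE_INVALID_CID */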
+       cid = cid_info->cid_array[cid_info->cid_alloc];
+       if (!cid_info->avlbl_cids || cid == BE_INVALID_CID) {
+               __beiscsi_log(phba, KERN_ERR,
+                               "BS_%d : failed to get cid: available %u:%u\n",
+                               cid_info->avlbl_cids, cid_info->cid_free);
+               return BE_INVALID_CID;
        }
+       /* empty the slot */
+       cid_info->cid_array[cid_info->cid_alloc++] = BE_INVALID_CID;
+       if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT(phba, cid_from_ulp))
+               cid_info->cid_alloc = 0;
+       cid_info->avlbl_cids--;
        return cid;
 }
 
@@ -1008,22 +1003,28 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)
  */
 static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)
 {
-       uint16_t cid_post_ulp;
-       struct hwi_controller *phwi_ctrlr;
-       struct hwi_wrb_context *pwrb_context;
-       struct ulp_cid_info *cid_info = NULL;
        uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
+       struct hwi_wrb_context *pwrb_context;
+       struct hwi_controller *phwi_ctrlr;
+       struct ulp_cid_info *cid_info;
+       uint16_t cid_post_ulp;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        cid_post_ulp = pwrb_context->ulp_num;
 
        cid_info = phba->cid_array_info[cid_post_ulp];
-       cid_info->avlbl_cids++;
-
+       /* only refill an empty slot */
+       if (cid_info->cid_array[cid_info->cid_free] != BE_INVALID_CID) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BS_%d : failed to put cid %u: available %u:%u\n",
+                             cid, cid_info->avlbl_cids, cid_info->cid_free);
+               return;
+       }
        cid_info->cid_array[cid_info->cid_free++] = cid;
        if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp))
                cid_info->cid_free = 0;
+       cid_info->avlbl_cids++;
 }
 
 /**
@@ -1037,8 +1038,8 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
 
        beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
        beiscsi_ep->phba = NULL;
-       phba->ep_array[BE_GET_CRI_FROM_CID
-                      (beiscsi_ep->ep_cid)] = NULL;
+       /* clear this to track freeing in beiscsi_ep_disconnect */
+       phba->ep_array[BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid)] = NULL;
 
        /**
         * Check if any connection resource allocated by driver
@@ -1049,6 +1050,11 @@ static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep)
                return;
 
        beiscsi_conn = beiscsi_ep->conn;
+       /**
+        * Break ep->conn link here so that completions after
+        * this are ignored.
+        */
+       beiscsi_ep->conn = NULL;
        if (beiscsi_conn->login_in_progress) {
                beiscsi_free_mgmt_task_handles(beiscsi_conn,
                                               beiscsi_conn->task);
@@ -1079,7 +1085,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                    "BS_%d : In beiscsi_open_conn\n");
 
        beiscsi_ep->ep_cid = beiscsi_get_cid(phba);
-       if (beiscsi_ep->ep_cid == 0xFFFF) {
+       if (beiscsi_ep->ep_cid == BE_INVALID_CID) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : No free cid available\n");
                return ret;
@@ -1114,7 +1120,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        nonemb_cmd.size = req_memsize;
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
-       if (tag <= 0) {
+       if (!tag) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
                            "BS_%d : mgmt_open_connection Failed for cid=%d\n",
                            beiscsi_ep->ep_cid);
@@ -1284,26 +1290,6 @@ static int beiscsi_close_conn(struct  beiscsi_endpoint *beiscsi_ep, int flag)
        return ret;
 }
 
-/**
- * beiscsi_unbind_conn_to_cid - Unbind the beiscsi_conn from phba conn table
- * @phba: The phba instance
- * @cid: The cid to free
- */
-static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba,
-                                     unsigned int cid)
-{
-       uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
-
-       if (phba->conn_table[cri_index])
-               phba->conn_table[cri_index] = NULL;
-       else {
-               beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                           "BS_%d : Connection table Not occupied.\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
 /**
  * beiscsi_ep_disconnect - Tears down the TCP connection
  * @ep:        endpoint to be used
@@ -1318,13 +1304,23 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
        unsigned int tag;
        uint8_t mgmt_invalidate_flag, tcp_upload_flag;
        unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
+       uint16_t cri_index;
 
        beiscsi_ep = ep->dd_data;
        phba = beiscsi_ep->phba;
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-                   "BS_%d : In beiscsi_ep_disconnect for ep_cid = %d\n",
+                   "BS_%d : In beiscsi_ep_disconnect for ep_cid = %u\n",
                    beiscsi_ep->ep_cid);
 
+       cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
+       if (!phba->ep_array[cri_index]) {
+               __beiscsi_log(phba, KERN_ERR,
+                             "BS_%d : ep_array at %u cid %u empty\n",
+                             cri_index,
+                             beiscsi_ep->ep_cid);
+               return;
+       }
+
        if (beiscsi_ep->conn) {
                beiscsi_conn = beiscsi_ep->conn;
                iscsi_suspend_queue(beiscsi_conn->conn);
@@ -1356,7 +1352,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 free_ep:
        msleep(BEISCSI_LOGOUT_SYNC_DELAY);
        beiscsi_free_ep(beiscsi_ep);
-       beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
+       if (!phba->conn_table[cri_index])
+               __beiscsi_log(phba, KERN_ERR,
+                               "BS_%d : conn_table empty at %u: cid %u\n",
+                               cri_index,
+                               beiscsi_ep->ep_cid);
+       phba->conn_table[cri_index] = NULL;
        iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
 }
 
index b5112d6..32b2713 100644 (file)
@@ -67,8 +67,6 @@ beiscsi_##_name##_disp(struct device *dev,\
 {      \
        struct Scsi_Host *shost = class_to_shost(dev);\
        struct beiscsi_hba *phba = iscsi_host_priv(shost); \
-       uint32_t param_val = 0; \
-       param_val = phba->attr_##_name;\
        return snprintf(buf, PAGE_SIZE, "%d\n",\
                        phba->attr_##_name);\
 }
@@ -218,160 +216,156 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
 
 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
 {
+       struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
        struct iscsi_cls_session *cls_session;
-       struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
-       struct beiscsi_io_task *aborted_io_task;
-       struct iscsi_conn *conn;
+       struct beiscsi_io_task *abrt_io_task;
        struct beiscsi_conn *beiscsi_conn;
-       struct beiscsi_hba *phba;
        struct iscsi_session *session;
-       struct invalidate_command_table *inv_tbl;
-       struct be_dma_mem nonemb_cmd;
-       unsigned int cid, tag, num_invalidate;
+       struct invldt_cmd_tbl inv_tbl;
+       struct beiscsi_hba *phba;
+       struct iscsi_conn *conn;
        int rc;
 
        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;
 
-       spin_lock_bh(&session->frwd_lock);
-       if (!aborted_task || !aborted_task->sc) {
-               /* we raced */
-               spin_unlock_bh(&session->frwd_lock);
-               return SUCCESS;
-       }
-
-       aborted_io_task = aborted_task->dd_data;
-       if (!aborted_io_task->scsi_cmnd) {
-               /* raced or invalid command */
-               spin_unlock_bh(&session->frwd_lock);
+       /* check if we raced, task just got cleaned up under us */
+       spin_lock_bh(&session->back_lock);
+       if (!abrt_task || !abrt_task->sc) {
+               spin_unlock_bh(&session->back_lock);
                return SUCCESS;
        }
-       spin_unlock_bh(&session->frwd_lock);
-       /* Invalidate WRB Posted for this Task */
-       AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
-                     aborted_io_task->pwrb_handle->pwrb,
-                     1);
-
-       conn = aborted_task->conn;
+       /* get a task ref till FW processes the req for the ICD used */
+       __iscsi_get_task(abrt_task);
+       abrt_io_task = abrt_task->dd_data;
+       conn = abrt_task->conn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;
-
-       /* invalidate iocb */
-       cid = beiscsi_conn->beiscsi_conn_cid;
-       inv_tbl = phba->inv_tbl;
-       memset(inv_tbl, 0x0, sizeof(*inv_tbl));
-       inv_tbl->cid = cid;
-       inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
-       num_invalidate = 1;
-       nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
-                               sizeof(struct invalidate_commands_params_in),
-                               &nonemb_cmd.dma);
-       if (nonemb_cmd.va == NULL) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
-                           "BM_%d : Failed to allocate memory for"
-                           "mgmt_invalidate_icds\n");
-               return FAILED;
+       /* mark the WRB invalid in case FW has not processed it yet */
+       if (is_chip_be2_be3r(phba)) {
+               AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+                             abrt_io_task->pwrb_handle->pwrb, 1);
+       } else {
+               AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
+                             abrt_io_task->pwrb_handle->pwrb, 1);
        }
-       nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
+       inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
+       inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
+       spin_unlock_bh(&session->back_lock);
 
-       tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
-                                  cid, &nonemb_cmd);
-       if (!tag) {
+       rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
+       iscsi_put_task(abrt_task);
+       if (rc) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
-                           "BM_%d : mgmt_invalidate_icds could not be"
-                           "submitted\n");
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                                   nonemb_cmd.va, nonemb_cmd.dma);
-
+                           "BM_%d : sc %p invalidation failed %d\n",
+                           sc, rc);
                return FAILED;
        }
 
-       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
-       if (rc != -EBUSY)
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                                   nonemb_cmd.va, nonemb_cmd.dma);
-
        return iscsi_eh_abort(sc);
 }
 
 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 {
-       struct iscsi_task *abrt_task;
-       struct beiscsi_io_task *abrt_io_task;
-       struct iscsi_conn *conn;
+       struct beiscsi_invldt_cmd_tbl {
+               struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
+               struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
+       } *inv_tbl;
+       struct iscsi_cls_session *cls_session;
        struct beiscsi_conn *beiscsi_conn;
-       struct beiscsi_hba *phba;
+       struct beiscsi_io_task *io_task;
        struct iscsi_session *session;
-       struct iscsi_cls_session *cls_session;
-       struct invalidate_command_table *inv_tbl;
-       struct be_dma_mem nonemb_cmd;
-       unsigned int cid, tag, i, num_invalidate;
-       int rc;
+       struct beiscsi_hba *phba;
+       struct iscsi_conn *conn;
+       struct iscsi_task *task;
+       unsigned int i, nents;
+       int rc, more = 0;
 
-       /* invalidate iocbs */
        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;
+
        spin_lock_bh(&session->frwd_lock);
        if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
                spin_unlock_bh(&session->frwd_lock);
                return FAILED;
        }
+
        conn = session->leadconn;
        beiscsi_conn = conn->dd_data;
        phba = beiscsi_conn->phba;
-       cid = beiscsi_conn->beiscsi_conn_cid;
-       inv_tbl = phba->inv_tbl;
-       memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
-       num_invalidate = 0;
+
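+       /* frwd_lock is held, so the allocation must not sleep */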
+       inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
+       if (!inv_tbl) {
+               spin_unlock_bh(&session->frwd_lock);
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+                           "BM_%d : invldt_cmd_tbl alloc failed\n");
+               return FAILED;
+       }
+       nents = 0;
+       /* take back_lock to prevent task from getting cleaned up under us */
+       spin_lock(&session->back_lock);
        for (i = 0; i < conn->session->cmds_max; i++) {
-               abrt_task = conn->session->cmds[i];
-               abrt_io_task = abrt_task->dd_data;
-               if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+               task = conn->session->cmds[i];
+               if (!task->sc)
                        continue;
 
-               if (sc->device->lun != abrt_task->sc->device->lun)
+               if (sc->device->lun != task->sc->device->lun)
                        continue;
+               /**
+                * Can't fit in more cmds? Normally this won't happen because
+                * BEISCSI_CMD_PER_LUN is the same as BE_INVLDT_CMD_TBL_SZ.
+                */
+               if (nents == BE_INVLDT_CMD_TBL_SZ) {
+                       more = 1;
+                       break;
+               }
 
-               /* Invalidate WRB Posted for this Task */
-               AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
-                             abrt_io_task->pwrb_handle->pwrb,
-                             1);
+               /* get a task ref till FW processes the req for the ICD used */
+               __iscsi_get_task(task);
+               io_task = task->dd_data;
+               /* mark the WRB invalid in case FW has not processed it yet */
+               if (is_chip_be2_be3r(phba)) {
+                       AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
+                                     io_task->pwrb_handle->pwrb, 1);
+               } else {
+                       AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
+                                     io_task->pwrb_handle->pwrb, 1);
+               }
 
-               inv_tbl->cid = cid;
-               inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
-               num_invalidate++;
-               inv_tbl++;
+               inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
+               inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
+               inv_tbl->task[nents] = task;
+               nents++;
        }
+       spin_unlock_bh(&session->back_lock);
        spin_unlock_bh(&session->frwd_lock);
-       inv_tbl = phba->inv_tbl;
 
-       nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
-                               sizeof(struct invalidate_commands_params_in),
-                               &nonemb_cmd.dma);
-       if (nonemb_cmd.va == NULL) {
+       rc = SUCCESS;
+       if (!nents)
+               goto end_reset;
+
+       if (more) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
-                           "BM_%d : Failed to allocate memory for"
-                           "mgmt_invalidate_icds\n");
-               return FAILED;
+                           "BM_%d : number of cmds exceeds size of invalidation table\n");
+               rc = FAILED;
+               goto end_reset;
        }
-       nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
-       memset(nonemb_cmd.va, 0, nonemb_cmd.size);
-       tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
-                                  cid, &nonemb_cmd);
-       if (!tag) {
+
+       if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
                beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
-                           "BM_%d : mgmt_invalidate_icds could not be"
-                           " submitted\n");
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                                   nonemb_cmd.va, nonemb_cmd.dma);
-               return FAILED;
+                           "BM_%d : cid %u scmds invalidation failed\n",
+                           beiscsi_conn->beiscsi_conn_cid);
+               rc = FAILED;
        }
 
-       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
-       if (rc != -EBUSY)
-               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-                                   nonemb_cmd.va, nonemb_cmd.dma);
-       return iscsi_eh_device_reset(sc);
+end_reset:
+       for (i = 0; i < nents; i++)
+               iscsi_put_task(inv_tbl->task[i]);
+       kfree(inv_tbl);
+
+       if (rc == SUCCESS)
+               rc = iscsi_eh_device_reset(sc);
+       return rc;
 }
 
 /*------------------- PCI Driver operations and data ----------------- */
@@ -395,6 +389,7 @@ static struct scsi_host_template beiscsi_sht = {
        .change_queue_depth = scsi_change_queue_depth,
        .slave_configure = beiscsi_slave_configure,
        .target_alloc = iscsi_target_alloc,
+       .eh_timed_out = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = beiscsi_eh_abort,
        .eh_device_reset_handler = beiscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_session_reset,
@@ -646,7 +641,6 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
        phba->params.num_sge_per_io = BE2_SGE;
        phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
        phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
-       phba->params.eq_timer = 64;
        phba->params.num_eq_entries = 1024;
        phba->params.num_cq_entries = 1024;
        phba->params.wrbs_per_cxn = 256;
@@ -964,6 +958,10 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
        unsigned long flags;
 
        spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
+       if (!pwrb_context->wrb_handles_available) {
+               spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
+               return NULL;
+       }
        pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
        pwrb_context->wrb_handles_available--;
        if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
@@ -1014,6 +1012,7 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
                pwrb_context->free_index = 0;
        else
                pwrb_context->free_index++;
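+       /* clear the task link so stale completions are ignored */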
+       pwrb_handle->pio_handle = NULL;
        spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
 }
 
@@ -1224,6 +1223,7 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        uint16_t wrb_index, cid, cri_index;
        struct hwi_controller *phwi_ctrlr;
        struct wrb_handle *pwrb_handle;
+       struct iscsi_session *session;
        struct iscsi_task *task;
 
        phwi_ctrlr = phba->phwi_ctrlr;
@@ -1242,8 +1242,12 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
        cri_index = BE_GET_CRI_FROM_CID(cid);
        pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
        pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
+       session = beiscsi_conn->conn->session;
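+       /* back_lock keeps the task from being freed under us */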
+       spin_lock_bh(&session->back_lock);
        task = pwrb_handle->pio_handle;
-       iscsi_put_task(task);
+       if (task)
+               __iscsi_put_task(task);
+       spin_unlock_bh(&session->back_lock);
 }
 
 static void
@@ -1323,16 +1327,15 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
                             struct beiscsi_hba *phba, struct sol_cqe *psol)
 {
-       struct hwi_wrb_context *pwrb_context;
-       struct wrb_handle *pwrb_handle;
-       struct iscsi_wrb *pwrb = NULL;
-       struct hwi_controller *phwi_ctrlr;
-       struct iscsi_task *task;
-       unsigned int type;
        struct iscsi_conn *conn = beiscsi_conn->conn;
        struct iscsi_session *session = conn->session;
        struct common_sol_cqe csol_cqe = {0};
+       struct hwi_wrb_context *pwrb_context;
+       struct hwi_controller *phwi_ctrlr;
+       struct wrb_handle *pwrb_handle;
+       struct iscsi_task *task;
        uint16_t cri_index = 0;
+       uint8_t type;
 
        phwi_ctrlr = phba->phwi_ctrlr;
 
@@ -1345,11 +1348,14 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
        pwrb_handle = pwrb_context->pwrb_handle_basestd[
                      csol_cqe.wrb_index];
 
+       spin_lock_bh(&session->back_lock);
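+       /* pio_handle is cleared once the WRB handle is freed */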
        task = pwrb_handle->pio_handle;
-       pwrb = pwrb_handle->pwrb;
+       if (!task) {
+               spin_unlock_bh(&session->back_lock);
+               return;
+       }
        type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
 
-       spin_lock_bh(&session->back_lock);
        switch (type) {
        case HWH_TYPE_IO:
        case HWH_TYPE_IO_RD:
@@ -1711,13 +1717,12 @@ beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
        struct list_head *hfree_list;
        struct phys_addr *pasync_sge;
        u32 ring_id, doorbell = 0;
-       u16 index, num_entries;
        u32 doorbell_offset;
        u16 prod = 0, cons;
+       u16 index;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
-       num_entries = pasync_ctx->num_entries;
        if (header) {
                cons = pasync_ctx->async_header.free_entries;
                hfree_list = &pasync_ctx->async_header.free_list;
@@ -2374,13 +2379,10 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 {
        uint8_t mem_descr_index, ulp_num;
-       unsigned int num_cq_pages, num_async_pdu_buf_pages;
+       unsigned int num_async_pdu_buf_pages;
        unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
        unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
 
-       num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
-                                     sizeof(struct sol_cqe));
-
        phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
 
        phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
@@ -2737,7 +2739,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
        for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
                if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-                        /* get async_ctx for each ULP */
+                       /* get async_ctx for each ULP */
                        mem_descr = (struct be_mem_descriptor *)phba->init_mem;
                        mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
                                     (ulp_num * MEM_DESCR_OFFSET));
@@ -3367,7 +3369,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                         struct hwi_context_memory *phwi_context,
                         struct hwi_controller *phwi_ctrlr)
 {
-       unsigned int wrb_mem_index, offset, size, num_wrb_rings;
+       unsigned int num_wrb_rings;
        u64 pa_addr_lo;
        unsigned int idx, num, i, ulp_num;
        struct mem_array *pwrb_arr;
@@ -3432,10 +3434,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
                }
 
        for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-               wrb_mem_index = 0;
-               offset = 0;
-               size = 0;
-
                if (ulp_count > 1) {
                        ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
 
@@ -3663,7 +3661,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
-       struct hd_async_context *pasync_ctx;
        int i, eq_for_mcc, ulp_num;
 
        for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
@@ -3700,8 +3697,6 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
                        q = &phwi_context->be_def_dataq[ulp_num];
                        if (q->created)
                                beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
-                       pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
                }
        }
 
@@ -3804,7 +3799,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
                        /**
                         * Now that the default PDU rings have been created,
                         * let EP know about it.
-                        * Call beiscsi_cmd_iscsi_cleanup before posting?
                         */
                        beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
                                                 ulp_num);
@@ -3850,14 +3844,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
                                        phwi_ctrlr->wrb_context[cri].cid] =
                                        async_arr_idx++;
                        }
-                       /**
-                        * Now that the default PDU rings have been created,
-                        * let EP know about it.
-                        */
-                       beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
-                                                ulp_num);
-                       beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
-                                                ulp_num);
                }
        }
 
@@ -3934,31 +3920,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
        kfree(phba->phwi_ctrlr);
 }
 
-static int beiscsi_init_controller(struct beiscsi_hba *phba)
-{
-       int ret = -ENOMEM;
-
-       ret = beiscsi_get_memory(phba);
-       if (ret < 0) {
-               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_dev_probe -"
-                           "Failed in beiscsi_alloc_memory\n");
-               return ret;
-       }
-
-       ret = hwi_init_controller(phba);
-       if (ret)
-               goto free_init;
-       beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-                   "BM_%d : Return success from beiscsi_init_controller");
-
-       return 0;
-
-free_init:
-       beiscsi_free_mem(phba);
-       return ret;
-}
-
 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 {
        struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
@@ -4089,9 +4050,10 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                        }
 
                        /* Allocate memory for CID array */
-                       ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
-                                                 BEISCSI_GET_CID_COUNT(phba,
-                                                 ulp_num), GFP_KERNEL);
+                       ptr_cid_info->cid_array =
+                               kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
+                                       sizeof(*ptr_cid_info->cid_array),
+                                       GFP_KERNEL);
                        if (!ptr_cid_info->cid_array) {
                                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                                            "BM_%d : Failed to allocate memory"
@@ -4231,33 +4193,30 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 {
        int ret;
 
-       ret = beiscsi_init_controller(phba);
+       ret = hwi_init_controller(phba);
        if (ret < 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_dev_probe - Failed in"
-                           "beiscsi_init_controller\n");
+                           "BM_%d : init controller failed\n");
                return ret;
        }
        ret = beiscsi_init_sgl_handle(phba);
        if (ret < 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_dev_probe - Failed in"
-                           "beiscsi_init_sgl_handle\n");
-               goto do_cleanup_ctrlr;
+                           "BM_%d : init sgl handles failed\n");
+               goto cleanup_port;
        }
 
        ret = hba_setup_cid_tbls(phba);
        if (ret < 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : Failed in hba_setup_cid_tbls\n");
+                           "BM_%d : setup CID table failed\n");
                kfree(phba->io_sgl_hndl_base);
                kfree(phba->eh_sgl_hndl_base);
-               goto do_cleanup_ctrlr;
+               goto cleanup_port;
        }
-
        return ret;
 
-do_cleanup_ctrlr:
+cleanup_port:
        hwi_cleanup_port(phba);
        return ret;
 }
@@ -5417,10 +5376,10 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
 
        phba->shost->max_id = phba->params.cxns_per_ctrl;
        phba->shost->can_queue = phba->params.ios_per_ctrl;
-       ret = hwi_init_controller(phba);
-       if (ret) {
+       ret = beiscsi_init_port(phba);
+       if (ret < 0) {
                __beiscsi_log(phba, KERN_ERR,
-                             "BM_%d : init controller failed %d\n", ret);
+                             "BM_%d : init port failed\n");
                goto disable_msix;
        }
 
@@ -5526,6 +5485,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
                cancel_work_sync(&pbe_eq->mcc_work);
        }
        hwi_cleanup_port(phba);
+       beiscsi_cleanup_port(phba);
 }
 
 static void beiscsi_sess_work(struct work_struct *work)
@@ -5638,11 +5598,12 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
 static int beiscsi_dev_probe(struct pci_dev *pcidev,
                             const struct pci_device_id *id)
 {
-       struct beiscsi_hba *phba = NULL;
-       struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
+       struct hwi_controller *phwi_ctrlr;
+       struct beiscsi_hba *phba = NULL;
        struct be_eq_obj *pbe_eq;
        unsigned int s_handle;
+       char wq_name[20];
        int ret, i;
 
        ret = beiscsi_enable_pci(pcidev);
@@ -5680,6 +5641,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        case OC_DEVICE_ID2:
                phba->generation = BE_GEN2;
                phba->iotask_fn = beiscsi_iotask;
+               dev_warn(&pcidev->dev,
+                        "Obsolete/Unsupported BE2 Adapter Family\n");
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID3:
@@ -5735,11 +5698,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 
        phba->shost->max_id = phba->params.cxns_per_ctrl;
        phba->shost->can_queue = phba->params.ios_per_ctrl;
+       ret = beiscsi_get_memory(phba);
+       if (ret < 0) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+                           "BM_%d : alloc host mem failed\n");
+               goto free_port;
+       }
+
        ret = beiscsi_init_port(phba);
        if (ret < 0) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-                           "BM_%d : beiscsi_dev_probe-"
-                           "Failed in beiscsi_init_port\n");
+                           "BM_%d : init port failed\n");
+               beiscsi_free_mem(phba);
                goto free_port;
        }
 
@@ -5754,9 +5724,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 
        phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
 
-       snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
+       snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
                 phba->shost->host_no);
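+       /* alloc_workqueue() keeps its own copy of the formatted name */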
-       phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
+       phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
        if (!phba->wq) {
                beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
                            "BM_%d : beiscsi_dev_probe-"
@@ -5881,7 +5851,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
 
        /* free all resources */
        destroy_workqueue(phba->wq);
-       beiscsi_cleanup_port(phba);
        beiscsi_free_mem(phba);
 
        /* ctrl uninit */
index 6376657..2188579 100644 (file)
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "11.2.0.0"
+#define BUILD_STR              "11.2.1.0"
 #define BE_NAME                        "Emulex OneConnect " \
                                "Open-iSCSI Driver version " BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -57,7 +57,6 @@
 
 #define BE2_IO_DEPTH           1024
 #define BE2_MAX_SESSIONS       256
-#define BE2_CMDS_PER_CXN       128
 #define BE2_TMFS               16
 #define BE2_NOPOUT_REQ         16
 #define BE2_SGE                        32
 
 #define BEISCSI_SGLIST_ELEMENTS        30
 
-#define BEISCSI_CMD_PER_LUN    128 /* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS    1024 /* scsi_host->max_sectors */
+/**
+ * BE_INVLDT_CMD_TBL_SZ is 128, the total number of commands that can
+ * be invalidated at a time; consider this before changing the value
+ * of BEISCSI_CMD_PER_LUN.
+ */
+#define BEISCSI_CMD_PER_LUN    128     /* scsi_host->cmd_per_lun */
+#define BEISCSI_MAX_SECTORS    1024    /* scsi_host->max_sectors */
 #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
 
 #define BEISCSI_MAX_CMD_LEN    16      /* scsi_host->max_cmd_len */
@@ -239,19 +243,7 @@ struct hba_parameters {
        unsigned int num_cq_entries;
        unsigned int num_eq_entries;
        unsigned int wrbs_per_cxn;
-       unsigned int crashmode;
-       unsigned int hba_num;
-
-       unsigned int mgmt_ws_sz;
        unsigned int hwi_ws_sz;
-
-       unsigned int eto;
-       unsigned int ldto;
-
-       unsigned int dbg_flags;
-       unsigned int num_cxn;
-
-       unsigned int eq_timer;
        /**
         * These are calculated from other params. They're here
         * for debug purposes
@@ -272,11 +264,6 @@ struct hba_parameters {
        unsigned int num_sge;
 };
 
-struct invalidate_command_table {
-       unsigned short icd;
-       unsigned short cid;
-} __packed;
-
 #define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \
        (phwi_ctrlr->wrb_context[cri].ulp_num)
 struct hwi_wrb_context {
@@ -334,7 +321,6 @@ struct beiscsi_hba {
        struct be_bus_address pci_pa;   /* CSR */
        /* PCI representation of our HBA */
        struct pci_dev *pcidev;
-       unsigned short asic_revision;
        unsigned int num_cpus;
        unsigned int nxt_cqid;
        struct msix_entry msix_entries[MAX_CPUS];
@@ -355,9 +341,9 @@ struct beiscsi_hba {
        spinlock_t io_sgl_lock;
        spinlock_t mgmt_sgl_lock;
        spinlock_t async_pdu_lock;
-       unsigned int age;
        struct list_head hba_queue;
 #define BE_MAX_SESSION 2048
+#define BE_INVALID_CID 0xffff
 #define BE_SET_CID_TO_CRI(cri_index, cid) \
                          (phba->cid_to_cri_map[cid] = cri_index)
 #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])
@@ -425,12 +411,10 @@ struct beiscsi_hba {
        u8 port_name;
        u8 port_speed;
        char fw_ver_str[BEISCSI_VER_STRLEN];
-       char wq_name[20];
        struct workqueue_struct *wq;    /* The actual work queue */
        struct be_ctrl_info ctrl;
        unsigned int generation;
        unsigned int interface_handle;
-       struct invalidate_command_table inv_tbl[128];
 
        struct be_aic_obj aic_obj[MAX_CPUS];
        unsigned int attr_log_enable;
@@ -525,10 +509,6 @@ struct beiscsi_io_task {
        struct scsi_cmnd *scsi_cmnd;
        int num_sg;
        struct hwi_wrb_context *pwrb_context;
-       unsigned int cmd_sn;
-       unsigned int flags;
-       unsigned short cid;
-       unsigned short header_len;
        itt_t libiscsi_itt;
        struct be_cmd_bhs *cmd_bhs;
        struct be_bus_address bhs_pa;
@@ -842,7 +822,7 @@ struct amap_iscsi_wrb_v2 {
        u8 diff_enbl;   /* DWORD 11 */
        u8 u_run;       /* DWORD 11 */
        u8 o_run;       /* DWORD 11 */
-       u8 invalid;     /* DWORD 11 */
+       u8 invld;       /* DWORD 11 */
        u8 dsp;         /* DWORD 11 */
        u8 dmsg;        /* DWORD 11 */
        u8 rsvd4;       /* DWORD 11 */
@@ -1042,10 +1022,8 @@ struct hwi_controller {
        struct list_head io_sgl_list;
        struct list_head eh_sgl_list;
        struct sgl_handle *psgl_handle_base;
-       unsigned int wrb_mem_index;
 
        struct hwi_wrb_context *wrb_context;
-       struct mcc_wrb *pmcc_wrb_base;
        struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT];
        struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];
        struct hwi_context_memory *phwi_ctxt;
@@ -1062,9 +1040,7 @@ enum hwh_type_enum {
 };
 
 struct wrb_handle {
-       enum hwh_type_enum type;
        unsigned short wrb_index;
-
        struct iscsi_task *pio_handle;
        struct iscsi_wrb *pwrb;
 };
index ac05317..2f6d5c2 100644
@@ -66,7 +66,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                                         struct bsg_job *job,
                                         struct be_dma_mem *nonemb_cmd)
 {
-       struct be_cmd_resp_hdr *resp;
        struct be_mcc_wrb *wrb;
        struct be_sge *mcc_sge;
        unsigned int tag = 0;
@@ -76,7 +75,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 
        nonemb_cmd->size = job->request_payload.payload_len;
        memset(nonemb_cmd->va, 0, nonemb_cmd->size);
-       resp = nonemb_cmd->va;
        region =  bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        sector_size =  bsg_req->rqst_data.h_vendor.vendor_cmd[2];
        sector =  bsg_req->rqst_data.h_vendor.vendor_cmd[3];
@@ -128,50 +126,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
        return tag;
 }
 
-unsigned int  mgmt_invalidate_icds(struct beiscsi_hba *phba,
-                               struct invalidate_command_table *inv_tbl,
-                               unsigned int num_invalidate, unsigned int cid,
-                               struct be_dma_mem *nonemb_cmd)
-
-{
-       struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb;
-       struct be_sge *sge;
-       struct invalidate_commands_params_in *req;
-       unsigned int i, tag;
-
-       mutex_lock(&ctrl->mbox_lock);
-       wrb = alloc_mcc_wrb(phba, &tag);
-       if (!wrb) {
-               mutex_unlock(&ctrl->mbox_lock);
-               return 0;
-       }
-
-       req = nonemb_cmd->va;
-       memset(req, 0, sizeof(*req));
-       sge = nonembedded_sgl(wrb);
-
-       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
-       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-                       OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
-                       sizeof(*req));
-       req->ref_handle = 0;
-       req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
-       for (i = 0; i < num_invalidate; i++) {
-               req->table[i].icd = inv_tbl->icd;
-               req->table[i].cid = inv_tbl->cid;
-               req->icd_count++;
-               inv_tbl++;
-       }
-       sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
-       sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
-       sge->len = cpu_to_le32(nonemb_cmd->size);
-
-       be_mcc_notify(phba, tag);
-       mutex_unlock(&ctrl->mbox_lock);
-       return tag;
-}
-
 unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
                                         struct beiscsi_endpoint *beiscsi_ep,
                                         unsigned short cid,
@@ -1066,7 +1020,6 @@ unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
 unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_cmd_get_session_resp *resp;
        struct be_cmd_get_session_req *req;
        struct be_dma_mem *nonemb_cmd;
        struct be_mcc_wrb *wrb;
@@ -1081,7 +1034,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
        }
 
        nonemb_cmd = &phba->boot_struct.nonemb_cmd;
-       nonemb_cmd->size = sizeof(*resp);
+       nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
        nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
                                              nonemb_cmd->size,
                                              &nonemb_cmd->dma);
@@ -1096,7 +1049,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
                           OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
-                          sizeof(*resp));
+                          sizeof(struct be_cmd_get_session_resp));
        req->session_handle = phba->boot_struct.s_handle;
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
@@ -1309,7 +1262,8 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
        case OC_DEVICE_ID2:
-               return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n");
+               return snprintf(buf, PAGE_SIZE,
+                               "Obsolete/Unsupported BE2 Adapter Family\n");
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID3:
@@ -1341,7 +1295,7 @@ beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct beiscsi_hba *phba = iscsi_host_priv(shost);
 
-       return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n",
+       return snprintf(buf, PAGE_SIZE, "Port Identifier : %u\n",
                        phba->fw_config.phys_port);
 }
 
@@ -1494,3 +1448,64 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
                     (params->dw[offsetof(struct amap_beiscsi_offload_params,
                      exp_statsn) / 32] + 1));
 }
+
+int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
+                                struct invldt_cmd_tbl *inv_tbl,
+                                unsigned int nents)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct invldt_cmds_params_in *req;
+       struct be_dma_mem nonemb_cmd;
+       struct be_mcc_wrb *wrb;
+       unsigned int i, tag;
+       struct be_sge *sge;
+       int rc;
+
+       if (!nents || nents > BE_INVLDT_CMD_TBL_SZ)
+               return -EINVAL;
+
+       nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
+       nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+                                             nonemb_cmd.size,
+                                             &nonemb_cmd.dma);
+       if (!nonemb_cmd.va) {
+               beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
+                           "BM_%d : invldt_cmds_params alloc failed\n");
+               return -ENOMEM;
+       }
+
+       mutex_lock(&ctrl->mbox_lock);
+       wrb = alloc_mcc_wrb(phba, &tag);
+       if (!wrb) {
+               mutex_unlock(&ctrl->mbox_lock);
+               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                                   nonemb_cmd.va, nonemb_cmd.dma);
+               return -ENOMEM;
+       }
+
+       req = nonemb_cmd.va;
+       be_wrb_hdr_prepare(wrb, nonemb_cmd.size, false, 1);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+                       OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
+                       sizeof(*req));
+       req->ref_handle = 0;
+       req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
+       for (i = 0; i < nents; i++) {
+               req->table[i].icd = inv_tbl[i].icd;
+               req->table[i].cid = inv_tbl[i].cid;
+               req->icd_count++;
+       }
+       sge = nonembedded_sgl(wrb);
+       sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+       sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd.dma));
+       sge->len = cpu_to_le32(nonemb_cmd.size);
+
+       be_mcc_notify(phba, tag);
+       mutex_unlock(&ctrl->mbox_lock);
+
+       rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
+       if (rc != -EBUSY)
+               pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+                                   nonemb_cmd.va, nonemb_cmd.dma);
+       return rc;
+}
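
This new entry point replaces mgmt_invalidate_icds(): callers build the
invalidation table themselves and get a proper errno back. A hedged sketch of
a caller, using the invldt_cmd_tbl layout declared in be_mgmt.h below (the
helper name and surrounding error-recovery context are assumptions):

    /* Invalidate a single outstanding command on the given connection. */
    static int example_invalidate_one(struct beiscsi_hba *phba,
                                      unsigned short icd, unsigned short cid)
    {
            struct invldt_cmd_tbl inv_tbl = {
                    .icd = icd,     /* IO context descriptor to cancel */
                    .cid = cid,     /* connection it was issued on */
            };

            return beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
    }

Note the teardown discipline above: the DMA buffer is freed on every path
except when the completion wait returns -EBUSY, presumably because the
firmware may still write into the buffer in that case.
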
index b897cfd..308f147 100644
 #define PCICFG_UE_STATUS_MASK_LOW       0xA8
 #define PCICFG_UE_STATUS_MASK_HI        0xAC
 
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_sge {
-       u8 pa_lo[32];           /* dword 0 */
-       u8 pa_hi[32];           /* dword 1 */
-       u8 length[32];          /* DWORD 2 */
-} __packed;
-
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_wrb_payload {
-       union {
-               struct amap_mcc_sge sgl[19];
-               u8 embedded[59 * 32];   /* DWORDS 57 to 115 */
-       } u;
-} __packed;
-
-/**
- * Pseudo amap definition in which each bit of the actual structure is defined
- * as a byte: used to calculate offset/shift/mask of each field
- */
-struct amap_mcc_wrb {
-       u8 embedded;            /* DWORD 0 */
-       u8 rsvd0[2];            /* DWORD 0 */
-       u8 sge_count[5];        /* DWORD 0 */
-       u8 rsvd1[16];           /* DWORD 0 */
-       u8 special[8];          /* DWORD 0 */
-       u8 payload_length[32];
-       u8 tag[64];             /* DWORD 2 */
-       u8 rsvd2[32];           /* DWORD 4 */
-       struct amap_mcc_wrb_payload payload;
-};
-
-struct mcc_sge {
-       u32 pa_lo;              /* dword 0 */
-       u32 pa_hi;              /* dword 1 */
-       u32 length;             /* DWORD 2 */
-} __packed;
-
-struct mcc_wrb_payload {
-       union {
-               struct mcc_sge sgl[19];
-               u32 embedded[59];       /* DWORDS 57 to 115 */
-       } u;
-} __packed;
-
-#define MCC_WRB_EMBEDDED_MASK                0x00000001
-
-struct mcc_wrb {
-       u32 dw[0];              /* DWORD 0 */
-       u32 payload_length;
-       u32 tag[2];             /* DWORD 2 */
-       u32 rsvd2[1];           /* DWORD 4 */
-       struct mcc_wrb_payload payload;
-};
-
 int mgmt_open_connection(struct beiscsi_hba *phba,
                         struct sockaddr *dst_addr,
                         struct beiscsi_endpoint *beiscsi_ep,
@@ -104,10 +44,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
                                     unsigned short cid,
                                     unsigned int upload_flag);
-unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
-                               struct invalidate_command_table *inv_tbl,
-                               unsigned int num_invalidate, unsigned int cid,
-                               struct be_dma_mem *nonemb_cmd);
 unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                                         struct beiscsi_hba *phba,
                                         struct bsg_job *job,
@@ -134,24 +70,31 @@ union iscsi_invalidate_connection_params {
        struct iscsi_invalidate_connection_params_out response;
 } __packed;
 
-struct invalidate_commands_params_in {
+#define BE_INVLDT_CMD_TBL_SZ   128
+struct invldt_cmd_tbl {
+       unsigned short icd;
+       unsigned short cid;
+} __packed;
+
+struct invldt_cmds_params_in {
        struct be_cmd_req_hdr hdr;
        unsigned int ref_handle;
        unsigned int icd_count;
-       struct invalidate_command_table table[128];
+       struct invldt_cmd_tbl table[BE_INVLDT_CMD_TBL_SZ];
        unsigned short cleanup_type;
        unsigned short unused;
 } __packed;
 
-struct invalidate_commands_params_out {
+struct invldt_cmds_params_out {
+       struct be_cmd_resp_hdr hdr;
        unsigned int ref_handle;
        unsigned int icd_count;
-       unsigned int icd_status[128];
+       unsigned int icd_status[BE_INVLDT_CMD_TBL_SZ];
 } __packed;
 
-union invalidate_commands_params {
-       struct invalidate_commands_params_in request;
-       struct invalidate_commands_params_out response;
+union be_invldt_cmds_params {
+       struct invldt_cmds_params_in request;
+       struct invldt_cmds_params_out response;
 } __packed;
 
 struct mgmt_hba_attributes {
@@ -231,16 +174,6 @@ struct be_bsg_vendor_cmd {
 
 #define GET_MGMT_CONTROLLER_WS(phba)    (phba->pmgmt_ws)
 
-/* MGMT CMD flags */
-
-#define MGMT_CMDH_FREE                (1<<0)
-
-/*  --- MGMT_ERROR_CODES --- */
-/*  Error Codes returned in the status field of the CMD response header */
-#define MGMT_STATUS_SUCCESS 0  /* The CMD completed without errors */
-#define MGMT_STATUS_FAILED 1   /* Error status in the Status field of */
-                               /* the CMD_RESPONSE_HEADER  */
-
 #define ISCSI_GET_PDU_TEMPLATE_ADDRESS(pc, pa) {\
        pa->lo = phba->init_mem[ISCSI_MEM_GLOBAL_HEADER].mem_array[0].\
                                        bus_address.u.a32.address_lo;  \
@@ -270,6 +203,9 @@ unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
                                         unsigned short cid,
                                         unsigned short issue_reset,
                                         unsigned short savecfg_flag);
+int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
+                                struct invldt_cmd_tbl *inv_tbl,
+                                unsigned int nents);
 
 int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
 
index 1e7e139..4aa61e2 100644
 
 BFA_TRC_FILE(FCS, FCS);
 
-/*
- * FCS sub-modules
- */
-struct bfa_fcs_mod_s {
-       void            (*attach) (struct bfa_fcs_s *fcs);
-       void            (*modinit) (struct bfa_fcs_s *fcs);
-       void            (*modexit) (struct bfa_fcs_s *fcs);
-};
-
-#define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
-
-static struct bfa_fcs_mod_s fcs_modules[] = {
-       { bfa_fcs_port_attach, NULL, NULL },
-       { bfa_fcs_uf_attach, NULL, NULL },
-       { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
-         bfa_fcs_fabric_modexit },
-};
-
 /*
  *  fcs_api BFA FCS API
  */
@@ -58,52 +40,19 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
        complete(&bfad->comp);
 }
 
-
-
 /*
- *  fcs_api BFA FCS API
- */
-
-/*
- * fcs attach -- called once to initialize data structures at driver attach time
+ * fcs initialization, called once after bfa initialization is complete
  */
 void
-bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
-              bfa_boolean_t min_cfg)
+bfa_fcs_init(struct bfa_fcs_s *fcs)
 {
-       int             i;
-       struct bfa_fcs_mod_s  *mod;
-
-       fcs->bfa = bfa;
-       fcs->bfad = bfad;
-       fcs->min_cfg = min_cfg;
-       fcs->num_rport_logins = 0;
-
-       bfa->fcs = BFA_TRUE;
-       fcbuild_init();
-
-       for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
-               mod = &fcs_modules[i];
-               if (mod->attach)
-                       mod->attach(fcs);
-       }
+       bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
+       bfa_trc(fcs, 0);
 }
 
 /*
- * fcs initialization, called once after bfa initialization is complete
+ *  fcs_api BFA FCS API
  */
-void
-bfa_fcs_init(struct bfa_fcs_s *fcs)
-{
-       int     i;
-       struct bfa_fcs_mod_s  *mod;
-
-       for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
-               mod = &fcs_modules[i];
-               if (mod->modinit)
-                       mod->modinit(fcs);
-       }
-}
 
 /*
  * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
@@ -180,26 +129,14 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 void
 bfa_fcs_exit(struct bfa_fcs_s *fcs)
 {
-       struct bfa_fcs_mod_s  *mod;
-       int             nmods, i;
-
        bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
-
-       nmods = ARRAY_SIZE(fcs_modules);
-
-       for (i = 0; i < nmods; i++) {
-
-               mod = &fcs_modules[i];
-               if (mod->modexit) {
-                       bfa_wc_up(&fcs->wc);
-                       mod->modexit(fcs);
-               }
-       }
-
+       bfa_wc_up(&fcs->wc);
+       bfa_trc(fcs, 0);
+       bfa_lps_delete(fcs->fabric.lps);
+       bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_DELETE);
        bfa_wc_wait(&fcs->wc);
 }
 
-
 /*
  * Fabric module implementation.
  */
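
bfa_fcs_exit() above drives teardown through the bfa_wc "waiting counter":
each pending unit of work takes a reference with bfa_wc_up(), and the resume
callback fires once the count drains in bfa_wc_wait(). A standalone analog,
with the semantics inferred from the usage here:

    /* Minimal waiting counter: resume() runs when the count hits zero. */
    struct wc {
            int count;
            void (*resume)(void *arg);
            void *arg;
    };

    static void wc_init(struct wc *wc, void (*resume)(void *), void *arg)
    {
            wc->resume = resume;
            wc->arg = arg;
            wc->count = 1;          /* reference held by the waiter itself */
    }

    static void wc_up(struct wc *wc)
    {
            wc->count++;            /* one more unit of pending work */
    }

    static void wc_down(struct wc *wc)
    {
            if (--wc->count == 0)
                    wc->resume(wc->arg);
    }
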
@@ -1127,62 +1064,6 @@ bfa_fcs_fabric_stop_comp(void *cbarg)
  *  fcs_fabric_public fabric public functions
  */
 
-/*
- * Attach time initialization.
- */
-void
-bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
-{
-       struct bfa_fcs_fabric_s *fabric;
-
-       fabric = &fcs->fabric;
-       memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
-
-       /*
-        * Initialize base fabric.
-        */
-       fabric->fcs = fcs;
-       INIT_LIST_HEAD(&fabric->vport_q);
-       INIT_LIST_HEAD(&fabric->vf_q);
-       fabric->lps = bfa_lps_alloc(fcs->bfa);
-       WARN_ON(!fabric->lps);
-
-       /*
-        * Initialize fabric delete completion handler. Fabric deletion is
-        * complete when the last vport delete is complete.
-        */
-       bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
-       bfa_wc_up(&fabric->wc); /* For the base port */
-
-       bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-       bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
-}
-
-void
-bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
-{
-       bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
-       bfa_trc(fcs, 0);
-}
-
-/*
- *   Module cleanup
- */
-void
-bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
-{
-       struct bfa_fcs_fabric_s *fabric;
-
-       bfa_trc(fcs, 0);
-
-       /*
-        * Cleanup base fabric.
-        */
-       fabric = &fcs->fabric;
-       bfa_lps_delete(fabric->lps);
-       bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
-}
-
 /*
  * Fabric module stop -- stop FCS actions
  */
@@ -1633,12 +1514,6 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
        }
 }
 
-void
-bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
-{
-       bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
-}
-
 /*
  * BFA FCS UF ( Unsolicited Frames)
  */
@@ -1706,8 +1581,44 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
        bfa_uf_free(uf);
 }
 
+/*
+ * fcs attach -- called once to initialize data structures at driver attach time
+ */
 void
-bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+              bfa_boolean_t min_cfg)
 {
+       struct bfa_fcs_fabric_s *fabric = &fcs->fabric;
+
+       fcs->bfa = bfa;
+       fcs->bfad = bfad;
+       fcs->min_cfg = min_cfg;
+       fcs->num_rport_logins = 0;
+
+       bfa->fcs = BFA_TRUE;
+       fcbuild_init();
+
+       bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
        bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+
+       memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+
+       /*
+        * Initialize base fabric.
+        */
+       fabric->fcs = fcs;
+       INIT_LIST_HEAD(&fabric->vport_q);
+       INIT_LIST_HEAD(&fabric->vf_q);
+       fabric->lps = bfa_lps_alloc(fcs->bfa);
+       WARN_ON(!fabric->lps);
+
+       /*
+        * Initialize fabric delete completion handler. Fabric deletion is
+        * complete when the last vport delete is complete.
+        */
+       bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
+       bfa_wc_up(&fabric->wc); /* For the base port */
+
+       bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+       bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
 }
index 0f797a5..e60f72b 100644
@@ -808,9 +808,7 @@ void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
 /*
  * fabric protected interface functions
  */
-void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
@@ -827,8 +825,6 @@ void        bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
               wwn_t fabric_name);
 u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
-void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
-void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_modstop(struct bfa_fcs_s *fcs);
 void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
                        enum bfa_fcs_fabric_event event);
index 02d8060..7eb0eef 100644
@@ -813,6 +813,7 @@ struct scsi_host_template bfad_im_scsi_host_template = {
        .name = BFAD_DRIVER_NAME,
        .info = bfad_im_info,
        .queuecommand = bfad_im_queuecommand,
+       .eh_timed_out = fc_eh_timed_out,
        .eh_abort_handler = bfad_im_abort_handler,
        .eh_device_reset_handler = bfad_im_reset_lun_handler,
        .eh_bus_reset_handler = bfad_im_reset_bus_handler,
@@ -835,6 +836,7 @@ struct scsi_host_template bfad_im_vport_template = {
        .name = BFAD_DRIVER_NAME,
        .info = bfad_im_info,
        .queuecommand = bfad_im_queuecommand,
+       .eh_timed_out = fc_eh_timed_out,
        .eh_abort_handler = bfad_im_abort_handler,
        .eh_device_reset_handler = bfad_im_reset_lun_handler,
        .eh_bus_reset_handler = bfad_im_reset_bus_handler,
index c639d5a..b1e39f9 100644
@@ -2947,6 +2947,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
        .module                 = THIS_MODULE,
        .name                   = "QLogic Offload FCoE Initiator",
        .queuecommand           = bnx2fc_queuecommand,
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = bnx2fc_eh_abort,        /* abts */
        .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
        .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
index 133901f..f32a66f 100644
@@ -2259,6 +2259,7 @@ static struct scsi_host_template bnx2i_host_template = {
        .name                   = "QLogic Offload iSCSI Initiator",
        .proc_name              = "bnx2i",
        .queuecommand           = iscsi_queuecommand,
+       .eh_timed_out           = iscsi_eh_cmd_timed_out,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
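
This template and its neighbors in the series gain an .eh_timed_out hook so
command timeouts are routed to the matching transport class handler
(fc_eh_timed_out for FC drivers, iscsi_eh_cmd_timed_out for iSCSI ones). The
shape of the change, as a hypothetical template:

    static struct scsi_host_template example_sht = {
            .module                  = THIS_MODULE,
            .name                    = "example",
            .queuecommand            = iscsi_queuecommand,
            /* transport decides whether the command may keep running */
            .eh_timed_out            = iscsi_eh_cmd_timed_out,
            .eh_abort_handler        = iscsi_eh_abort,
            .eh_device_reset_handler = iscsi_eh_device_reset,
    };
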
index 89a52b9..a1ff75f 100644
@@ -2270,6 +2270,7 @@ struct scsi_host_template csio_fcoe_shost_template = {
        .name                   = CSIO_DRV_DESC,
        .proc_name              = KBUILD_MODNAME,
        .queuecommand           = csio_queuecommand,
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = csio_eh_abort_handler,
        .eh_device_reset_handler = csio_eh_lun_reset_handler,
        .slave_alloc            = csio_slave_alloc,
@@ -2289,6 +2290,7 @@ struct scsi_host_template csio_fcoe_shost_vport_template = {
        .name                   = CSIO_DRV_DESC,
        .proc_name              = KBUILD_MODNAME,
        .queuecommand           = csio_queuecommand,
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = csio_eh_abort_handler,
        .eh_device_reset_handler = csio_eh_lun_reset_handler,
        .slave_alloc            = csio_slave_alloc,
index 33e8346..1880eb6 100644
@@ -90,6 +90,7 @@ static struct scsi_host_template cxgb3i_host_template = {
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
+       .eh_timed_out   = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
index 9a2fdc3..3fb3f57 100644
@@ -103,6 +103,7 @@ static struct scsi_host_template cxgb4i_host_template = {
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
+       .eh_timed_out   = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
index 0e9de5d..d11dcc5 100644
@@ -54,6 +54,9 @@ extern const struct file_operations cxlflash_cxl_fops;
 /* RRQ for master issued cmds */
 #define NUM_RRQ_ENTRY                   CXLFLASH_MAX_CMDS
 
+/* SQ for master issued cmds */
+#define NUM_SQ_ENTRY                   CXLFLASH_MAX_CMDS
+
 
 static inline void check_sizes(void)
 {
@@ -155,8 +158,8 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
 
 struct afu {
        /* Stuff requiring alignment goes first. */
-
-       u64 rrq_entry[NUM_RRQ_ENTRY];   /* 2K RRQ */
+       struct sisl_ioarcb sq[NUM_SQ_ENTRY];            /* 16K SQ */
+       u64 rrq_entry[NUM_RRQ_ENTRY];                   /* 2K RRQ */
 
        /* Beware of alignment till here. Preferably introduce new
         * fields after this point
@@ -171,9 +174,13 @@ struct afu {
        struct sisl_host_map __iomem *host_map;         /* MC host map */
        struct sisl_ctrl_map __iomem *ctrl_map;         /* MC control map */
 
-       struct kref mapcount;
-
        ctx_hndl_t ctx_hndl;    /* master's context handle */
+
+       atomic_t hsq_credits;
+       spinlock_t hsq_slock;
+       struct sisl_ioarcb *hsq_start;
+       struct sisl_ioarcb *hsq_end;
+       struct sisl_ioarcb *hsq_curr;
        u64 *hrrq_start;
        u64 *hrrq_end;
        u64 *hrrq_curr;
@@ -191,6 +198,23 @@ struct afu {
 
 };
 
+static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
+{
+       u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
+
+       return afu_cap & cmd_mode;
+}
+
+static inline bool afu_is_sq_cmd_mode(struct afu *afu)
+{
+       return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
+}
+
+static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
+{
+       return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
+}
+
 static inline u64 lun_to_lunid(u64 lun)
 {
        __be64 lun_id;
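
The interface_version register now doubles as a capability word: the bits
above SISL_INTVER_CAP_SHIFT advertise which command-delivery modes (SQ ring
vs. IOARRIN) the AFU supports. A standalone model of the predicate, with
placeholder values rather than the real SISL definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define INTVER_CAP_SHIFT     16      /* assumed capability field offset */
    #define CAP_SQ_CMD_MODE      0x1ULL  /* placeholder capability bit */
    #define CAP_IOARRIN_CMD_MODE 0x2ULL  /* placeholder capability bit */

    static bool is_cmd_mode(uint64_t interface_version, uint64_t mode)
    {
            /* extract the capability field, then test the mode bit */
            return (interface_version >> INTVER_CAP_SHIFT) & mode;
    }
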
index 6c318db..0efed17 100644
  */
 static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
 {
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = NULL;
 
        lli = kzalloc(sizeof(*lli), GFP_KERNEL);
        if (unlikely(!lli)) {
-               pr_err("%s: could not allocate lli\n", __func__);
+               dev_err(dev, "%s: could not allocate lli\n", __func__);
                goto out;
        }
 
@@ -58,11 +60,13 @@ out:
  */
 static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
 {
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = NULL;
 
        gli = kzalloc(sizeof(*gli), GFP_KERNEL);
        if (unlikely(!gli)) {
-               pr_err("%s: could not allocate gli\n", __func__);
+               dev_err(dev, "%s: could not allocate gli\n", __func__);
                goto out;
        }
 
@@ -129,10 +133,10 @@ static struct glun_info *lookup_global(u8 *wwid)
  */
 static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
 {
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = NULL;
        struct glun_info *gli = NULL;
-       struct Scsi_Host *shost = sdev->host;
-       struct cxlflash_cfg *cfg = shost_priv(shost);
 
        if (unlikely(!wwid))
                goto out;
@@ -165,7 +169,7 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
        list_add(&gli->list, &global.gluns);
 
 out:
-       pr_debug("%s: returning %p\n", __func__, lli);
+       dev_dbg(dev, "%s: returning lli=%p, gli=%p\n", __func__, lli, gli);
        return lli;
 }
 
@@ -225,17 +229,18 @@ void cxlflash_term_global_luns(void)
 int cxlflash_manage_lun(struct scsi_device *sdev,
                        struct dk_cxlflash_manage_lun *manage)
 {
-       int rc = 0;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = NULL;
+       int rc = 0;
        u64 flags = manage->hdr.flags;
        u32 chan = sdev->channel;
 
        mutex_lock(&global.mutex);
        lli = find_and_create_lun(sdev, manage->wwid);
-       pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
-                __func__, get_unaligned_be64(&manage->wwid[0]),
-                get_unaligned_be64(&manage->wwid[8]),
-                manage->hdr.flags, lli);
+       dev_dbg(dev, "%s: WWID=%016llx%016llx, flags=%016llx lli=%p\n",
+               __func__, get_unaligned_be64(&manage->wwid[0]),
+               get_unaligned_be64(&manage->wwid[8]), manage->hdr.flags, lli);
        if (unlikely(!lli)) {
                rc = -ENOMEM;
                goto out;
@@ -265,11 +270,11 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
                }
        }
 
-       pr_debug("%s: port_sel = %08X chan = %u lun_id = %016llX\n", __func__,
-                lli->port_sel, chan, lli->lun_id[chan]);
+       dev_dbg(dev, "%s: port_sel=%08x chan=%u lun_id=%016llx\n",
+               __func__, lli->port_sel, chan, lli->lun_id[chan]);
 
 out:
        mutex_unlock(&global.mutex);
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
index b17ebf6..7069639 100644
@@ -43,6 +43,9 @@ MODULE_LICENSE("GPL");
  */
 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 {
+       struct afu *afu = cmd->parent;
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
        struct sisl_ioarcb *ioarcb;
        struct sisl_ioasa *ioasa;
        u32 resid;
@@ -56,21 +59,20 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
        if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
                resid = ioasa->resid;
                scsi_set_resid(scp, resid);
-               pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
-                        __func__, cmd, scp, resid);
+               dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
+                       __func__, cmd, scp, resid);
        }
 
        if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
-               pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
-                        __func__, cmd, scp);
+               dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
+                       __func__, cmd, scp);
                scp->result = (DID_ERROR << 16);
        }
 
-       pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
-                "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
-                __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
-                ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
-                ioasa->fc_extra);
+       dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
+               "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
+               ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
+               ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
 
        if (ioasa->rc.scsi_rc) {
                /* We have a SCSI status */
@@ -159,6 +161,7 @@ static void cmd_complete(struct afu_cmd *cmd)
        ulong lock_flags;
        struct afu *afu = cmd->parent;
        struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
        bool cmd_is_tmf;
 
        if (cmd->scp) {
@@ -170,9 +173,8 @@ static void cmd_complete(struct afu_cmd *cmd)
 
                cmd_is_tmf = cmd->cmd_tmf;
 
-               pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
-                                    "ioasc=%d\n", __func__, scp, scp->result,
-                                    cmd->sa.ioasc);
+               dev_dbg_ratelimited(dev, "%s: scp=%p result=%08x ioasc=%08x\n",
+                                   __func__, scp, scp->result, cmd->sa.ioasc);
 
                scsi_dma_unmap(scp);
                scp->scsi_done(scp);
@@ -188,10 +190,11 @@ static void cmd_complete(struct afu_cmd *cmd)
 }
 
 /**
- * context_reset_ioarrin() - reset command owner context via IOARRIN register
+ * context_reset() - reset command owner context via specified register
  * @cmd:       AFU command that timed out.
+ * @reset_reg: MMIO register to perform reset.
  */
-static void context_reset_ioarrin(struct afu_cmd *cmd)
+static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
 {
        int nretry = 0;
        u64 rrin = 0x1;
@@ -199,21 +202,43 @@ static void context_reset_ioarrin(struct afu_cmd *cmd)
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
 
-       pr_debug("%s: cmd=%p\n", __func__, cmd);
+       dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);
 
-       writeq_be(rrin, &afu->host_map->ioarrin);
+       writeq_be(rrin, reset_reg);
        do {
-               rrin = readq_be(&afu->host_map->ioarrin);
+               rrin = readq_be(reset_reg);
                if (rrin != 0x1)
                        break;
                /* Double delay each time */
                udelay(1 << nretry);
        } while (nretry++ < MC_ROOM_RETRY_CNT);
 
-       dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
+       dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
                __func__, rrin, nretry);
 }
 
+/**
+ * context_reset_ioarrin() - reset command owner context via IOARRIN register
+ * @cmd:       AFU command that timed out.
+ */
+static void context_reset_ioarrin(struct afu_cmd *cmd)
+{
+       struct afu *afu = cmd->parent;
+
+       context_reset(cmd, &afu->host_map->ioarrin);
+}
+
+/**
+ * context_reset_sq() - reset command owner context via SQ Context Reset register
+ * @cmd:       AFU command that timed out.
+ */
+static void context_reset_sq(struct afu_cmd *cmd)
+{
+       struct afu *afu = cmd->parent;
+
+       context_reset(cmd, &afu->host_map->sq_ctx_reset);
+}
+
 /**
  * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
  * @afu:       AFU associated with the host.
@@ -251,8 +276,51 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
        writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
 out:
        spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
-       pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
-                cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+       dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
+               cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
+       return rc;
+}
+
+/**
+ * send_cmd_sq() - sends an AFU command via SQ ring
+ * @afu:       AFU associated with the host.
+ * @cmd:       AFU command to send.
+ *
+ * Return:
+ *     0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ */
+static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
+{
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
+       int rc = 0;
+       int newval;
+       ulong lock_flags;
+
+       newval = atomic_dec_if_positive(&afu->hsq_credits);
+       if (newval <= 0) {
+               rc = SCSI_MLQUEUE_HOST_BUSY;
+               goto out;
+       }
+
+       cmd->rcb.ioasa = &cmd->sa;
+
+       spin_lock_irqsave(&afu->hsq_slock, lock_flags);
+
+       *afu->hsq_curr = cmd->rcb;
+       if (afu->hsq_curr < afu->hsq_end)
+               afu->hsq_curr++;
+       else
+               afu->hsq_curr = afu->hsq_start;
+       writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);
+
+       spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
+out:
+       dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
+              "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
+              cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
+              readq_be(&afu->host_map->sq_head),
+              readq_be(&afu->host_map->sq_tail));
        return rc;
 }
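
send_cmd_sq() copies the request block into a host-resident submission queue,
advances the tail pointer (wrapping at hsq_end), and meters free slots with
hsq_credits; credits are handed back in the RRQ interrupt handler once
commands complete. The same ring discipline as a userspace sketch:

    #define SQ_ENTRIES 16

    struct sq {
            int slots[SQ_ENTRIES];
            int *start, *end, *curr;   /* end points at the last slot */
            int credits;               /* free slots the host may claim */
    };

    static void sq_init(struct sq *sq)
    {
            sq->start = sq->curr = &sq->slots[0];
            sq->end = &sq->slots[SQ_ENTRIES - 1];
            sq->credits = SQ_ENTRIES;
    }

    static int sq_submit(struct sq *sq, int cmd)
    {
            if (sq->credits <= 0)
                    return -1;         /* full: caller requeues the command */
            sq->credits--;

            *sq->curr = cmd;
            if (sq->curr < sq->end)    /* advance the tail, wrap at the end */
                    sq->curr++;
            else
                    sq->curr = sq->start;
            /* the driver would write the new tail to sq_tail here */
            return 0;
    }

    static void sq_complete(struct sq *sq)
    {
            sq->credits++;             /* mirrors the RRQ handler below */
    }
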
 
@@ -266,6 +334,8 @@ out:
  */
 static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 {
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
        ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
 
@@ -276,10 +346,8 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
        }
 
        if (unlikely(cmd->sa.ioasc != 0)) {
-               pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
-                      "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
-                      cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
-                      cmd->sa.rc.fc_rc);
+               dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
+                       __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
                rc = -1;
        }
 
@@ -298,8 +366,7 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 {
        u32 port_sel = scp->device->channel + 1;
-       struct Scsi_Host *host = scp->device->host;
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
        struct afu_cmd *cmd = sc_to_afucz(scp);
        struct device *dev = &cfg->dev->dev;
        ulong lock_flags;
@@ -344,7 +411,7 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
                                                       to);
        if (!to) {
                cfg->tmf_active = false;
-               dev_err(dev, "%s: TMF timed out!\n", __func__);
+               dev_err(dev, "%s: TMF timed out\n", __func__);
                rc = -1;
        }
        spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
@@ -352,16 +419,6 @@ out:
        return rc;
 }
 
-static void afu_unmap(struct kref *ref)
-{
-       struct afu *afu = container_of(ref, struct afu, mapcount);
-
-       if (likely(afu->afu_map)) {
-               cxl_psa_unmap((void __iomem *)afu->afu_map);
-               afu->afu_map = NULL;
-       }
-}
-
 /**
  * cxlflash_driver_info() - information handler for this host driver
  * @host:      SCSI host associated with device.
@@ -382,7 +439,7 @@ static const char *cxlflash_driver_info(struct Scsi_Host *host)
  */
 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(host);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = sc_to_afucz(scp);
@@ -392,10 +449,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
        ulong lock_flags;
        int nseg = 0;
        int rc = 0;
-       int kref_got = 0;
 
        dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
-                           "cdb=(%08X-%08X-%08X-%08X)\n",
+                           "cdb=(%08x-%08x-%08x-%08x)\n",
                            __func__, scp, host->host_no, scp->device->channel,
                            scp->device->id, scp->device->lun,
                            get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
@@ -417,11 +473,11 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 
        switch (cfg->state) {
        case STATE_RESET:
-               dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
+               dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
                rc = SCSI_MLQUEUE_HOST_BUSY;
                goto out;
        case STATE_FAILTERM:
-               dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
+               dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
                scp->result = (DID_NO_CONNECT << 16);
                scp->scsi_done(scp);
                rc = 0;
@@ -430,13 +486,10 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
                break;
        }
 
-       kref_get(&cfg->afu->mapcount);
-       kref_got = 1;
-
        if (likely(sg)) {
                nseg = scsi_dma_map(scp);
                if (unlikely(nseg < 0)) {
-                       dev_err(dev, "%s: Fail DMA map!\n", __func__);
+                       dev_err(dev, "%s: Fail DMA map\n", __func__);
                        rc = SCSI_MLQUEUE_HOST_BUSY;
                        goto out;
                }
@@ -463,9 +516,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
        if (unlikely(rc))
                scsi_dma_unmap(scp);
 out:
-       if (kref_got)
-               kref_put(&afu->mapcount, afu_unmap);
-       pr_devel("%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -503,13 +553,15 @@ static void free_mem(struct cxlflash_cfg *cfg)
  *
  * Safe to call with AFU in a partially allocated/initialized state.
  *
- * Waits for any active internal AFU commands to timeout and then unmaps
- * the MMIO space.
+ * Cancels scheduled worker threads, waits for any active internal AFU
+ * commands to time out and then unmaps the MMIO space.
  */
 static void stop_afu(struct cxlflash_cfg *cfg)
 {
        struct afu *afu = cfg->afu;
 
+       cancel_work_sync(&cfg->work_q);
+
        if (likely(afu)) {
                while (atomic_read(&afu->cmds_active))
                        ssleep(1);
@@ -517,7 +569,6 @@ static void stop_afu(struct cxlflash_cfg *cfg)
                        cxl_psa_unmap((void __iomem *)afu->afu_map);
                        afu->afu_map = NULL;
                }
-               kref_put(&afu->mapcount, afu_unmap);
        }
 }
 
@@ -585,6 +636,8 @@ static void term_mc(struct cxlflash_cfg *cfg)
  */
 static void term_afu(struct cxlflash_cfg *cfg)
 {
+       struct device *dev = &cfg->dev->dev;
+
        /*
         * Tear down is carefully orchestrated to ensure
         * no interrupts can come in when the problem state
@@ -600,7 +653,7 @@ static void term_afu(struct cxlflash_cfg *cfg)
 
        term_mc(cfg);
 
-       pr_debug("%s: returning\n", __func__);
+       dev_dbg(dev, "%s: returning\n", __func__);
 }
 
 /**
@@ -627,8 +680,7 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
                return;
 
        if (!afu || !afu->afu_map) {
-               dev_dbg(dev, "%s: The problem state area is not mapped\n",
-                       __func__);
+               dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
                return;
        }
 
@@ -670,10 +722,11 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
 static void cxlflash_remove(struct pci_dev *pdev)
 {
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+       struct device *dev = &pdev->dev;
        ulong lock_flags;
 
        if (!pci_is_enabled(pdev)) {
-               pr_debug("%s: Device is disabled\n", __func__);
+               dev_dbg(dev, "%s: Device is disabled\n", __func__);
                return;
        }
 
@@ -699,7 +752,6 @@ static void cxlflash_remove(struct pci_dev *pdev)
                scsi_remove_host(cfg->host);
                /* fall through */
        case INIT_STATE_AFU:
-               cancel_work_sync(&cfg->work_q);
                term_afu(cfg);
        case INIT_STATE_PCI:
                pci_disable_device(pdev);
@@ -709,7 +761,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
                break;
        }
 
-       pr_debug("%s: returning\n", __func__);
+       dev_dbg(dev, "%s: returning\n", __func__);
 }
 
 /**
@@ -727,7 +779,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
        int rc = 0;
        struct device *dev = &cfg->dev->dev;
 
-       /* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
+       /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
        cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(sizeof(struct afu)));
        if (unlikely(!cfg->afu)) {
@@ -751,6 +803,7 @@ out:
 static int init_pci(struct cxlflash_cfg *cfg)
 {
        struct pci_dev *pdev = cfg->dev;
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
 
        rc = pci_enable_device(pdev);
@@ -761,15 +814,14 @@ static int init_pci(struct cxlflash_cfg *cfg)
                }
 
                if (rc) {
-                       dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
-                               __func__);
+                       dev_err(dev, "%s: Cannot enable adapter\n", __func__);
                        cxlflash_wait_for_pci_err_recovery(cfg);
                        goto out;
                }
        }
 
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -782,19 +834,19 @@ out:
 static int init_scsi(struct cxlflash_cfg *cfg)
 {
        struct pci_dev *pdev = cfg->dev;
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
 
        rc = scsi_add_host(cfg->host, &pdev->dev);
        if (rc) {
-               dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
-                       __func__, rc);
+               dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
                goto out;
        }
 
        scsi_scan_host(cfg->host);
 
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -844,16 +896,12 @@ static void set_port_offline(__be64 __iomem *fc_regs)
  * Return:
  *     TRUE (1) when the specified port is online
  *     FALSE (0) when the specified port fails to come online after timeout
- *     -EINVAL when @delay_us is less than 1000
  */
-static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
+static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 {
        u64 status;
 
-       if (delay_us < 1000) {
-               pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
-               return -EINVAL;
-       }
+       WARN_ON(delay_us < 1000);
 
        do {
                msleep(delay_us / 1000);
@@ -877,16 +925,12 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
  * Return:
  *     TRUE (1) when the specified port is offline
  *     FALSE (0) when the specified port fails to go offline after timeout
- *     -EINVAL when @delay_us is less than 1000
  */
-static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
+static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 {
        u64 status;
 
-       if (delay_us < 1000) {
-               pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
-               return -EINVAL;
-       }
+       WARN_ON(delay_us < 1000);
 
        do {
                msleep(delay_us / 1000);
@@ -915,11 +959,14 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
                         u64 wwpn)
 {
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
+
        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT)) {
-               pr_debug("%s: wait on port %d to go offline timed out\n",
-                        __func__, port);
+               dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
+                       __func__, port);
        }
 
        writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
@@ -927,8 +974,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT)) {
-               pr_debug("%s: wait on port %d to go online timed out\n",
-                        __func__, port);
+               dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
+                       __func__, port);
        }
 }
 
@@ -947,6 +994,8 @@ static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
  */
 static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
 {
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
        u64 port_sel;
 
        /* first switch the AFU to the other links, if any */
@@ -958,21 +1007,21 @@ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
        set_port_offline(fc_regs);
        if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                               FC_PORT_STATUS_RETRY_CNT))
-               pr_err("%s: wait on port %d to go offline timed out\n",
-                      __func__, port);
+               dev_err(dev, "%s: wait on port %d to go offline timed out\n",
+                       __func__, port);
 
        set_port_online(fc_regs);
        if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
                              FC_PORT_STATUS_RETRY_CNT))
-               pr_err("%s: wait on port %d to go online timed out\n",
-                      __func__, port);
+               dev_err(dev, "%s: wait on port %d to go online timed out\n",
+                       __func__, port);
 
        /* switch back to include this port */
        port_sel |= (1ULL << port);
        writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
        cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
 
-       pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
+       dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
 }
 
 /*
@@ -1082,6 +1131,8 @@ static void afu_err_intr_init(struct afu *afu)
 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
 {
        struct afu *afu = (struct afu *)data;
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
        u64 reg;
        u64 reg_unmasked;
 
@@ -1089,18 +1140,17 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
        reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
 
        if (reg_unmasked == 0UL) {
-               pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
-                      __func__, (u64)afu, reg);
+               dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
+                       __func__, reg);
                goto cxlflash_sync_err_irq_exit;
        }
 
-       pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
-              __func__, (u64)afu, reg);
+       dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
+               __func__, reg);
 
        writeq_be(reg_unmasked, &afu->host_map->intr_clear);
 
 cxlflash_sync_err_irq_exit:
-       pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
        return IRQ_HANDLED;
 }
 
@@ -1115,6 +1165,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
 {
        struct afu *afu = (struct afu *)data;
        struct afu_cmd *cmd;
+       struct sisl_ioasa *ioasa;
+       struct sisl_ioarcb *ioarcb;
        bool toggle = afu->toggle;
        u64 entry,
            *hrrq_start = afu->hrrq_start,
@@ -1128,7 +1180,16 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
                if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
                        break;
 
-               cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
+               entry &= ~SISL_RESP_HANDLE_T_BIT;
+
+               if (afu_is_sq_cmd_mode(afu)) {
+                       ioasa = (struct sisl_ioasa *)entry;
+                       cmd = container_of(ioasa, struct afu_cmd, sa);
+               } else {
+                       ioarcb = (struct sisl_ioarcb *)entry;
+                       cmd = container_of(ioarcb, struct afu_cmd, rcb);
+               }
+
                cmd_complete(cmd);
 
                /* Advance to next entry or wrap and flip the toggle bit */
@@ -1138,6 +1199,8 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
                        hrrq_curr = hrrq_start;
                        toggle ^= SISL_RESP_HANDLE_T_BIT;
                }
+
+               atomic_inc(&afu->hsq_credits);
        }
 
        afu->hrrq_curr = hrrq_curr;
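
The completion path now recovers the afu_cmd from either the IOASA (SQ mode)
or the IOARCB (IOARRIN mode) address via container_of(), and the toggle bit
tells the handler where valid entries stop: the producer flips the bit each
lap around the ring, so a stale entry's bit no longer matches. A standalone
model of the consume loop:

    #include <stdint.h>

    #define RRQ_ENTRIES 16
    #define T_BIT 0x1ULL

    static uint64_t rrq[RRQ_ENTRIES];
    static uint64_t *rrq_start = &rrq[0];
    static uint64_t *rrq_end = &rrq[RRQ_ENTRIES - 1];

    /* Consume entries while their toggle bit matches the expected value. */
    static void drain_rrq(uint64_t **currp, uint64_t *togglep)
    {
            uint64_t *curr = *currp;

            while ((*curr & T_BIT) == *togglep) {
                    /* *curr & ~T_BIT is the completed command's handle */
                    if (curr < rrq_end) {
                            curr++;
                    } else {
                            curr = rrq_start;
                            *togglep ^= T_BIT;  /* wrapped: expect the flip */
                    }
            }
            *currp = curr;
    }
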
@@ -1169,7 +1232,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
        reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
 
        if (reg_unmasked == 0) {
-               dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
+               dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
                        __func__, reg);
                goto out;
        }
@@ -1185,7 +1248,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 
                port = info->port;
 
-               dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+               dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
                        __func__, port, info->desc,
                       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
 
@@ -1198,7 +1261,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
                                __func__, port);
                        cfg->lr_state = LINK_RESET_REQUIRED;
                        cfg->lr_port = port;
-                       kref_get(&cfg->afu->mapcount);
                        schedule_work(&cfg->work_q);
                }
 
@@ -1210,7 +1272,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
                         * should be the same and tracing one is sufficient.
                         */
 
-                       dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
+                       dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
                                __func__, port, reg);
 
                        writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
@@ -1219,13 +1281,11 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 
                if (info->action & SCAN_HOST) {
                        atomic_inc(&cfg->scan_host_needed);
-                       kref_get(&cfg->afu->mapcount);
                        schedule_work(&cfg->work_q);
                }
        }
 
 out:
-       dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
        return IRQ_HANDLED;
 }
 
@@ -1237,13 +1297,14 @@ out:
  */
 static int start_context(struct cxlflash_cfg *cfg)
 {
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
 
        rc = cxl_start_context(cfg->mcctx,
                               cfg->afu->work.work_element_descriptor,
                               NULL);
 
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -1256,7 +1317,8 @@ static int start_context(struct cxlflash_cfg *cfg)
  */
 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 {
-       struct pci_dev *dev = cfg->dev;
+       struct device *dev = &cfg->dev->dev;
+       struct pci_dev *pdev = cfg->dev;
        int rc = 0;
        int ro_start, ro_size, i, j, k;
        ssize_t vpd_size;
@@ -1265,10 +1327,10 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
        char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
 
        /* Get the VPD data from the device */
-       vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
+       vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
        if (unlikely(vpd_size <= 0)) {
-               dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
-                      __func__, vpd_size);
+               dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
+                       __func__, vpd_size);
                rc = -ENODEV;
                goto out;
        }
@@ -1277,8 +1339,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
                                    PCI_VPD_LRDT_RO_DATA);
        if (unlikely(ro_start < 0)) {
-               dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
-                       __func__);
+               dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
                rc = -ENODEV;
                goto out;
        }
@@ -1288,8 +1349,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
        j = ro_size;
        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
        if (unlikely((i + j) > vpd_size)) {
-               pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
-                        __func__, (i + j), vpd_size);
+               dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
+                       __func__, (i + j), vpd_size);
                ro_size = vpd_size - i;
        }
 
@@ -1307,8 +1368,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 
                i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
                if (unlikely(i < 0)) {
-                       dev_err(&dev->dev, "%s: Port %d WWPN not found "
-                               "in VPD\n", __func__, k);
+                       dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
+                               __func__, k);
                        rc = -ENODEV;
                        goto out;
                }
@@ -1316,9 +1377,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
                j = pci_vpd_info_field_size(&vpd_data[i]);
                i += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
-                       dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
-                               "VPD corrupt\n",
-                              __func__, k);
+                       dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
+                               __func__, k);
                        rc = -ENODEV;
                        goto out;
                }
@@ -1326,15 +1386,15 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
                memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
                rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
                if (unlikely(rc)) {
-                       dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
-                               "to integer\n", __func__, k);
+                       dev_err(dev, "%s: WWPN conversion failed for port %d\n",
+                               __func__, k);
                        rc = -ENODEV;
                        goto out;
                }
        }
 
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
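
A subtlety in the conversion above: the second argument of kstrtoul() is the numeric base, and the call relies on WWPN_LEN being 16, which happens to be both the WWPN string length and the correct base for hex digits. A hedged, spelled-out equivalent of that step:

	/* Sketch only: the same conversion with the base made explicit. */
	char tmp_buf[WWPN_LEN + 1] = { };	/* 16 hex characters plus NUL */

	memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
	rc = kstrtou64(tmp_buf, 16, &wwpn[k]);	/* base 16, not a length */
	if (unlikely(rc))
		rc = -ENODEV;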
 
@@ -1388,12 +1448,18 @@ static int init_global(struct cxlflash_cfg *cfg)
                goto out;
        }
 
-       pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
+       dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
+               __func__, wwpn[0], wwpn[1]);
 
-       /* Set up RRQ in AFU for master issued cmds */
+       /* Set up RRQ and SQ in AFU for master issued cmds */
        writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
        writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
 
+       if (afu_is_sq_cmd_mode(afu)) {
+               writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
+               writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
+       }
+
        /* AFU configuration */
        reg = readq_be(&afu->afu_map->global.regs.afu_config);
        reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
@@ -1443,7 +1509,6 @@ static int init_global(struct cxlflash_cfg *cfg)
                  &afu->ctrl_map->ctx_cap);
        /* Initialize heartbeat */
        afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
-
 out:
        return rc;
 }
@@ -1455,6 +1520,7 @@ out:
 static int start_afu(struct cxlflash_cfg *cfg)
 {
        struct afu *afu = cfg->afu;
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
 
        init_pcr(cfg);
@@ -1468,9 +1534,20 @@ static int start_afu(struct cxlflash_cfg *cfg)
        afu->hrrq_curr = afu->hrrq_start;
        afu->toggle = 1;
 
+       /* Initialize SQ */
+       if (afu_is_sq_cmd_mode(afu)) {
+               memset(&afu->sq, 0, sizeof(afu->sq));
+               afu->hsq_start = &afu->sq[0];
+               afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
+               afu->hsq_curr = afu->hsq_start;
+
+               spin_lock_init(&afu->hsq_slock);
+               atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
+       }
+
        rc = init_global(cfg);
 
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
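
The toggle retained above (afu->toggle = 1) is what cxlflash_rrq_irq() polls against: an RRQ entry is live only while its SISL_RESP_HANDLE_T_BIT matches the driver's current toggle, and the bit flips on every wrap. A hedged sketch of the validity test (helper name hypothetical):

	/* Sketch only: the RRQ entry test implied by the toggle protocol. */
	static bool rrq_entry_valid(struct afu *afu)
	{
		u64 entry = *afu->hrrq_curr;

		return (entry & SISL_RESP_HANDLE_T_BIT) == afu->toggle;
	}

The new SQ setup follows the same inclusive-end convention as the RRQ, and seeding hsq_credits at NUM_SQ_ENTRY - 1 holds one slot back, plausibly so a full ring never collapses into the same head/tail state as an empty one.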
 
@@ -1490,7 +1567,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
 
        rc = cxl_allocate_afu_irqs(ctx, 3);
        if (unlikely(rc)) {
-               dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
+               dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
                        __func__, rc);
                level = UNDO_NOOP;
                goto out;
@@ -1499,8 +1576,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
        rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
                             "SISL_MSI_SYNC_ERROR");
        if (unlikely(rc <= 0)) {
-               dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
-                       __func__);
+               dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
                level = FREE_IRQ;
                goto out;
        }
@@ -1508,8 +1584,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
        rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
                             "SISL_MSI_RRQ_UPDATED");
        if (unlikely(rc <= 0)) {
-               dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
-                       __func__);
+               dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
                level = UNMAP_ONE;
                goto out;
        }
@@ -1517,8 +1592,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
        rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
                             "SISL_MSI_ASYNC_ERROR");
        if (unlikely(rc <= 0)) {
-               dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
-                       __func__);
+               dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
                level = UNMAP_TWO;
                goto out;
        }
@@ -1552,15 +1626,13 @@ static int init_mc(struct cxlflash_cfg *cfg)
        /* During initialization reset the AFU to start from a clean slate */
        rc = cxl_afu_reset(cfg->mcctx);
        if (unlikely(rc)) {
-               dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
-                       __func__, rc);
+               dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
                goto ret;
        }
 
        level = init_intr(cfg, ctx);
        if (unlikely(level)) {
-               dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
-                       __func__, rc);
+               dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
                goto out;
        }
 
@@ -1575,7 +1647,7 @@ static int init_mc(struct cxlflash_cfg *cfg)
                goto out;
        }
 ret:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 out:
        term_intr(cfg, level);
@@ -1602,7 +1674,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
 
        rc = init_mc(cfg);
        if (rc) {
-               dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
+               dev_err(dev, "%s: init_mc failed rc=%d\n",
                        __func__, rc);
                goto out;
        }
@@ -1610,11 +1682,10 @@ static int init_afu(struct cxlflash_cfg *cfg)
        /* Map the entire MMIO space of the AFU */
        afu->afu_map = cxl_psa_map(cfg->mcctx);
        if (!afu->afu_map) {
-               dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
+               dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
                rc = -ENOMEM;
                goto err1;
        }
-       kref_init(&afu->mapcount);
 
        /* No byte reverse on reading afu_version or string will be backwards */
        reg = readq(&afu->afu_map->global.regs.afu_version);
@@ -1622,24 +1693,28 @@ static int init_afu(struct cxlflash_cfg *cfg)
        afu->interface_version =
            readq_be(&afu->afu_map->global.regs.interface_version);
        if ((afu->interface_version + 1) == 0) {
-               pr_err("Back level AFU, please upgrade. AFU version %s "
-                      "interface version 0x%llx\n", afu->version,
+               dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
+                       "interface version %016llx\n", afu->version,
                       afu->interface_version);
                rc = -EINVAL;
-               goto err2;
+               goto err1;
        }
 
-       afu->send_cmd = send_cmd_ioarrin;
-       afu->context_reset = context_reset_ioarrin;
+       if (afu_is_sq_cmd_mode(afu)) {
+               afu->send_cmd = send_cmd_sq;
+               afu->context_reset = context_reset_sq;
+       } else {
+               afu->send_cmd = send_cmd_ioarrin;
+               afu->context_reset = context_reset_ioarrin;
+       }
 
-       pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
-                afu->version, afu->interface_version);
+       dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
+               afu->version, afu->interface_version);
 
        rc = start_afu(cfg);
        if (rc) {
-               dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
-                       __func__, rc);
-               goto err2;
+               dev_err(dev, "%s: start_afu failed rc=%d\n", __func__, rc);
+               goto err1;
        }
 
        afu_err_intr_init(cfg->afu);
@@ -1649,11 +1724,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
        /* Restore the LUN mappings */
        cxlflash_restore_luntable(cfg);
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 
-err2:
-       kref_put(&afu->mapcount, afu_unmap);
 err1:
        term_intr(cfg, UNMAP_THREE);
        term_mc(cfg);
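
Since the hunk above binds the queueing model to function pointers once during initialization, the rest of the driver can submit without checking the mode again. A minimal hedged illustration of the call side (afu_submit() is a hypothetical name):

	/* Sketch only: callers stay agnostic of IOARRIN vs. SQ mode. */
	static int afu_submit(struct afu *afu, struct afu_cmd *cmd)
	{
		return afu->send_cmd(afu, cmd);	/* bound in init_afu() */
	}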
@@ -1693,7 +1766,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
        static DEFINE_MUTEX(sync_active);
 
        if (cfg->state != STATE_NORMAL) {
-               pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
+               dev_dbg(dev, "%s: Sync not required state=%u\n",
+                       __func__, cfg->state);
                return 0;
        }
 
@@ -1710,7 +1784,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
        init_completion(&cmd->cevent);
        cmd->parent = afu;
 
-       pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+       dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
 
        cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
        cmd->rcb.ctx_id = afu->ctx_hndl;
@@ -1735,7 +1809,7 @@ out:
        atomic_dec(&afu->cmds_active);
        mutex_unlock(&sync_active);
        kfree(buf);
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
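
Worth noting in this hunk: sync_active is a function-local static DEFINE_MUTEX(), so AFU syncs are serialized across every adapter in the system without needing a per-adapter lock field. The idiom, as a generic hedged sketch:

	/* Sketch only: the function-local mutex idiom used by afu_sync. */
	static int do_serialized(void)
	{
		static DEFINE_MUTEX(active);	/* shared by all callers */
		int rc = 0;

		mutex_lock(&active);
		/* ... work that must not run concurrently ... */
		mutex_unlock(&active);
		return rc;
	}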
 
@@ -1747,16 +1821,17 @@ out:
  */
 static int afu_reset(struct cxlflash_cfg *cfg)
 {
+       struct device *dev = &cfg->dev->dev;
        int rc = 0;
+
        /* Stop the context before the reset. Since the context is
         * no longer available, restart it after the reset completes.
         */
-
        term_afu(cfg);
 
        rc = init_afu(cfg);
 
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -1785,18 +1860,18 @@ static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
 {
        int rc = SUCCESS;
        struct Scsi_Host *host = scp->device->host;
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(host);
+       struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        int rcr = 0;
 
-       pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
-                "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
-                host->host_no, scp->device->channel,
-                scp->device->id, scp->device->lun,
-                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
-                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
-                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
-                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+       dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+               "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
+               scp->device->channel, scp->device->id, scp->device->lun,
+               get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+               get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+               get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+               get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
 
 retry:
        switch (cfg->state) {
@@ -1813,7 +1888,7 @@ retry:
                break;
        }
 
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
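
The shost_priv() conversions that recur through the rest of this patch are behavior-neutral: the helper in include/scsi/scsi_host.h returns the driver-private hostdata area that scsi_host_alloc() reserves, which is exactly what the open-coded casts were doing:

	/* The accessor these hunks adopt, as defined in scsi_host.h. */
	static inline void *shost_priv(struct Scsi_Host *shost)
	{
		return (void *)shost->hostdata;
	}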
 
@@ -1835,16 +1910,16 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
        int rc = SUCCESS;
        int rcr = 0;
        struct Scsi_Host *host = scp->device->host;
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(host);
+       struct device *dev = &cfg->dev->dev;
 
-       pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
-                "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
-                host->host_no, scp->device->channel,
-                scp->device->id, scp->device->lun,
-                get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
-                get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
-                get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
-                get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+       dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+               "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
+               scp->device->channel, scp->device->id, scp->device->lun,
+               get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+               get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+               get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+               get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
 
        switch (cfg->state) {
        case STATE_NORMAL:
@@ -1870,7 +1945,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
                break;
        }
 
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -1936,8 +2011,7 @@ static ssize_t port0_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
        struct afu *afu = cfg->afu;
 
        return cxlflash_show_port_status(0, afu, buf);
@@ -1955,8 +2029,7 @@ static ssize_t port1_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
        struct afu *afu = cfg->afu;
 
        return cxlflash_show_port_status(1, afu, buf);
@@ -1973,8 +2046,7 @@ static ssize_t port1_show(struct device *dev,
 static ssize_t lun_mode_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
        struct afu *afu = cfg->afu;
 
        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
@@ -2007,7 +2079,7 @@ static ssize_t lun_mode_store(struct device *dev,
                              const char *buf, size_t count)
 {
        struct Scsi_Host *shost = class_to_shost(dev);
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(shost);
        struct afu *afu = cfg->afu;
        int rc;
        u32 lun_mode;
@@ -2069,7 +2141,7 @@ static ssize_t cxlflash_show_port_lun_table(u32 port,
 
        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
-                                  "%03d: %016llX\n", i, readq_be(&fc_port[i]));
+                                  "%03d: %016llx\n", i, readq_be(&fc_port[i]));
        return bytes;
 }
 
@@ -2085,8 +2157,7 @@ static ssize_t port0_lun_table_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
        struct afu *afu = cfg->afu;
 
        return cxlflash_show_port_lun_table(0, afu, buf);
@@ -2104,8 +2175,7 @@ static ssize_t port1_lun_table_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
 {
-       struct Scsi_Host *shost = class_to_shost(dev);
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
        struct afu *afu = cfg->afu;
 
        return cxlflash_show_port_lun_table(1, afu, buf);
@@ -2250,7 +2320,6 @@ static void cxlflash_worker_thread(struct work_struct *work)
 
        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
                scsi_scan_host(cfg->host);
-       kref_put(&afu->mapcount, afu_unmap);
 }
 
 /**
@@ -2265,6 +2334,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
 {
        struct Scsi_Host *host;
        struct cxlflash_cfg *cfg = NULL;
+       struct device *dev = &pdev->dev;
        struct dev_dependent_vals *ddv;
        int rc = 0;
 
@@ -2276,8 +2346,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
        if (!host) {
-               dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
-                       __func__);
+               dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
                rc = -ENOMEM;
                goto out;
        }
@@ -2288,12 +2357,11 @@ static int cxlflash_probe(struct pci_dev *pdev,
        host->unique_id = host->host_no;
        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
 
-       cfg = (struct cxlflash_cfg *)host->hostdata;
+       cfg = shost_priv(host);
        cfg->host = host;
        rc = alloc_mem(cfg);
        if (rc) {
-               dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
-                       __func__);
+               dev_err(dev, "%s: alloc_mem failed\n", __func__);
                rc = -ENOMEM;
                scsi_host_put(cfg->host);
                goto out;
@@ -2334,30 +2402,27 @@ static int cxlflash_probe(struct pci_dev *pdev,
 
        rc = init_pci(cfg);
        if (rc) {
-               dev_err(&pdev->dev, "%s: call to init_pci "
-                       "failed rc=%d!\n", __func__, rc);
+               dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_PCI;
 
        rc = init_afu(cfg);
        if (rc) {
-               dev_err(&pdev->dev, "%s: call to init_afu "
-                       "failed rc=%d!\n", __func__, rc);
+               dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_AFU;
 
        rc = init_scsi(cfg);
        if (rc) {
-               dev_err(&pdev->dev, "%s: call to init_scsi "
-                       "failed rc=%d!\n", __func__, rc);
+               dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_SCSI;
 
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 
 out_remove:
@@ -2395,7 +2460,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
                drain_ioctls(cfg);
                rc = cxlflash_mark_contexts_error(cfg);
                if (unlikely(rc))
-                       dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
+                       dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
                                __func__, rc);
                term_afu(cfg);
                return PCI_ERS_RESULT_NEED_RESET;
@@ -2429,7 +2494,7 @@ static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
 
        rc = init_afu(cfg);
        if (unlikely(rc)) {
-               dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
+               dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
@@ -2477,8 +2542,6 @@ static struct pci_driver cxlflash_driver = {
  */
 static int __init init_cxlflash(void)
 {
-       pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
-
        cxlflash_list_init();
 
        return pci_register_driver(&cxlflash_driver);
index 1a2d09c..a6e48a8 100644
@@ -72,7 +72,10 @@ struct sisl_ioarcb {
        u16 timeout;            /* in units specified by req_flags */
        u32 rsvd1;
        u8 cdb[16];             /* must be in big endian */
-       u64 reserved;           /* Reserved area */
+       union {
+               u64 reserved;                   /* Reserved for IOARRIN mode */
+               struct sisl_ioasa *ioasa;       /* IOASA EA for SQ Mode */
+       };
 } __packed;
 
 struct sisl_rc {
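
With this union, SQ mode reuses the formerly reserved doubleword of the IOARCB to tell the AFU where to deposit the IOASA for the command. A hedged sketch of the setup step (per-command rcb/sa fields as used elsewhere in this series; the helper name is hypothetical):

	/* Sketch only: point the IOARCB at the command's status area. */
	static void ioarcb_set_ioasa(struct afu_cmd *cmd)
	{
		cmd->rcb.ioasa = &cmd->sa;	/* consumed by the AFU in SQ mode */
	}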
@@ -260,6 +263,11 @@ struct sisl_host_map {
        __be64 cmd_room;
        __be64 ctx_ctrl;        /* least significant byte or b56:63 is LISN# */
        __be64 mbox_w;          /* restricted use */
+       __be64 sq_start;        /* Submission Queue (R/W): write sequence and */
+       __be64 sq_end;          /* inclusion semantics are the same as RRQ    */
+       __be64 sq_head;         /* Submission Queue Head (R): for debugging   */
+       __be64 sq_tail;         /* Submission Queue TAIL (R/W): next IOARCB   */
+       __be64 sq_tail;         /* Submission Queue Tail (R/W): next IOARCB   */
 };
 
 /* per context provisioning & control MMIO */
@@ -348,6 +356,15 @@ struct sisl_global_regs {
        __be64 rsvd[0xf8];
        __le64 afu_version;
        __be64 interface_version;
+#define SISL_INTVER_CAP_SHIFT                  16
+#define SISL_INTVER_MAJ_SHIFT                  8
+#define SISL_INTVER_CAP_MASK                   0xFFFFFFFF00000000ULL
+#define SISL_INTVER_MAJ_MASK                   0x00000000FFFF0000ULL
+#define SISL_INTVER_MIN_MASK                   0x000000000000FFFFULL
+#define SISL_INTVER_CAP_IOARRIN_CMD_MODE       0x800000000000ULL
+#define SISL_INTVER_CAP_SQ_CMD_MODE            0x400000000000ULL
+#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A    0x200000000000ULL
+#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B    0x100000000000ULL
 };
 
 #define CXLFLASH_NUM_FC_PORTS   2
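
These capability bits are what the afu_is_sq_cmd_mode() checks scattered through this series test: the capability word occupies the upper half of interface_version and is shifted down before masking. A hedged sketch of the test (the helper's exact shape is an assumption; the constants are those defined above):

	/* Sketch only: capability test behind afu_is_sq_cmd_mode(). */
	static inline bool afu_has_cap(struct afu *afu, u64 cap)
	{
		u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;

		return afu_cap & cap;	/* e.g. SISL_INTVER_CAP_SQ_CMD_MODE */
	}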
index 9636970..90869ce 100644
@@ -212,7 +212,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
        }
 
 out:
-       dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
+       dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
                "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
                ctx_ctrl);
 
@@ -260,7 +260,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
        writeq_be(val, &ctrl_map->ctx_cap);
        val = readq_be(&ctrl_map->ctx_cap);
        if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
-               dev_err(dev, "%s: ctx may be closed val=%016llX\n",
+               dev_err(dev, "%s: ctx may be closed val=%016llx\n",
                        __func__, val);
                rc = -EAGAIN;
                goto out;
@@ -302,7 +302,7 @@ out:
  */
 static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct glun_info *gli = lli->parent;
        u8 *cmd_buf = NULL;
@@ -326,7 +326,7 @@ retry:
        scsi_cmd[1] = SAI_READ_CAPACITY_16;     /* service action */
        put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
 
-       dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
+       dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
                retry_cnt ? "re" : "", scsi_cmd[0]);
 
        /* Drop the ioctl read semaphore across lengthy call */
@@ -336,7 +336,7 @@ retry:
        down_read(&cfg->ioctl_rwsem);
        rc = check_state(cfg);
        if (rc) {
-               dev_err(dev, "%s: Failed state! result=0x08%X\n",
+               dev_err(dev, "%s: Failed state result=%08x\n",
                        __func__, result);
                rc = -ENODEV;
                goto out;
@@ -378,7 +378,7 @@ retry:
        }
 
        if (result) {
-               dev_err(dev, "%s: command failed, result=0x%x\n",
+               dev_err(dev, "%s: command failed, result=%08x\n",
                        __func__, result);
                rc = -EIO;
                goto out;
@@ -415,29 +415,32 @@ out:
 struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
                                struct llun_info *lli)
 {
+       struct cxlflash_cfg *cfg = ctxi->cfg;
+       struct device *dev = &cfg->dev->dev;
        struct sisl_rht_entry *rhte = NULL;
 
        if (unlikely(!ctxi->rht_start)) {
-               pr_debug("%s: Context does not have allocated RHT!\n",
+               dev_dbg(dev, "%s: Context does not have allocated RHT\n",
                         __func__);
                goto out;
        }
 
        if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
-               pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
+               dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
+                       __func__, rhndl);
                goto out;
        }
 
        if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
-               pr_debug("%s: Bad resource handle LUN! (%d)\n",
-                        __func__, rhndl);
+               dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
+                       __func__, rhndl);
                goto out;
        }
 
        rhte = &ctxi->rht_start[rhndl];
        if (unlikely(rhte->nmask == 0)) {
-               pr_debug("%s: Unopened resource handle! (%d)\n",
-                        __func__, rhndl);
+               dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
+                       __func__, rhndl);
                rhte = NULL;
                goto out;
        }
@@ -456,6 +459,8 @@ out:
 struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
                                     struct llun_info *lli)
 {
+       struct cxlflash_cfg *cfg = ctxi->cfg;
+       struct device *dev = &cfg->dev->dev;
        struct sisl_rht_entry *rhte = NULL;
        int i;
 
@@ -470,7 +475,7 @@ struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
        if (likely(rhte))
                ctxi->rht_lun[i] = lli;
 
-       pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
+       dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
        return rhte;
 }
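
For context, the scan elided from this hunk walks the per-context resource handle table for the first unused entry, where a zero nmask marks a free slot. A hedged reconstruction of the loop (not the verbatim driver code):

	/* Sketch only: claim the first RHT entry with nmask == 0. */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			break;
		}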
 
@@ -547,7 +552,7 @@ int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
        if (gli->mode == MODE_NONE)
                gli->mode = mode;
        else if (gli->mode != mode) {
-               pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
+               pr_debug("%s: gli_mode=%d requested_mode=%d\n",
                         __func__, gli->mode, mode);
                rc = -EINVAL;
                goto out;
@@ -605,7 +610,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
                           struct ctx_info *ctxi,
                           struct dk_cxlflash_release *release)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
@@ -622,13 +627,13 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
        struct sisl_rht_entry *rhte;
        struct sisl_rht_entry_f1 *rhte_f1;
 
-       dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
+       dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
                __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
 
        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
-                       dev_dbg(dev, "%s: Bad context! (%llu)\n",
+                       dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
@@ -639,7 +644,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
 
        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
-               dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+               dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
@@ -758,13 +763,13 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
        lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
        ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
        if (unlikely(!ctxi || !lli || !ws)) {
-               dev_err(dev, "%s: Unable to allocate context!\n", __func__);
+               dev_err(dev, "%s: Unable to allocate context\n", __func__);
                goto err;
        }
 
        rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
        if (unlikely(!rhte)) {
-               dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
+               dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
                goto err;
        }
 
@@ -858,7 +863,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
                                 struct ctx_info *ctxi,
                                 struct dk_cxlflash_detach *detach)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct lun_access *lun_access, *t;
@@ -875,7 +880,7 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
-                       dev_dbg(dev, "%s: Bad context! (%llu)\n",
+                       dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
                                __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
@@ -964,7 +969,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
 
        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
-               dev_err(dev, "%s: Context %p was closed! (%d)\n",
+               dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                goto out;
        }
@@ -973,18 +978,18 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
        if (unlikely(!ctxi)) {
                ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
                if (!ctxi) {
-                       dev_dbg(dev, "%s: Context %d already free!\n",
+                       dev_dbg(dev, "%s: ctxid=%d already free\n",
                                __func__, ctxid);
                        goto out_release;
                }
 
-               dev_dbg(dev, "%s: Another process owns context %d!\n",
+               dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
                        __func__, ctxid);
                put_context(ctxi);
                goto out;
        }
 
-       dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);
+       dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
 
        detach.context_id = ctxi->ctxid;
        list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
@@ -1011,17 +1016,20 @@ static void unmap_context(struct ctx_info *ctxi)
 
 /**
  * get_err_page() - obtains and allocates the error notification page
+ * @cfg:       Internal structure associated with the host.
  *
  * Return: error notification page on success, NULL on failure
  */
-static struct page *get_err_page(void)
+static struct page *get_err_page(struct cxlflash_cfg *cfg)
 {
        struct page *err_page = global.err_page;
+       struct device *dev = &cfg->dev->dev;
 
        if (unlikely(!err_page)) {
                err_page = alloc_page(GFP_KERNEL);
                if (unlikely(!err_page)) {
-                       pr_err("%s: Unable to allocate err_page!\n", __func__);
+                       dev_err(dev, "%s: Unable to allocate err_page\n",
+                               __func__);
                        goto out;
                }
 
@@ -1039,7 +1047,7 @@ static struct page *get_err_page(void)
        }
 
 out:
-       pr_debug("%s: returning err_page=%p\n", __func__, err_page);
+       dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
        return err_page;
 }
 
@@ -1074,14 +1082,14 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
-               dev_err(dev, "%s: Context %p was closed! (%d)\n",
+               dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                goto err;
        }
 
        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
-               dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+               dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
                goto err;
        }
 
@@ -1091,13 +1099,12 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
        } else {
-               dev_dbg(dev, "%s: err recovery active, use err_page!\n",
+               dev_dbg(dev, "%s: err recovery active, use err_page\n",
                        __func__);
 
-               err_page = get_err_page();
+               err_page = get_err_page(cfg);
                if (unlikely(!err_page)) {
-                       dev_err(dev, "%s: Could not obtain error page!\n",
-                               __func__);
+                       dev_err(dev, "%s: Could not get err_page\n", __func__);
                        rc = VM_FAULT_RETRY;
                        goto out;
                }
@@ -1147,7 +1154,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
 
        ctxid = cxl_process_element(ctx);
        if (unlikely(ctxid < 0)) {
-               dev_err(dev, "%s: Context %p was closed! (%d)\n",
+               dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
                        __func__, ctx, ctxid);
                rc = -EIO;
                goto out;
@@ -1155,7 +1162,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
 
        ctxi = get_context(cfg, ctxid, file, ctrl);
        if (unlikely(!ctxi)) {
-               dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+               dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
                rc = -EIO;
                goto out;
        }
@@ -1251,7 +1258,7 @@ retry:
                        break;
                goto retry;
        case STATE_FAILTERM:
-               dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
+               dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
                rc = -ENODEV;
                break;
        default:
@@ -1276,7 +1283,7 @@ retry:
 static int cxlflash_disk_attach(struct scsi_device *sdev,
                                struct dk_cxlflash_attach *attach)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
@@ -1287,6 +1294,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
        int rc = 0;
        u32 perms;
        int ctxid = -1;
+       u64 flags = 0UL;
        u64 rctxid = 0UL;
        struct file *file = NULL;
 
@@ -1302,24 +1310,24 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
        }
 
        if (gli->max_lba == 0) {
-               dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
+               dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
                        __func__, lli->lun_id[sdev->channel]);
                rc = read_cap16(sdev, lli);
                if (rc) {
-                       dev_err(dev, "%s: Invalid device! (%d)\n",
+                       dev_err(dev, "%s: Invalid device rc=%d\n",
                                __func__, rc);
                        rc = -ENODEV;
                        goto out;
                }
-               dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
-               dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
+               dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
+               dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
        }
 
        if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
                rctxid = attach->context_id;
                ctxi = get_context(cfg, rctxid, NULL, 0);
                if (!ctxi) {
-                       dev_dbg(dev, "%s: Bad context! (%016llX)\n",
+                       dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
                                __func__, rctxid);
                        rc = -EINVAL;
                        goto out;
@@ -1327,7 +1335,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
                list_for_each_entry(lun_access, &ctxi->luns, list)
                        if (lun_access->lli == lli) {
-                               dev_dbg(dev, "%s: Already attached!\n",
+                               dev_dbg(dev, "%s: Already attached\n",
                                        __func__);
                                rc = -EINVAL;
                                goto out;
@@ -1336,13 +1344,13 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
        rc = scsi_device_get(sdev);
        if (unlikely(rc)) {
-               dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
+               dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
                goto out;
        }
 
        lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
        if (unlikely(!lun_access)) {
-               dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
+               dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
                rc = -ENOMEM;
                goto err;
        }
@@ -1352,7 +1360,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
        /* Non-NULL context indicates reuse (another context reference) */
        if (ctxi) {
-               dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
+               dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
                        __func__, rctxid);
                kref_get(&ctxi->kref);
                list_add(&lun_access->list, &ctxi->luns);
@@ -1361,7 +1369,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
        ctxi = create_context(cfg);
        if (unlikely(!ctxi)) {
-               dev_err(dev, "%s: Failed to create context! (%d)\n",
+               dev_err(dev, "%s: Failed to create context ctxid=%d\n",
                        __func__, ctxid);
                goto err;
        }
@@ -1387,7 +1395,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
        ctxid = cxl_process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
-               dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+               dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
                rc = -EPERM;
                goto err;
        }
@@ -1426,10 +1434,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
 out_attach:
        if (fd != -1)
-               attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
-       else
-               attach->hdr.return_flags = 0;
+               flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
+       if (afu_is_sq_cmd_mode(afu))
+               flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
 
+       attach->hdr.return_flags = flags;
        attach->context_id = ctxi->ctxid;
        attach->block_size = gli->blk_len;
        attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
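
Because return_flags now advertises the command model, userspace can branch on it immediately after attaching. A hedged sketch of that check (the application-side code is illustrative only; the uapi header path is assumed):

	#include <stdbool.h>
	#include <sys/ioctl.h>
	#include <scsi/cxlflash_ioctl.h>	/* installed kernel uapi header */

	/* Sketch only: userspace testing the new SQ-mode return flag. */
	static bool context_is_sq_mode(int fd, struct dk_cxlflash_attach *attach)
	{
		if (ioctl(fd, DK_CXLFLASH_ATTACH, attach))
			return false;	/* attach failed; caller inspects errno */

		return attach->hdr.return_flags & DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
	}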
@@ -1520,7 +1529,7 @@ static int recover_context(struct cxlflash_cfg *cfg,
 
        ctxid = cxl_process_element(ctx);
        if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
-               dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+               dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
                rc = -EPERM;
                goto err2;
        }
@@ -1611,12 +1620,13 @@ err1:
 static int cxlflash_afu_recover(struct scsi_device *sdev,
                                struct dk_cxlflash_recover_afu *recover)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct afu *afu = cfg->afu;
        struct ctx_info *ctxi = NULL;
        struct mutex *mutex = &cfg->ctx_recovery_mutex;
+       u64 flags;
        u64 ctxid = DECODE_CTXID(recover->context_id),
            rctxid = recover->context_id;
        long reg;
@@ -1632,19 +1642,19 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
                goto out;
        rc = check_state(cfg);
        if (rc) {
-               dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
+               dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
                rc = -ENODEV;
                goto out;
        }
 
-       dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
+       dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
                __func__, recover->reason, rctxid);
 
 retry:
        /* Ensure that this process is attached to the context */
        ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
        if (unlikely(!ctxi)) {
-               dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+               dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto out;
        }
@@ -1653,12 +1663,12 @@ retry:
 retry_recover:
                rc = recover_context(cfg, ctxi, &new_adap_fd);
                if (unlikely(rc)) {
-                       dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
+                       dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
                                __func__, ctxid, rc);
                        if ((rc == -ENODEV) &&
                            ((atomic_read(&cfg->recovery_threads) > 1) ||
                             (lretry--))) {
-                               dev_dbg(dev, "%s: Going to try again!\n",
+                               dev_dbg(dev, "%s: Going to try again\n",
                                        __func__);
                                mutex_unlock(mutex);
                                msleep(100);
@@ -1672,11 +1682,16 @@ retry_recover:
                }
 
                ctxi->err_recovery_active = false;
+
+               flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
+                       DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
+               if (afu_is_sq_cmd_mode(afu))
+                       flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
+
+               recover->hdr.return_flags = flags;
                recover->context_id = ctxi->ctxid;
                recover->adap_fd = new_adap_fd;
                recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
-               recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
-                       DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
                goto out;
        }
 
@@ -1699,7 +1714,7 @@ retry_recover:
                goto retry;
        }
 
-       dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
+       dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
 out:
        if (likely(ctxi))
                put_context(ctxi);
@@ -1718,7 +1733,7 @@ out:
 static int process_sense(struct scsi_device *sdev,
                         struct dk_cxlflash_verify *verify)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
@@ -1729,7 +1744,7 @@ static int process_sense(struct scsi_device *sdev,
        rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
                                  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
        if (!rc) {
-               dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
+               dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
                rc = -EINVAL;
                goto out;
        }
@@ -1785,7 +1800,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
 {
        int rc = 0;
        struct ctx_info *ctxi = NULL;
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
@@ -1795,20 +1810,20 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
            rctxid = verify->context_id;
        u64 last_lba = 0;
 
-       dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
-               "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
+       dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
+               "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
                verify->hint, verify->hdr.flags);
 
        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
-               dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+               dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto out;
        }
 
        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
-               dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+               dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
                        __func__, rhndl);
                rc = -EINVAL;
                goto out;
@@ -1855,7 +1870,7 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
 out:
        if (likely(ctxi))
                put_context(ctxi);
-       dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
+       dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
                __func__, rc, verify->last_lba);
        return rc;
 }
@@ -1907,7 +1922,7 @@ static char *decode_ioctl(int cmd)
  */
 static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct llun_info *lli = sdev->hostdata;
@@ -1927,25 +1942,25 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
        struct ctx_info *ctxi = NULL;
        struct sisl_rht_entry *rhte = NULL;
 
-       pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+       dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
 
        rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
        if (unlikely(rc)) {
-               dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
-                       __func__);
+               dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
                goto out;
        }
 
        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
-               dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+               dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto err1;
        }
 
        rhte = rhte_checkout(ctxi, lli);
        if (unlikely(!rhte)) {
-               dev_dbg(dev, "%s: too many opens for this context\n", __func__);
+               dev_dbg(dev, "%s: Too many opens ctxid=%llu\n",
+                       __func__, ctxid);
                rc = -EMFILE;   /* too many opens  */
                goto err1;
        }
@@ -1963,7 +1978,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
 out:
        if (likely(ctxi))
                put_context(ctxi);
-       dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
+       dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
                __func__, rsrc_handle, rc, last_lba);
        return rc;
 
@@ -1985,7 +2000,7 @@ err1:
  */
 static int ioctl_common(struct scsi_device *sdev, int cmd)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        int rc = 0;
@@ -2002,7 +2017,7 @@ static int ioctl_common(struct scsi_device *sdev, int cmd)
                case DK_CXLFLASH_VLUN_RESIZE:
                case DK_CXLFLASH_RELEASE:
                case DK_CXLFLASH_DETACH:
-                       dev_dbg(dev, "%s: Command override! (%d)\n",
+                       dev_dbg(dev, "%s: Command override rc=%d\n",
                                __func__, rc);
                        rc = 0;
                        break;
@@ -2032,7 +2047,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
        typedef int (*sioctl) (struct scsi_device *, void *);
 
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct afu *afu = cfg->afu;
        struct dk_cxlflash_hdr *hdr;
@@ -2111,7 +2126,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
        }
 
        if (unlikely(copy_from_user(&buf, arg, size))) {
-               dev_err(dev, "%s: copy_from_user() fail! "
+               dev_err(dev, "%s: copy_from_user() fail "
                        "size=%lu cmd=%d (%s) arg=%p\n",
                        __func__, size, cmd, decode_ioctl(cmd), arg);
                rc = -EFAULT;
@@ -2127,7 +2142,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
        }
 
        if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
-               dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
+               dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
                rc = -EINVAL;
                goto cxlflash_ioctl_exit;
        }
@@ -2135,7 +2150,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
        rc = do_ioctl(sdev, (void *)&buf);
        if (likely(!rc))
                if (unlikely(copy_to_user(arg, &buf, size))) {
-                       dev_err(dev, "%s: copy_to_user() fail! "
+                       dev_err(dev, "%s: copy_to_user() fail "
                                "size=%lu cmd=%d (%s) arg=%p\n",
                                __func__, size, cmd, decode_ioctl(cmd), arg);
                        rc = -EFAULT;
index 90c5d7f..8fcc804 100644
@@ -66,8 +66,8 @@ static int ba_init(struct ba_lun *ba_lun)
        int last_word_underflow = 0;
        u64 *lam;
 
-       pr_debug("%s: Initializing LUN: lun_id = %llX, "
-                "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n",
+       pr_debug("%s: Initializing LUN: lun_id=%016llx "
+                "ba_lun->lsize=%lx ba_lun->au_size=%lx\n",
                __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
 
        /* Calculate bit map size */
@@ -80,7 +80,7 @@ static int ba_init(struct ba_lun *ba_lun)
        /* Allocate lun information container */
        bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
        if (unlikely(!bali)) {
-               pr_err("%s: Failed to allocate lun_info for lun_id %llX\n",
+               pr_err("%s: Failed to allocate lun_info lun_id=%016llx\n",
                       __func__, ba_lun->lun_id);
                return -ENOMEM;
        }
@@ -96,7 +96,7 @@ static int ba_init(struct ba_lun *ba_lun)
                                      GFP_KERNEL);
        if (unlikely(!bali->lun_alloc_map)) {
                pr_err("%s: Failed to allocate lun allocation map: "
-                      "lun_id = %llX\n", __func__, ba_lun->lun_id);
+                      "lun_id=%016llx\n", __func__, ba_lun->lun_id);
                kfree(bali);
                return -ENOMEM;
        }
@@ -125,7 +125,7 @@ static int ba_init(struct ba_lun *ba_lun)
        bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
                                      GFP_KERNEL);
        if (unlikely(!bali->aun_clone_map)) {
-               pr_err("%s: Failed to allocate clone map: lun_id = %llX\n",
+               pr_err("%s: Failed to allocate clone map: lun_id=%016llx\n",
                       __func__, ba_lun->lun_id);
                kfree(bali->lun_alloc_map);
                kfree(bali);
@@ -136,7 +136,7 @@ static int ba_init(struct ba_lun *ba_lun)
        ba_lun->ba_lun_handle = bali;
 
        pr_debug("%s: Successfully initialized the LUN: "
-                "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n",
+                "lun_id=%016llx bitmap_size=%x free_aun_cnt=%llx\n",
                __func__, ba_lun->lun_id, bali->lun_bmap_size,
                bali->free_aun_cnt);
        return 0;
@@ -165,10 +165,9 @@ static int find_free_range(u32 low,
                        num_bits = (sizeof(*lam) * BITS_PER_BYTE);
                        bit_pos = find_first_bit(lam, num_bits);
 
-                       pr_devel("%s: Found free bit %llX in LUN "
-                                "map entry %llX at bitmap index = %X\n",
-                                __func__, bit_pos, bali->lun_alloc_map[i],
-                                i);
+                       pr_devel("%s: Found free bit %llu in LUN "
+                                "map entry %016llx at bitmap index = %d\n",
+                                __func__, bit_pos, bali->lun_alloc_map[i], i);
 
                        *bit_word = i;
                        bali->free_aun_cnt--;
@@ -194,11 +193,11 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
        bali = ba_lun->ba_lun_handle;
 
        pr_debug("%s: Received block allocation request: "
-                "lun_id = %llX, free_aun_cnt = %llX\n",
+                "lun_id=%016llx free_aun_cnt=%llx\n",
                 __func__, ba_lun->lun_id, bali->free_aun_cnt);
 
        if (bali->free_aun_cnt == 0) {
-               pr_debug("%s: No space left on LUN: lun_id = %llX\n",
+               pr_debug("%s: No space left on LUN: lun_id=%016llx\n",
                         __func__, ba_lun->lun_id);
                return -1ULL;
        }
@@ -212,7 +211,7 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
                                          bali, &bit_word);
                if (bit_pos == -1) {
                        pr_debug("%s: Could not find an allocation unit on LUN:"
-                                " lun_id = %llX\n", __func__, ba_lun->lun_id);
+                                " lun_id=%016llx\n", __func__, ba_lun->lun_id);
                        return -1ULL;
                }
        }
@@ -223,8 +222,8 @@ static u64 ba_alloc(struct ba_lun *ba_lun)
        else
                bali->free_curr_idx = bit_word;
 
-       pr_debug("%s: Allocating AU number %llX, on lun_id %llX, "
-                "free_aun_cnt = %llX\n", __func__,
+       pr_debug("%s: Allocating AU number=%llx lun_id=%016llx "
+                "free_aun_cnt=%llx\n", __func__,
                 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
                 bali->free_aun_cnt);
 
@@ -266,18 +265,18 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
        bali = ba_lun->ba_lun_handle;
 
        if (validate_alloc(bali, to_free)) {
-               pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n",
+               pr_debug("%s: AUN %llx is not allocated on lun_id=%016llx\n",
                         __func__, to_free, ba_lun->lun_id);
                return -1;
        }
 
-       pr_debug("%s: Received a request to free AU %llX on lun_id %llX, "
-                "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id,
+       pr_debug("%s: Received a request to free AU=%llx lun_id=%016llx "
+                "free_aun_cnt=%llx\n", __func__, to_free, ba_lun->lun_id,
                 bali->free_aun_cnt);
 
        if (bali->aun_clone_map[to_free] > 0) {
-               pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone "
-                        "count = %X\n", __func__, to_free, ba_lun->lun_id,
+               pr_debug("%s: AUN %llx lun_id=%016llx cloned. Clone count=%x\n",
+                        __func__, to_free, ba_lun->lun_id,
                         bali->aun_clone_map[to_free]);
                bali->aun_clone_map[to_free]--;
                return 0;
@@ -294,8 +293,8 @@ static int ba_free(struct ba_lun *ba_lun, u64 to_free)
        else if (idx > bali->free_high_idx)
                bali->free_high_idx = idx;
 
-       pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on "
-                "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx,
+       pr_debug("%s: Successfully freed AU bit_pos=%x bit map index=%x "
+                "lun_id=%016llx free_aun_cnt=%llx\n", __func__, bit_pos, idx,
                 ba_lun->lun_id, bali->free_aun_cnt);
 
        return 0;
@@ -313,16 +312,16 @@ static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
        struct ba_lun_info *bali = ba_lun->ba_lun_handle;
 
        if (validate_alloc(bali, to_clone)) {
-               pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n",
+               pr_debug("%s: AUN=%llx not allocated on lun_id=%016llx\n",
                         __func__, to_clone, ba_lun->lun_id);
                return -1;
        }
 
-       pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n",
+       pr_debug("%s: Received a request to clone AUN %llx on lun_id=%016llx\n",
                 __func__, to_clone, ba_lun->lun_id);
 
        if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
-               pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n",
+               pr_debug("%s: AUN %llx on lun_id=%016llx hit max clones already\n",
                         __func__, to_clone, ba_lun->lun_id);
                return -1;
        }
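
The ba_* hunks above all touch the virtual LUN block allocator, which is a word-at-a-time bitmap: each u64 in lun_alloc_map covers 64 allocation units, a set bit means the AU is free, and find_first_bit() picks the lowest free one. A self-contained userspace sketch of the core idea (simplified; the real allocator also tracks low/high watermarks and a per-AU clone count):

	#include <stdint.h>
	#include <stdio.h>

	#define NWORDS 4			/* 4 * 64 = 256 allocation units */

	static uint64_t alloc_map[NWORDS];	/* bit set => AU is free */

	static long long au_alloc(void)
	{
		for (int i = 0; i < NWORDS; i++) {
			if (!alloc_map[i])
				continue;
			/* GCC/Clang builtin standing in for find_first_bit() */
			int bit = __builtin_ctzll(alloc_map[i]);

			alloc_map[i] &= ~(1ULL << bit);	/* mark AU allocated */
			return (long long)i * 64 + bit;
		}
		return -1;			/* no space left on this LUN */
	}

	int main(void)
	{
		for (int i = 0; i < NWORDS; i++)
			alloc_map[i] = ~0ULL;	/* all AUs start free */
		printf("%lld %lld\n", au_alloc(), au_alloc());	/* 0 1 */
		return 0;
	}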
@@ -433,7 +432,7 @@ static int write_same16(struct scsi_device *sdev,
        u64 offset = lba;
        int left = nblks;
        u32 to = sdev->request_queue->rq_timeout;
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
 
        cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
@@ -459,7 +458,7 @@ static int write_same16(struct scsi_device *sdev,
                down_read(&cfg->ioctl_rwsem);
                rc = check_state(cfg);
                if (rc) {
-                       dev_err(dev, "%s: Failed state! result=0x08%X\n",
+                       dev_err(dev, "%s: Failed state result=%08x\n",
                                __func__, result);
                        rc = -ENODEV;
                        goto out;
@@ -467,7 +466,7 @@ static int write_same16(struct scsi_device *sdev,
 
                if (result) {
                        dev_err_ratelimited(dev, "%s: command failed for "
-                                           "offset %lld result=0x%x\n",
+                                           "offset=%lld result=%08x\n",
                                            __func__, offset, result);
                        rc = -EIO;
                        goto out;
@@ -480,7 +479,7 @@ out:
        kfree(cmd_buf);
        kfree(scsi_cmd);
        kfree(sense_buf);
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
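
The cast replacement at the top of write_same16() recurs throughout this series. shost_priv() (include/scsi/scsi_host.h) returns the per-host private area reserved when the host was allocated, so call sites need no cast and cannot silently get the type wrong:

	/* the helper, essentially: */
	static inline void *shost_priv(struct Scsi_Host *shost)
	{
		return (void *)shost->hostdata;
	}

	/* before */
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
	/* after */
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);

The size of that area is whatever the driver passed as privsize to scsi_host_alloc().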
 
@@ -508,6 +507,8 @@ static int grow_lxt(struct afu *afu,
                    struct sisl_rht_entry *rhte,
                    u64 *new_size)
 {
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
@@ -527,7 +528,8 @@ static int grow_lxt(struct afu *afu,
        mutex_lock(&blka->mutex);
        av_size = ba_space(&blka->ba_lun);
        if (unlikely(av_size <= 0)) {
-               pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size);
+               dev_dbg(dev, "%s: ba_space error av_size=%d\n",
+                       __func__, av_size);
                mutex_unlock(&blka->mutex);
                rc = -ENOSPC;
                goto out;
@@ -568,8 +570,8 @@ static int grow_lxt(struct afu *afu,
                 */
                aun = ba_alloc(&blka->ba_lun);
                if ((aun == -1ULL) || (aun >= blka->nchunk))
-                       pr_debug("%s: ba_alloc error: allocated chunk# %llX, "
-                                "max %llX\n", __func__, aun, blka->nchunk - 1);
+                       dev_dbg(dev, "%s: ba_alloc error allocated chunk=%llu "
+                               "max=%llu\n", __func__, aun, blka->nchunk - 1);
 
                /* select both ports, use r/w perms from RHT */
                lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
@@ -599,7 +601,7 @@ static int grow_lxt(struct afu *afu,
                kfree(lxt_old);
        *new_size = my_new_size;
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
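
grow_lxt() gains a cfg/dev pair purely so its messages can move from pr_debug() to dev_dbg(); shrink_lxt() and the functions below do the same. The difference is the prefix: dev_dbg() tags each line with the emitting device, which matters when several adapters share a log. Roughly (the printed prefix varies by bus and driver, so the output shown is indicative only):

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;	/* cfg->dev is the PCI device */

	pr_debug("%s: rc=%d\n", __func__, rc);
	/* -> "grow_lxt: rc=0" */
	dev_dbg(dev, "%s: rc=%d\n", __func__, rc);
	/* -> "cxlflash 0001:00:00.0: grow_lxt: rc=0" */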
 
@@ -621,6 +623,8 @@ static int shrink_lxt(struct afu *afu,
                      struct ctx_info *ctxi,
                      u64 *new_size)
 {
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct sisl_lxt_entry *lxt, *lxt_old;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
@@ -706,7 +710,7 @@ static int shrink_lxt(struct afu *afu,
                kfree(lxt_old);
        *new_size = my_new_size;
 out:
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -728,7 +732,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
                          struct ctx_info *ctxi,
                          struct dk_cxlflash_resize *resize)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct afu *afu = cfg->afu;
@@ -751,13 +756,13 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
        nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
        new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
 
-       pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx,"
-                "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle,
-                resize->req_size, new_size);
+       dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu req_size=%llu new_size=%llu\n",
+               __func__, ctxid, resize->rsrc_handle, resize->req_size,
+               new_size);
 
        if (unlikely(gli->mode != MODE_VIRTUAL)) {
-               pr_debug("%s: LUN mode does not support resize! (%d)\n",
-                        __func__, gli->mode);
+               dev_dbg(dev, "%s: LUN mode does not support resize mode=%d\n",
+                       __func__, gli->mode);
                rc = -EINVAL;
                goto out;
 
@@ -766,7 +771,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
        if (!ctxi) {
                ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
                if (unlikely(!ctxi)) {
-                       pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid);
+                       dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
+                               __func__, ctxid);
                        rc = -EINVAL;
                        goto out;
                }
@@ -776,7 +782,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
 
        rhte = get_rhte(ctxi, rhndl, lli);
        if (unlikely(!rhte)) {
-               pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl);
+               dev_dbg(dev, "%s: Bad resource handle rhndl=%u\n",
+                       __func__, rhndl);
                rc = -EINVAL;
                goto out;
        }
@@ -794,8 +801,8 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
 out:
        if (put_ctx)
                put_context(ctxi);
-       pr_debug("%s: resized to %lld returning rc=%d\n",
-                __func__, resize->last_lba, rc);
+       dev_dbg(dev, "%s: resized to %llu returning rc=%d\n",
+               __func__, resize->last_lba, rc);
        return rc;
 }
 
@@ -815,6 +822,7 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
        u32 chan;
        u32 lind;
        struct afu *afu = cfg->afu;
+       struct device *dev = &cfg->dev->dev;
        struct sisl_global_map __iomem *agm = &afu->afu_map->global;
 
        mutex_lock(&global.mutex);
@@ -828,15 +836,15 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
                if (lli->port_sel == BOTH_PORTS) {
                        writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
                        writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
-                       pr_debug("%s: Virtual LUN on slot %d  id0=%llx, "
-                                "id1=%llx\n", __func__, lind,
-                                lli->lun_id[0], lli->lun_id[1]);
+                       dev_dbg(dev, "%s: Virtual LUN on slot %d  id0=%llx "
+                               "id1=%llx\n", __func__, lind,
+                               lli->lun_id[0], lli->lun_id[1]);
                } else {
                        chan = PORT2CHAN(lli->port_sel);
                        writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
-                       pr_debug("%s: Virtual LUN on slot %d chan=%d, "
-                                "id=%llx\n", __func__, lind, chan,
-                                lli->lun_id[chan]);
+                       dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d "
+                               "id=%llx\n", __func__, lind, chan,
+                               lli->lun_id[chan]);
                }
        }
 
@@ -860,6 +868,7 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
        u32 lind;
        int rc = 0;
        struct afu *afu = cfg->afu;
+       struct device *dev = &cfg->dev->dev;
        struct sisl_global_map __iomem *agm = &afu->afu_map->global;
 
        mutex_lock(&global.mutex);
@@ -882,8 +891,8 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
                writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
                writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
                cfg->promote_lun_index++;
-               pr_debug("%s: Virtual LUN on slot %d  id0=%llx, id1=%llx\n",
-                        __func__, lind, lli->lun_id[0], lli->lun_id[1]);
+               dev_dbg(dev, "%s: Virtual LUN on slot %d  id0=%llx id1=%llx\n",
+                       __func__, lind, lli->lun_id[0], lli->lun_id[1]);
        } else {
                /*
                 * If this LUN is visible only from one port, we will put
@@ -898,14 +907,14 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
                lind = lli->lun_index = cfg->last_lun_index[chan];
                writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
                cfg->last_lun_index[chan]--;
-               pr_debug("%s: Virtual LUN on slot %d  chan=%d, id=%llx\n",
-                        __func__, lind, chan, lli->lun_id[chan]);
+               dev_dbg(dev, "%s: Virtual LUN on slot %d  chan=%d id=%llx\n",
+                       __func__, lind, chan, lli->lun_id[chan]);
        }
 
        lli->in_table = true;
 out:
        mutex_unlock(&global.mutex);
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 }
 
@@ -923,7 +932,7 @@ out:
  */
 int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
        struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
@@ -942,14 +951,14 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
        struct ctx_info *ctxi = NULL;
        struct sisl_rht_entry *rhte = NULL;
 
-       pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+       dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
 
        /* Setup the LUNs block allocator on first call */
        mutex_lock(&gli->mutex);
        if (gli->mode == MODE_NONE) {
                rc = init_vlun(lli);
                if (rc) {
-                       dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
+                       dev_err(dev, "%s: init_vlun failed rc=%d\n",
                                __func__, rc);
                        rc = -ENOMEM;
                        goto err0;
@@ -958,29 +967,28 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
 
        rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
        if (unlikely(rc)) {
-               dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n",
-                       __func__);
+               dev_err(dev, "%s: Failed attach to LUN (VIRTUAL)\n", __func__);
                goto err0;
        }
        mutex_unlock(&gli->mutex);
 
        rc = init_luntable(cfg, lli);
        if (rc) {
-               dev_err(dev, "%s: call to init_luntable failed rc=%d!\n",
-                       __func__, rc);
+               dev_err(dev, "%s: init_luntable failed rc=%d\n", __func__, rc);
                goto err1;
        }
 
        ctxi = get_context(cfg, rctxid, lli, 0);
        if (unlikely(!ctxi)) {
-               dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+               dev_err(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
                rc = -EINVAL;
                goto err1;
        }
 
        rhte = rhte_checkout(ctxi, lli);
        if (unlikely(!rhte)) {
-               dev_err(dev, "%s: too many opens for this context\n", __func__);
+               dev_err(dev, "%s: too many opens ctxid=%llu\n",
+                       __func__, ctxid);
                rc = -EMFILE;   /* too many opens  */
                goto err1;
        }
@@ -996,7 +1004,7 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
        resize.rsrc_handle = rsrc_handle;
        rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
        if (rc) {
-               dev_err(dev, "%s: resize failed rc %d\n", __func__, rc);
+               dev_err(dev, "%s: resize failed rc=%d\n", __func__, rc);
                goto err2;
        }
        last_lba = resize.last_lba;
@@ -1013,8 +1021,8 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
 out:
        if (likely(ctxi))
                put_context(ctxi);
-       pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n",
-                __func__, rsrc_handle, rc, last_lba);
+       dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
+               __func__, rsrc_handle, rc, last_lba);
        return rc;
 
 err2:
@@ -1047,6 +1055,8 @@ static int clone_lxt(struct afu *afu,
                     struct sisl_rht_entry *rhte,
                     struct sisl_rht_entry *rhte_src)
 {
+       struct cxlflash_cfg *cfg = afu->parent;
+       struct device *dev = &cfg->dev->dev;
        struct sisl_lxt_entry *lxt;
        u32 ngrps;
        u64 aun;                /* chunk# allocated by block allocator */
@@ -1101,7 +1111,7 @@ static int clone_lxt(struct afu *afu,
 
        cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
 
-       pr_debug("%s: returning\n", __func__);
+       dev_dbg(dev, "%s: returning\n", __func__);
        return 0;
 }
 
@@ -1120,7 +1130,8 @@ static int clone_lxt(struct afu *afu,
 int cxlflash_disk_clone(struct scsi_device *sdev,
                        struct dk_cxlflash_clone *clone)
 {
-       struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+       struct cxlflash_cfg *cfg = shost_priv(sdev->host);
+       struct device *dev = &cfg->dev->dev;
        struct llun_info *lli = sdev->hostdata;
        struct glun_info *gli = lli->parent;
        struct blka *blka = &gli->blka;
@@ -1140,8 +1151,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
        bool found;
        LIST_HEAD(sidecar);
 
-       pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n",
-                __func__, ctxid_src, ctxid_dst);
+       dev_dbg(dev, "%s: ctxid_src=%llu ctxid_dst=%llu\n",
+               __func__, ctxid_src, ctxid_dst);
 
        /* Do not clone yourself */
        if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1151,16 +1162,16 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
 
        if (unlikely(gli->mode != MODE_VIRTUAL)) {
                rc = -EINVAL;
-               pr_debug("%s: Clone not supported on physical LUNs! (%d)\n",
-                        __func__, gli->mode);
+               dev_dbg(dev, "%s: Only supported on virtual LUNs mode=%u\n",
+                       __func__, gli->mode);
                goto out;
        }
 
        ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
        ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
        if (unlikely(!ctxi_src || !ctxi_dst)) {
-               pr_debug("%s: Bad context! (%llu,%llu)\n", __func__,
-                        ctxid_src, ctxid_dst);
+               dev_dbg(dev, "%s: Bad context ctxid_src=%llu ctxid_dst=%llu\n",
+                       __func__, ctxid_src, ctxid_dst);
                rc = -EINVAL;
                goto out;
        }
@@ -1185,8 +1196,8 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
                        lun_access_dst = kzalloc(sizeof(*lun_access_dst),
                                                 GFP_KERNEL);
                        if (unlikely(!lun_access_dst)) {
-                               pr_err("%s: Unable to allocate lun_access!\n",
-                                      __func__);
+                               dev_err(dev, "%s: lun_access allocation fail\n",
+                                       __func__);
                                rc = -ENOMEM;
                                goto out;
                        }
@@ -1197,7 +1208,7 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
        }
 
        if (unlikely(!ctxi_src->rht_out)) {
-               pr_debug("%s: Nothing to clone!\n", __func__);
+               dev_dbg(dev, "%s: Nothing to clone\n", __func__);
                goto out_success;
        }
 
@@ -1256,7 +1267,7 @@ out:
                put_context(ctxi_src);
        if (ctxi_dst)
                put_context(ctxi_dst);
-       pr_debug("%s: returning rc=%d\n", __func__, rc);
+       dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
        return rc;
 
 err:
index 5f75e63..256dd67 100644
@@ -2768,16 +2768,12 @@ static int adpt_i2o_activate_hba(adpt_hba* pHba)
  
 static int adpt_i2o_online_hba(adpt_hba* pHba)
 {
-       if (adpt_i2o_systab_send(pHba) < 0) {
-               adpt_i2o_delete_hba(pHba);
+       if (adpt_i2o_systab_send(pHba) < 0)
                return -1;
-       }
        /* In READY state */
 
-       if (adpt_i2o_enable_hba(pHba) < 0) {
-               adpt_i2o_delete_hba(pHba);
+       if (adpt_i2o_enable_hba(pHba) < 0)
                return -1;
-       }
 
        /* In OPERATIONAL state  */
        return 0;
index d6e53ae..6432a50 100644
@@ -237,7 +237,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)
                flags |= IRQF_SHARED;
 
        esas2r_log(ESAS2R_LOG_INFO,
-                  "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
+                  "esas2r_claim_interrupts irq=%d (%p, %s, %lx)",
                   a->pcid->irq, a, a->name, flags);
 
        if (request_irq(a->pcid->irq,
index 3e84834..b35ed38 100644
@@ -1301,7 +1301,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
        ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
        if (ioctl == NULL) {
                esas2r_log(ESAS2R_LOG_WARN,
-                          "ioctl_handler kzalloc failed for %d bytes",
+                          "ioctl_handler kzalloc failed for %zu bytes",
                           sizeof(struct atto_express_ioctl));
                return -ENOMEM;
        }
index 7b6397b..75b9d23 100644
@@ -61,8 +61,8 @@ enum {
 #endif
 };
 
-int esas2r_log(const long level, const char *format, ...);
-int esas2r_log_dev(const long level,
+__printf(2, 3) int esas2r_log(const long level, const char *format, ...);
+__printf(3, 4) int esas2r_log_dev(const long level,
                   const struct device *dev,
                   const char *format,
                   ...);
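
The kernel's __printf(m, n) macro expands to __attribute__((format(printf, m, n))): argument m is the format string and the variadic arguments start at n. Annotating esas2r_log() and esas2r_log_dev() is what lets the compiler flag the mismatches fixed in the neighbouring hunks (%d for a size_t, %x for an unsigned long). A standalone illustration:

	#include <stdarg.h>
	#include <stdio.h>

	__attribute__((format(printf, 2, 3)))
	static int my_log(int level, const char *fmt, ...)
	{
		va_list ap;
		int n;

		va_start(ap, fmt);
		n = vfprintf(stderr, fmt, ap);
		va_end(ap);
		return n;
	}

	int main(void)
	{
		my_log(1, "ok: %zu bytes\n", sizeof(int));
		/* my_log(1, "bad: %d bytes\n", sizeof(int));  <- now warns */
		return 0;
	}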
index 5092c82..f2e9d8a 100644
@@ -198,7 +198,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
                                              GFP_KERNEL);
                if (a->local_atto_ioctl == NULL) {
                        esas2r_log(ESAS2R_LOG_WARN,
-                                  "write_hw kzalloc failed for %d bytes",
+                                  "write_hw kzalloc failed for %zu bytes",
                                   sizeof(struct atto_ioctl));
                        return -ENOMEM;
                }
@@ -1186,7 +1186,7 @@ retry:
                } else {
                        esas2r_log(ESAS2R_LOG_CRIT,
                                   "unable to allocate a request for a "
-                                  "device reset (%d:%d)!",
+                                  "device reset (%d:%llu)!",
                                   cmd->device->id,
                                   cmd->device->lun);
                }
index 59150ca..86af57f 100644
@@ -277,6 +277,7 @@ static struct scsi_host_template fcoe_shost_template = {
        .name = "FCoE Driver",
        .proc_name = FCOE_NAME,
        .queuecommand = fc_queuecommand,
+       .eh_timed_out = fc_eh_timed_out,
        .eh_abort_handler = fc_eh_abort,
        .eh_device_reset_handler = fc_eh_device_reset,
        .eh_host_reset_handler = fc_eh_host_reset,
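
This one-liner repeats across many drivers in the series: the command timeout handler moves out of the transport class and into each driver's scsi_host_template (the libsas and libiscsi hunks further down delete the old transport-level wiring). The shape of the change, using the FC transport's handler as the example:

	static struct scsi_host_template example_sht = {
		.module			= THIS_MODULE,
		.name			= "example",
		.queuecommand		= example_queuecommand,
		.eh_timed_out		= fc_eh_timed_out,	/* per-host hook */
		.eh_abort_handler	= example_eh_abort,
	};

iSCSI hosts point the same field at iscsi_eh_cmd_timed_out() and SRP hosts at srp_timed_out(), as the iscsi_tcp and ibmvscsi hunks below show.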
index 58ce902..ba58b79 100644
@@ -106,6 +106,7 @@ static struct scsi_host_template fnic_host_template = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .queuecommand = fnic_queuecommand,
+       .eh_timed_out = fc_eh_timed_out,
        .eh_abort_handler = fnic_abort_cmd,
        .eh_device_reset_handler = fnic_device_reset,
        .eh_host_reset_handler = fnic_host_reset,
index 6f9665d..67c8dac 100644
 #include <linux/blkdev.h>
 #include <linux/module.h>
 #include <scsi/scsi_host.h>
-#include "g_NCR5380.h"
-#include "NCR5380.h"
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/isa.h>
 #include <linux/pnp.h>
 #include <linux/interrupt.h>
 
+/* Definitions for the core NCR5380 driver. */
+
+#define NCR5380_read(reg) \
+       ioread8(hostdata->io + hostdata->offset + (reg))
+#define NCR5380_write(reg, value) \
+       iowrite8(value, hostdata->io + hostdata->offset + (reg))
+
+#define NCR5380_implementation_fields \
+       int offset; \
+       int c400_ctl_status; \
+       int c400_blk_cnt; \
+       int c400_host_buf; \
+       int io_width
+
+#define NCR5380_dma_xfer_len            generic_NCR5380_dma_xfer_len
+#define NCR5380_dma_recv_setup          generic_NCR5380_pread
+#define NCR5380_dma_send_setup          generic_NCR5380_pwrite
+#define NCR5380_dma_residual            NCR5380_dma_residual_none
+
+#define NCR5380_intr                    generic_NCR5380_intr
+#define NCR5380_queue_command           generic_NCR5380_queue_command
+#define NCR5380_abort                   generic_NCR5380_abort
+#define NCR5380_bus_reset               generic_NCR5380_bus_reset
+#define NCR5380_info                    generic_NCR5380_info
+
+#define NCR5380_io_delay(x)             udelay(x)
+
+#include "NCR5380.h"
+
+#define DRV_MODULE_NAME "g_NCR5380"
+
+#define NCR53C400_mem_base 0x3880
+#define NCR53C400_host_buffer 0x3900
+#define NCR53C400_region_size 0x3a00
+
+#define BOARD_NCR5380 0
+#define BOARD_NCR53C400 1
+#define BOARD_NCR53C400A 2
+#define BOARD_DTC3181E 3
+#define BOARD_HP_C2502 4
+
+#define IRQ_AUTO 254
+
 #define MAX_CARDS 8
 
 /* old-style parameters for compatibility */
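
The block of macros now sitting at the top of g_NCR5380.c is the NCR5380 core's configuration interface: the shared core is compiled once per board driver and binds to whatever register accessors and entry-point names the includer defines before pulling in NCR5380.h. Reduced to its skeleton (macro bodies illustrative):

	/* board driver, before including the core header */
	#define NCR5380_read(reg)	ioread8(hostdata->io + (reg))
	#define NCR5380_write(reg, v)	iowrite8(v, hostdata->io + (reg))
	#define NCR5380_intr		myboard_intr
	#define NCR5380_queue_command	myboard_queue_command
	#include "NCR5380.h"	/* core now compiles against the macros above */

Since the macros are only meaningful at that single point of inclusion, a separate g_NCR5380.h (deleted below) no longer earned its keep.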
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
deleted file mode 100644
index 81b22d9..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Generic Generic NCR5380 driver defines
- *
- * Copyright 1993, Drew Eckhardt
- *     Visionary Computing
- *     (Unix and Linux consulting and custom programming)
- *     drew@colorado.edu
- *      +1 (303) 440-4894
- *
- * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin
- *    K.Lentin@cs.monash.edu.au
- */
-
-#ifndef GENERIC_NCR5380_H
-#define GENERIC_NCR5380_H
-
-#define DRV_MODULE_NAME "g_NCR5380"
-
-#define NCR5380_read(reg) \
-       ioread8(hostdata->io + hostdata->offset + (reg))
-#define NCR5380_write(reg, value) \
-       iowrite8(value, hostdata->io + hostdata->offset + (reg))
-
-#define NCR5380_implementation_fields \
-       int offset; \
-       int c400_ctl_status; \
-       int c400_blk_cnt; \
-       int c400_host_buf; \
-       int io_width;
-
-#define NCR53C400_mem_base 0x3880
-#define NCR53C400_host_buffer 0x3900
-#define NCR53C400_region_size 0x3a00
-
-#define NCR5380_dma_xfer_len           generic_NCR5380_dma_xfer_len
-#define NCR5380_dma_recv_setup         generic_NCR5380_pread
-#define NCR5380_dma_send_setup         generic_NCR5380_pwrite
-#define NCR5380_dma_residual           NCR5380_dma_residual_none
-
-#define NCR5380_intr generic_NCR5380_intr
-#define NCR5380_queue_command generic_NCR5380_queue_command
-#define NCR5380_abort generic_NCR5380_abort
-#define NCR5380_bus_reset generic_NCR5380_bus_reset
-#define NCR5380_info generic_NCR5380_info
-
-#define NCR5380_io_delay(x)            udelay(x)
-
-#define BOARD_NCR5380  0
-#define BOARD_NCR53C400        1
-#define BOARD_NCR53C400A 2
-#define BOARD_DTC3181E 3
-#define BOARD_HP_C2502 4
-
-#define IRQ_AUTO       254
-
-#endif /* GENERIC_NCR5380_H */
index c0cd505..9216dea 100644
@@ -95,6 +95,7 @@ struct hisi_sas_port {
 
 struct hisi_sas_cq {
        struct hisi_hba *hisi_hba;
+       struct tasklet_struct tasklet;
        int     rd_point;
        int     id;
 };
index d50e9cf..53637a9 100644
@@ -71,6 +71,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
                             struct hisi_sas_slot *slot)
 {
        struct device *dev = &hisi_hba->pdev->dev;
+       struct domain_device *device = task->dev;
+       struct hisi_sas_device *sas_dev = device->lldd_dev;
 
        if (!slot->task)
                return;
@@ -97,6 +99,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
        slot->task = NULL;
        slot->port = NULL;
        hisi_sas_slot_index_free(hisi_hba, slot->idx);
+       if (sas_dev)
+               atomic64_dec(&sas_dev->running_req);
        /* slot memory is fully zeroed when it is reused */
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -141,11 +145,10 @@ static void hisi_sas_slot_abort(struct work_struct *work)
        struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
        struct scsi_cmnd *cmnd = task->uldd_task;
        struct hisi_sas_tmf_task tmf_task;
-       struct domain_device *device = task->dev;
-       struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct scsi_lun lun;
        struct device *dev = &hisi_hba->pdev->dev;
        int tag = abort_slot->idx;
+       unsigned long flags;
 
        if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
                dev_err(dev, "cannot abort slot for non-ssp task\n");
@@ -159,11 +162,11 @@ static void hisi_sas_slot_abort(struct work_struct *work)
        hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
 out:
        /* Do cleanup for this task */
+       spin_lock_irqsave(&hisi_hba->lock, flags);
        hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
+       spin_unlock_irqrestore(&hisi_hba->lock, flags);
        if (task->task_done)
                task->task_done(task);
-       if (sas_dev)
-               atomic64_dec(&sas_dev->running_req);
 }
 
 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
@@ -1118,7 +1121,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
        }
 
 exit:
-       dev_info(dev, "internal task abort: task to dev %016llx task=%p "
+       dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
                "resp: 0x%x sts 0x%x\n",
                SAS_ADDR(device->sas_addr),
                task,
@@ -1450,7 +1453,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 
        refclk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(refclk))
-               dev_info(dev, "no ref clk property\n");
+               dev_dbg(dev, "no ref clk property\n");
        else
                hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
 
@@ -1549,10 +1552,6 @@ int hisi_sas_probe(struct platform_device *pdev,
 
        hisi_sas_init_add(hisi_hba);
 
-       rc = hisi_hba->hw->hw_init(hisi_hba);
-       if (rc)
-               goto err_out_ha;
-
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha;
@@ -1561,6 +1560,10 @@ int hisi_sas_probe(struct platform_device *pdev,
        if (rc)
                goto err_out_register_ha;
 
+       rc = hisi_hba->hw->hw_init(hisi_hba);
+       if (rc)
+               goto err_out_register_ha;
+
        scsi_scan_host(shost);
 
        return 0;
index 8a1be0b..854fbea 100644
@@ -1596,6 +1596,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
                        hisi_hba->complete_hdr[queue];
        u32 irq_value, rd_point = cq->rd_point, wr_point;
 
+       spin_lock(&hisi_hba->lock);
        irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
 
        hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
@@ -1628,6 +1629,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
        /* update rd_point */
        cq->rd_point = rd_point;
        hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+       spin_unlock(&hisi_hba->lock);
 
        return IRQ_HANDLED;
 }
index b934aec..1b21445 100644
 #define TXID_AUTO                      (PORT_BASE + 0xb8)
 #define TXID_AUTO_CT3_OFF              1
 #define TXID_AUTO_CT3_MSK              (0x1 << TXID_AUTO_CT3_OFF)
+#define TX_HARDRST_OFF          2
+#define TX_HARDRST_MSK          (0x1 << TX_HARDRST_OFF)
 #define RX_IDAF_DWORD0                 (PORT_BASE + 0xc4)
 #define RX_IDAF_DWORD1                 (PORT_BASE + 0xc8)
 #define RX_IDAF_DWORD2                 (PORT_BASE + 0xcc)
 #define RX_IDAF_DWORD5                 (PORT_BASE + 0xd8)
 #define RX_IDAF_DWORD6                 (PORT_BASE + 0xdc)
 #define RXOP_CHECK_CFG_H               (PORT_BASE + 0xfc)
+#define CON_CONTROL                    (PORT_BASE + 0x118)
 #define DONE_RECEIVED_TIME             (PORT_BASE + 0x11c)
 #define CHL_INT0                       (PORT_BASE + 0x1b4)
 #define CHL_INT0_HOTPLUG_TOUT_OFF      0
 #define ITCT_HDR_MCR_MSK               (0xf << ITCT_HDR_MCR_OFF)
 #define ITCT_HDR_VLN_OFF               9
 #define ITCT_HDR_VLN_MSK               (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_SMP_TIMEOUT_OFF       16
+#define ITCT_HDR_SMP_TIMEOUT_8US       1
+#define ITCT_HDR_SMP_TIMEOUT           (ITCT_HDR_SMP_TIMEOUT_8US * \
+                                        250) /* 2ms */
+#define ITCT_HDR_AWT_CONTINUE_OFF      25
 #define ITCT_HDR_PORT_ID_OFF           28
 #define ITCT_HDR_PORT_ID_MSK           (0xf << ITCT_HDR_PORT_ID_OFF)
 /* qw2 */
@@ -526,6 +534,8 @@ enum {
 #define SATA_PROTOCOL_FPDMA            0x8
 #define SATA_PROTOCOL_ATAPI            0x10
 
+static void hisi_sas_link_timeout_disable_link(unsigned long data);
+
 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
 {
        void __iomem *regs = hisi_hba->regs + off;
@@ -693,6 +703,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
        qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
                (device->linkrate << ITCT_HDR_MCR_OFF) |
                (1 << ITCT_HDR_VLN_OFF) |
+               (ITCT_HDR_SMP_TIMEOUT << ITCT_HDR_SMP_TIMEOUT_OFF) |
+               (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
                (port->id << ITCT_HDR_PORT_ID_OFF));
        itct->qw0 = cpu_to_le64(qw0);
 
@@ -702,7 +714,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 
        /* qw2 */
        if (!dev_is_sata(device))
-               itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+               itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
                                        (0x1ULL << ITCT_HDR_BITLT_OFF) |
                                        (0x32ULL << ITCT_HDR_MCTLT_OFF) |
                                        (0x1ULL << ITCT_HDR_RTOLT_OFF));
@@ -711,7 +723,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 static void free_device_v2_hw(struct hisi_hba *hisi_hba,
                              struct hisi_sas_device *sas_dev)
 {
-       u64 qw0, dev_id = sas_dev->device_id;
+       u64 dev_id = sas_dev->device_id;
        struct device *dev = &hisi_hba->pdev->dev;
        struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
        u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
@@ -735,8 +747,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
                        dev_dbg(dev, "got clear ITCT done interrupt\n");
 
                        /* invalid the itct state*/
-                       qw0 = cpu_to_le64(itct->qw0);
-                       qw0 &= ~(1 << ITCT_HDR_VALID_OFF);
+                       memset(itct, 0, sizeof(struct hisi_sas_itct));
                        hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
                                         ENT_INT_SRC3_ITC_INT_MSK);
 
@@ -978,6 +989,50 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
                         upper_32_bits(hisi_hba->initial_fis_dma));
 }
 
+static void hisi_sas_link_timeout_enable_link(unsigned long data)
+{
+       struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+       int i, reg_val;
+
+       for (i = 0; i < hisi_hba->n_phy; i++) {
+               reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL);
+               if (!(reg_val & BIT(0))) {
+                       hisi_sas_phy_write32(hisi_hba, i,
+                                       CON_CONTROL, 0x7);
+                       break;
+               }
+       }
+
+       hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
+       mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
+}
+
+static void hisi_sas_link_timeout_disable_link(unsigned long data)
+{
+       struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+       int i, reg_val;
+
+       reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
+       for (i = 0; i < hisi_hba->n_phy && reg_val; i++) {
+               if (reg_val & BIT(i)) {
+                       hisi_sas_phy_write32(hisi_hba, i,
+                                       CON_CONTROL, 0x6);
+                       break;
+               }
+       }
+
+       hisi_hba->timer.function = hisi_sas_link_timeout_enable_link;
+       mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
+{
+       hisi_hba->timer.data = (unsigned long)hisi_hba;
+       hisi_hba->timer.function = hisi_sas_link_timeout_disable_link;
+       hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
+       add_timer(&hisi_hba->timer);
+}
+
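
These two callbacks deliberately ping-pong: disable_link re-points the timer at enable_link 100ms out, and enable_link points back at disable_link 900ms out, giving a steady toggle for as long as the quirk is armed. With the pre-4.15 timer API used here (the callback takes an unsigned long data cookie), the generic shape is:

	static void phase_b(unsigned long data);

	static void phase_a(unsigned long data)
	{
		struct my_ctx *ctx = (struct my_ctx *)data;	/* illustrative */

		/* ... phase A work ... */
		ctx->timer.function = phase_b;
		mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(100));
	}

	static void phase_b(unsigned long data)
	{
		struct my_ctx *ctx = (struct my_ctx *)data;

		/* ... phase B work ... */
		ctx->timer.function = phase_a;
		mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(900));
	}

Anything armed this way must be torn down on unload, which is exactly what the new del_timer() in hisi_sas_v2_remove() at the end of this file handles.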
 static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
 {
        struct device *dev = &hisi_hba->pdev->dev;
@@ -1025,14 +1080,21 @@ static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 
 static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 {
+       struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+       u32 txid_auto;
+
        stop_phy_v2_hw(hisi_hba, phy_no);
+       if (phy->identify.device_type == SAS_END_DEVICE) {
+               txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+               hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+                                       txid_auto | TX_HARDRST_MSK);
+       }
        msleep(100);
        start_phy_v2_hw(hisi_hba, phy_no);
 }
 
-static void start_phys_v2_hw(unsigned long data)
+static void start_phys_v2_hw(struct hisi_hba *hisi_hba)
 {
-       struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
        int i;
 
        for (i = 0; i < hisi_hba->n_phy; i++)
@@ -1041,10 +1103,7 @@ static void start_phys_v2_hw(unsigned long data)
 
 static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
 {
-       struct timer_list *timer = &hisi_hba->timer;
-
-       setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
-       mod_timer(timer, jiffies + HZ);
+       start_phys_v2_hw(hisi_hba);
 }
 
 static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -1771,8 +1830,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
        }
 
 out:
-       if (sas_dev)
-               atomic64_dec(&sas_dev->running_req);
 
        hisi_sas_slot_task_free(hisi_hba, task, slot);
        sts = ts->stat;
@@ -2020,9 +2077,12 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
        if (phy->identify.device_type == SAS_END_DEVICE)
                phy->identify.target_port_protocols =
                        SAS_PROTOCOL_SSP;
-       else if (phy->identify.device_type != SAS_PHY_UNUSED)
+       else if (phy->identify.device_type != SAS_PHY_UNUSED) {
                phy->identify.target_port_protocols =
                        SAS_PROTOCOL_SMP;
+               if (!timer_pending(&hisi_hba->timer))
+                       set_link_timer_quirk(hisi_hba);
+       }
        queue_work(hisi_hba->wq, &phy->phyup_ws);
 
 end:
@@ -2033,10 +2093,23 @@ end:
        return res;
 }
 
+static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba)
+{
+       u32 port_state;
+
+       port_state = hisi_sas_read32(hisi_hba, PORT_STATE);
+       if (port_state & 0x1ff)
+               return true;
+
+       return false;
+}
+
 static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
        int res = 0;
        u32 phy_state, sl_ctrl, txid_auto;
+       struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+       struct hisi_sas_port *port = phy->port;
 
        hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
 
@@ -2046,6 +2119,10 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
        sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
        hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
                             sl_ctrl & ~SL_CONTROL_CTA_MSK);
+       if (port && !get_wideport_bitmap_v2_hw(hisi_hba, port->id))
+               if (!check_any_wideports_v2_hw(hisi_hba) &&
+                               timer_pending(&hisi_hba->timer))
+                       del_timer(&hisi_hba->timer);
 
        txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
        hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
@@ -2481,21 +2558,19 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+static void cq_tasklet_v2_hw(unsigned long val)
 {
-       struct hisi_sas_cq *cq = p;
+       struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
        struct hisi_hba *hisi_hba = cq->hisi_hba;
        struct hisi_sas_slot *slot;
        struct hisi_sas_itct *itct;
        struct hisi_sas_complete_v2_hdr *complete_queue;
-       u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id;
+       u32 rd_point = cq->rd_point, wr_point, dev_id;
        int queue = cq->id;
 
        complete_queue = hisi_hba->complete_hdr[queue];
-       irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
-
-       hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
 
+       spin_lock(&hisi_hba->lock);
        wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
                                   (0x14 * queue));
 
@@ -2545,6 +2620,19 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
        /* update rd_point */
        cq->rd_point = rd_point;
        hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+       spin_unlock(&hisi_hba->lock);
+}
+
+static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
+{
+       struct hisi_sas_cq *cq = p;
+       struct hisi_hba *hisi_hba = cq->hisi_hba;
+       int queue = cq->id;
+
+       hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+       tasklet_schedule(&cq->tasklet);
+
        return IRQ_HANDLED;
 }
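
The rework above splits completion handling in two: the hard-IRQ handler now only acks the interrupt source and schedules a tasklet, and the completion-queue walk (the expensive part) runs later in softirq context. The generic split looks like:

	static void my_cq_tasklet(unsigned long val)
	{
		struct my_cq *cq = (struct my_cq *)val;	/* illustrative type */

		/* drain the completion queue, holding the hba lock */
	}

	static irqreturn_t my_cq_interrupt(int irq, void *p)
	{
		struct my_cq *cq = p;

		/* ack hardware, then defer the heavy lifting */
		tasklet_schedule(&cq->tasklet);
		return IRQ_HANDLED;
	}

	/* paired with tasklet_init(&cq->tasklet, my_cq_tasklet,
	 * (unsigned long)cq) at setup, as in the interrupt_init hunk below */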
 
@@ -2726,6 +2814,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 
        for (i = 0; i < hisi_hba->queue_count; i++) {
                int idx = i + 96; /* First cq interrupt is irq96 */
+               struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+               struct tasklet_struct *t = &cq->tasklet;
 
                irq = irq_map[idx];
                if (!irq) {
@@ -2742,6 +2832,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                                irq, rc);
                        return -ENOENT;
                }
+               tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
        }
 
        return 0;
@@ -2807,6 +2898,12 @@ static int hisi_sas_v2_probe(struct platform_device *pdev)
 
 static int hisi_sas_v2_remove(struct platform_device *pdev)
 {
+       struct sas_ha_struct *sha = platform_get_drvdata(pdev);
+       struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+       if (timer_pending(&hisi_hba->timer))
+               del_timer(&hisi_hba->timer);
+
        return hisi_sas_remove(pdev);
 }
 
index c611412..524a0c7 100644
@@ -9263,13 +9263,9 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
                access = SA5_ioaccel_mode1_access;
                writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
                writel(4, &h->cfgtable->HostWrite.CoalIntCount);
-       } else {
-               if (trans_support & CFGTBL_Trans_io_accel2) {
+       } else
+               if (trans_support & CFGTBL_Trans_io_accel2)
                        access = SA5_ioaccel_mode2_access;
-                       writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
-                       writel(4, &h->cfgtable->HostWrite.CoalIntCount);
-               }
-       }
        writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
        if (hpsa_wait_for_mode_change_ack(h)) {
                dev_err(&h->pdev->dev,
index 64e9829..bf6cdc1 100644
@@ -578,38 +578,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 }
 
 static struct access_method SA5_access = {
-       SA5_submit_command,
-       SA5_intr_mask,
-       SA5_intr_pending,
-       SA5_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_intr_mask,
+       .intr_pending = SA5_intr_pending,
+       .command_completed = SA5_completed,
 };
 
 static struct access_method SA5_ioaccel_mode1_access = {
-       SA5_submit_command,
-       SA5_performant_intr_mask,
-       SA5_ioaccel_mode1_intr_pending,
-       SA5_ioaccel_mode1_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .intr_pending = SA5_ioaccel_mode1_intr_pending,
+       .command_completed = SA5_ioaccel_mode1_completed,
 };
 
 static struct access_method SA5_ioaccel_mode2_access = {
-       SA5_submit_command_ioaccel2,
-       SA5_performant_intr_mask,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command_ioaccel2,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access = {
-       SA5_submit_command,
-       SA5_performant_intr_mask,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 static struct access_method SA5_performant_access_no_read = {
-       SA5_submit_command_no_read,
-       SA5_performant_intr_mask,
-       SA5_performant_intr_pending,
-       SA5_performant_completed,
+       .submit_command = SA5_submit_command_no_read,
+       .set_intr_mask = SA5_performant_intr_mask,
+       .intr_pending = SA5_performant_intr_pending,
+       .command_completed = SA5_performant_completed,
 };
 
 struct board_type {
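
Switching the access_method tables to designated initializers binds each function to its slot by name, so reordering or extending the struct can no longer silently shuffle the pointers. The idiom in plain C:

	#include <stdio.h>

	struct ops {
		void (*submit)(int cmd);
		int  (*pending)(void);
	};

	static void my_submit(int cmd)	{ printf("submit %d\n", cmd); }
	static int  my_pending(void)	{ return 0; }

	/* positional: breaks silently if struct ops is reordered */
	static struct ops a = { my_submit, my_pending };

	/* designated: each pointer is tied to its field by name */
	static struct ops b = {
		.submit		= my_submit,
		.pending	= my_pending,
	};

	int main(void)
	{
		a.submit(0);
		b.submit(1);
		return b.pending();
	}

a and b are identical objects here; the win is maintainability, not behavior.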
index 78b72c2..2c92dab 100644
@@ -3090,6 +3090,7 @@ static struct scsi_host_template driver_template = {
        .name = "IBM POWER Virtual FC Adapter",
        .proc_name = IBMVFC_NAME,
        .queuecommand = ibmvfc_queuecommand,
+       .eh_timed_out = fc_eh_timed_out,
        .eh_abort_handler = ibmvfc_eh_abort_handler,
        .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
        .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
index 50cd011..1deb0a9 100644
@@ -2072,6 +2072,7 @@ static struct scsi_host_template driver_template = {
        .name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
        .proc_name = "ibmvscsi",
        .queuecommand = ibmvscsi_queuecommand,
+       .eh_timed_out = srp_timed_out,
        .eh_abort_handler = ibmvscsi_eh_abort_handler,
        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
        .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
index ace4f1f..4228aba 100644
@@ -967,6 +967,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
        .sg_tablesize           = 4096,
        .max_sectors            = 0xFFFF,
        .cmd_per_lun            = ISCSI_DEF_CMD_PER_LUN,
+       .eh_timed_out           = iscsi_eh_cmd_timed_out,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler= iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
index f9b6fba..834d121 100644
@@ -1930,7 +1930,7 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
                return 0;
 }
 
-static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 {
        enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
        struct iscsi_task *task = NULL, *running_task;
@@ -2063,6 +2063,7 @@ done:
                     "timer reset" : "nh");
        return rc;
 }
+EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
 
 static void iscsi_check_transport_timeouts(unsigned long data)
 {
@@ -2585,8 +2586,6 @@ int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev)
        if (!shost->cmd_per_lun)
                shost->cmd_per_lun = ISCSI_DEF_CMD_PER_LUN;
 
-       if (!shost->transportt->eh_timed_out)
-               shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
        return scsi_add_host(shost, pdev);
 }
 EXPORT_SYMBOL_GPL(iscsi_host_add);
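
The pieces above fit together: iscsi_eh_cmd_timed_out() loses its static, gains an EXPORT_SYMBOL_GPL(), and the transport-level default in iscsi_host_add() goes away, so each iSCSI host driver now names the handler directly in its scsi_host_template (as iscsi_sw_tcp_sht does in the previous hunk). The export pattern, in brief (the declaration presumably moves to a shared header such as include/scsi/libiscsi.h, not shown in this diff):

	/* libiscsi.c: was static, now part of the module's GPL-only ABI */
	enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
	{
		/* ... */
	}
	EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);

	/* any iSCSI host driver's template: */
	.eh_timed_out = iscsi_eh_cmd_timed_out,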
index 362da44..15ef8e2 100644
@@ -560,7 +560,6 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
        i = to_sas_internal(stt);
        i->dft = dft;
        stt->create_work_queue = 1;
-       stt->eh_timed_out = sas_scsi_timed_out;
        stt->eh_strategy_handler = sas_scsi_recover_host;
 
        return stt;
index 9cf0bc2..b306b78 100644
@@ -64,8 +64,6 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
 int  sas_register_ports(struct sas_ha_struct *sas_ha);
 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
 
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
-
 int  sas_init_events(struct sas_ha_struct *sas_ha);
 void sas_disable_revalidation(struct sas_ha_struct *ha);
 void sas_enable_revalidation(struct sas_ha_struct *ha);
index 519dac4..9bd55bc 100644
@@ -803,13 +803,6 @@ out:
                    shost->host_failed, tries);
 }
 
-enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
-{
-       scmd_dbg(cmd, "command %p timed out\n", cmd);
-
-       return BLK_EH_NOT_HANDLED;
-}
-
 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 {
        struct domain_device *dev = sdev_to_domain_dev(sdev);
index 8a20b4e..6593b07 100644
@@ -727,7 +727,6 @@ struct lpfc_hba {
        uint32_t cfg_fcp_io_channel;
        uint32_t cfg_total_seg_cnt;
        uint32_t cfg_sg_seg_cnt;
-       uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
        uint64_t cfg_soft_wwnn;
        uint64_t cfg_soft_wwpn;
index c847755..50cf402 100644
@@ -2073,6 +2073,13 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        phba->soft_wwn_enable = 1;
+
+       dev_printk(KERN_WARNING, &phba->pcidev->dev,
+                  "lpfc%d: soft_wwpn assignment has been enabled.\n",
+                  phba->brd_no);
+       dev_printk(KERN_WARNING, &phba->pcidev->dev,
+                  "  The soft_wwpn feature is not supported by Broadcom.");
+
        return count;
 }
 static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
@@ -2143,7 +2150,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
        phba->soft_wwn_enable = 0;
 
        rc = lpfc_wwn_set(buf, cnt, wwpn);
-       if (!rc) {
+       if (rc) {
                /* not able to set wwpn, unlock it */
                phba->soft_wwn_enable = 1;
                return rc;
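
This is a polarity fix: lpfc_wwn_set() follows the usual kernel convention of returning 0 on success and a negative errno on failure, so the unwind path (re-enabling soft_wwn_enable and bailing out) must run when rc is nonzero, not when it is zero. Generically:

	rc = do_set(...);	/* 0 = success, -Exxx = failure */
	if (rc) {
		/* undo partial state, then propagate the error */
		return rc;
	}

The same inversion is fixed in lpfc_soft_wwnn_store() in the next hunk.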
@@ -2224,7 +2231,7 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        rc = lpfc_wwn_set(buf, cnt, wwnn);
-       if (!rc) {
+       if (rc) {
                /* Allow wwnn to be set many times, as long as the enable
                 * is set. However, once the wwpn is set, everything locks.
                 */
@@ -2435,7 +2442,8 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
        else
                phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
        phba->cfg_oas_flags &= ~OAS_LUN_VALID;
-       phba->cfg_oas_priority = phba->cfg_XLanePriority;
+       if (phba->cfg_oas_priority == 0)
+               phba->cfg_oas_priority = phba->cfg_XLanePriority;
        phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
        return count;
 }
@@ -2561,7 +2569,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
                        rc = -ENOMEM;
        } else {
                lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
-                                    (struct lpfc_name *)tgt_wwpn, lun);
+                                    (struct lpfc_name *)tgt_wwpn, lun, pri);
        }
        return rc;
 
@@ -2585,7 +2593,8 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
  */
 static uint64_t
 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
-                     uint8_t tgt_wwpn[], uint32_t *lun_status)
+                     uint8_t tgt_wwpn[], uint32_t *lun_status,
+                     uint32_t *lun_pri)
 {
        uint64_t found_lun;
 
@@ -2598,7 +2607,7 @@ lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
                                   &phba->sli4_hba.oas_next_lun,
                                   (struct lpfc_name *)vpt_wwpn,
                                   (struct lpfc_name *)tgt_wwpn,
-                                  &found_lun, lun_status))
+                                  &found_lun, lun_status, lun_pri))
                return found_lun;
        else
                return NOT_OAS_ENABLED_LUN;
@@ -2670,7 +2679,8 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
 
        oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
                                        phba->cfg_oas_tgt_wwpn,
-                                       &phba->cfg_oas_lun_status);
+                                       &phba->cfg_oas_lun_status,
+                                       &phba->cfg_oas_priority);
        if (oas_lun != NOT_OAS_ENABLED_LUN)
                phba->cfg_oas_flags |= OAS_LUN_VALID;
 
@@ -2701,6 +2711,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        uint64_t scsi_lun;
+       uint32_t pri;
        ssize_t rc;
 
        if (!phba->cfg_fof)
@@ -2718,17 +2729,20 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
        if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
                return -EINVAL;
 
+       pri = phba->cfg_oas_priority;
+       if (pri == 0)
+               pri = phba->cfg_XLanePriority;
+
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
                        "priority 0x%x with oas state %d\n",
                        wwn_to_u64(phba->cfg_oas_vpt_wwpn),
                        wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
-                       phba->cfg_oas_priority, phba->cfg_oas_lun_state);
+                       pri, phba->cfg_oas_lun_state);
 
        rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
                                       phba->cfg_oas_tgt_wwpn, scsi_lun,
-                                      phba->cfg_oas_lun_state,
-                                      phba->cfg_oas_priority);
+                                      phba->cfg_oas_lun_state, pri);
        if (rc)
                return rc;
 
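
The two OAS hunks above share one pattern: an effective priority is resolved by falling back to the XLane default only when no explicit per-host priority has been set. A minimal sketch of that resolution, assuming the cfg_* field names from this patch:

    /* Sketch, assuming the cfg_* fields shown in this patch: resolve the
     * effective OAS priority, preferring an explicit per-host setting. */
    static u32 lpfc_oas_effective_priority(struct lpfc_hba *phba)
    {
            u32 pri = phba->cfg_oas_priority;

            if (pri == 0)           /* unset: fall back to the XLane default */
                    pri = phba->cfg_XLanePriority;
            return pri;
    }
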
@@ -4669,14 +4683,6 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
 LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
            LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
 
-/*
- * This parameter will be depricated, the driver cannot limit the
- * protection data s/g list.
- */
-LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT,
-           LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT,
-           "Max Protection Scatter Gather Segment Count");
-
 /*
  * lpfc_enable_mds_diags: Enable MDS Diagnostics
  *       0  = MDS Diagnostics disabled (default)
@@ -4766,7 +4772,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_sg_seg_cnt,
        &dev_attr_lpfc_max_scsicmpl_time,
        &dev_attr_lpfc_stat_data_ctrl,
-       &dev_attr_lpfc_prot_sg_seg_cnt,
        &dev_attr_lpfc_aer_support,
        &dev_attr_lpfc_aer_state_cleanup,
        &dev_attr_lpfc_sriov_nr_virtfn,
@@ -5060,6 +5065,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
  * Dynamic FC Host Attributes Support
  */
 
+/**
+ * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
+ * @shost: kernel scsi host pointer.
+ **/
+static void
+lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
+{
+       struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+
+       lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
+                                     sizeof fc_host_symbolic_name(shost));
+}
+
 /**
  * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
  * @shost: kernel scsi host pointer.
@@ -5597,6 +5615,8 @@ struct fc_function_template lpfc_transport_functions = {
        .show_host_supported_fc4s = 1,
        .show_host_supported_speeds = 1,
        .show_host_maxframe_size = 1,
+
+       .get_host_symbolic_name = lpfc_get_host_symbolic_name,
        .show_host_symbolic_name = 1,
 
        /* dynamic attributes the driver supports */
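
Wiring .get_host_symbolic_name into the transport template lets the FC transport refresh the cached symbolic name from the driver before exposing it through sysfs. A hedged caller-side sketch (the real scsi_transport_fc plumbing differs in detail):

    /* Sketch of how a transport-class attribute read might use the new
     * callback; not the actual scsi_transport_fc source. */
    static ssize_t show_symbolic_name(struct Scsi_Host *shost, char *buf,
                                      const struct fc_function_template *tt)
    {
            if (tt->get_host_symbolic_name)
                    tt->get_host_symbolic_name(shost); /* driver refreshes cache */
            return snprintf(buf, PAGE_SIZE, "%s\n", fc_host_symbolic_name(shost));
    }
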
@@ -5664,6 +5684,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
        .show_host_supported_fc4s = 1,
        .show_host_supported_speeds = 1,
        .show_host_maxframe_size = 1,
+
+       .get_host_symbolic_name = lpfc_get_host_symbolic_name,
        .show_host_symbolic_name = 1,
 
        /* dynamic attributes the driver supports */
@@ -5768,7 +5790,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        phba->cfg_soft_wwnn = 0L;
        phba->cfg_soft_wwpn = 0L;
        lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
-       lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
        lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
        lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
        lpfc_aer_support_init(phba, lpfc_aer_support);
index 15d2bfd..309643a 100644 (file)
@@ -480,7 +480,7 @@ void lpfc_sli4_offline_eratt(struct lpfc_hba *);
 struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *,
                                                struct lpfc_name *,
                                                struct lpfc_name *,
-                                               uint64_t, bool);
+                                               uint64_t, uint32_t,  bool);
 void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*);
 struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
                                        struct list_head *list,
@@ -489,9 +489,10 @@ struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *,
 bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
                         struct lpfc_name *, uint64_t, uint8_t);
 bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *,
-                         struct lpfc_name *, uint64_t);
+                         struct lpfc_name *, uint64_t, uint8_t);
 bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
                            struct lpfc_name *, uint64_t *, struct lpfc_name *,
-                           struct lpfc_name *, uint64_t *, uint32_t *);
+                           struct lpfc_name *, uint64_t *,
+                           uint32_t *, uint32_t *);
 int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
 void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
index 63bef45..3a1f1a2 100644 (file)
@@ -1999,6 +1999,9 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        if (sp->cmn.fcphHigh < FC_PH3)
                sp->cmn.fcphHigh = FC_PH3;
 
+       sp->cmn.valid_vendor_ver_level = 0;
+       memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
+
        lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
                "Issue PLOGI:     did:x%x",
                did, 0, 0);
@@ -3990,6 +3993,9 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
                } else {
                        memcpy(pcmd, &vport->fc_sparam,
                               sizeof(struct serv_parm));
+
+                       sp->cmn.valid_vendor_ver_level = 0;
+                       memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
                }
 
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
@@ -8851,8 +8857,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 {
        struct ls_rjt stat;
 
-       if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
-               BUG();
+       BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
 
        switch (rspiocb->iocb.ulpStatus) {
                case IOSTAT_NPORT_RJT:
index 8226543..3b970d3 100644 (file)
@@ -360,6 +360,12 @@ struct csp {
  * Word 1 Bit 30 in PLOGI request is random offset
  */
 #define virtual_fabric_support randomOffset /* Word 1, bit 30 */
+/*
+ * Word 1 Bit 29 in common service parameter is overloaded.
+ * Word 1 Bit 29 in FLOGI response is multiple NPort assignment
+ * Word 1 Bit 29 in FLOGI/PLOGI request is Valid Vendor Version Level
+ */
+#define valid_vendor_ver_level response_multiple_NPort /* Word 1, bit 29 */
 #ifdef __BIG_ENDIAN_BITFIELD
        uint16_t request_multiple_Nport:1;      /* FC Word 1, bit 31 */
        uint16_t randomOffset:1;        /* FC Word 1, bit 30 */
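
The #define above aliases one storage bit to two protocol meanings, so the existing bitfield accessor works for both interpretations. A minimal illustration of the idiom, with hypothetical field names:

    /* Hypothetical illustration of the aliasing idiom: one physical bit,
     * two names, depending on whether the frame is a request or response. */
    struct word1_bits {
            uint16_t response_multiple_NPort:1;  /* meaning in a FLOGI LS_ACC */
    };
    #define valid_vendor_ver_level response_multiple_NPort /* meaning in requests */

    /* w1.response_multiple_NPort and w1.valid_vendor_ver_level
     * read and write the same bit. */
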
index ad350d9..1180a22 100644 (file)
@@ -5452,7 +5452,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
                        device_data = lpfc_create_device_data(phba,
                                                        &vport->fc_portname,
                                                        &target_wwpn,
-                                                       sdev->lun, true);
+                                                       sdev->lun,
+                                                       phba->cfg_XLanePriority,
+                                                       true);
                        if (!device_data)
                                return -ENOMEM;
                        spin_lock_irqsave(&phba->devicelock, flags);
@@ -5587,7 +5589,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
 struct lpfc_device_data*
 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                        struct lpfc_name *target_wwpn, uint64_t lun,
-                       bool atomic_create)
+                       uint32_t pri, bool atomic_create)
 {
 
        struct lpfc_device_data *lun_info;
@@ -5614,7 +5616,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
               sizeof(struct lpfc_name));
        lun_info->device_id.lun = lun;
        lun_info->oas_enabled = false;
-       lun_info->priority = phba->cfg_XLanePriority;
+       lun_info->priority = pri;
        lun_info->available = false;
        return lun_info;
 }
@@ -5716,7 +5718,8 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                       struct lpfc_name *found_vport_wwpn,
                       struct lpfc_name *found_target_wwpn,
                       uint64_t *found_lun,
-                      uint32_t *found_lun_status)
+                      uint32_t *found_lun_status,
+                      uint32_t *found_lun_pri)
 {
 
        unsigned long flags;
@@ -5763,6 +5766,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                                                OAS_LUN_STATUS_EXISTS;
                                else
                                        *found_lun_status = 0;
+                               *found_lun_pri = lun_info->priority;
                                if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
                                        memset(vport_wwpn, 0x0,
                                               sizeof(struct lpfc_name));
@@ -5824,13 +5828,14 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
        if (lun_info) {
                if (!lun_info->oas_enabled)
                        lun_info->oas_enabled = true;
+               lun_info->priority = pri;
                spin_unlock_irqrestore(&phba->devicelock, flags);
                return true;
        }
 
        /* Create an lun info structure and add to list of luns */
        lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
-                                          false);
+                                          pri, false);
        if (lun_info) {
                lun_info->oas_enabled = true;
                lun_info->priority = pri;
@@ -5864,7 +5869,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
  **/
 bool
 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
-                    struct lpfc_name *target_wwpn, uint64_t lun)
+                    struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
 {
 
        struct lpfc_device_data *lun_info;
@@ -5882,6 +5887,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
                                          target_wwpn, lun);
        if (lun_info) {
                lun_info->oas_enabled = false;
+               lun_info->priority = pri;
                if (!lun_info->available)
                        lpfc_delete_device_data(phba, lun_info);
                spin_unlock_irqrestore(&phba->devicelock, flags);
@@ -5923,6 +5929,7 @@ struct scsi_host_template lpfc_template = {
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
@@ -5949,6 +5956,7 @@ struct scsi_host_template lpfc_vport_template = {
        .proc_name              = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler = lpfc_device_reset_handler,
        .eh_target_reset_handler = lpfc_target_reset_handler,
index a78a3df..d977a47 100644 (file)
@@ -120,6 +120,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+       /* ensure WQE bcopy flushed before doorbell write */
+       wmb();
 
        /* Update the host index before invoking device */
        host_index = q->host_index;
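
The new wmb() enforces the classic producer ordering: the WQE copy must be globally visible in host memory before the doorbell write tells the adapter to fetch it. The generic shape of the pattern, sketched with assumed names:

    /* Generic doorbell-ordering sketch (not lpfc code; names assumed). */
    static void post_entry(void *slot, const void *wqe, size_t entry_size,
                           void __iomem *doorbell, u32 db_val)
    {
            memcpy(slot, wqe, entry_size);  /* 1) publish the entry            */
            wmb();                          /* 2) order entry vs. doorbell     */
            writel(db_val, doorbell);       /* 3) adapter may now fetch safely */
    }
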
@@ -6313,7 +6315,8 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
                         LPFC_SLI4_MBX_EMBED);
 
        mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
-       mbox->u.mqe.un.set_host_data.param_len = 8;
+       mbox->u.mqe.un.set_host_data.param_len =
+                                       LPFC_HOST_OS_DRIVER_VERSION_SIZE;
        snprintf(mbox->u.mqe.un.set_host_data.data,
                 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
                 "Linux %s v"LPFC_DRIVER_VERSION,
@@ -10035,6 +10038,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                iabt->ulpCommand = CMD_CLOSE_XRI_CN;
 
        abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
+       abtsiocbp->vport = vport;
 
        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0339 Abort xri x%x, original iotag x%x, "
@@ -17226,7 +17230,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
        unsigned long iflags = 0;
        char *fail_msg = NULL;
        struct lpfc_sglq *sglq;
-       union lpfc_wqe wqe;
+       union lpfc_wqe128 wqe128;
+       union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
        uint32_t txq_cnt = 0;
 
        spin_lock_irqsave(&pring->ring_lock, iflags);
@@ -17265,9 +17270,9 @@ lpfc_drain_txq(struct lpfc_hba *phba)
                piocbq->sli4_xritag = sglq->sli4_xritag;
                if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
                        fail_msg = "to convert bpl to sgl";
-               else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
+               else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
                        fail_msg = "to convert iocb to wqe";
-               else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+               else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
                        fail_msg = " - Wq is full";
                else
                        lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
index 50bfc43..0ee0623 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.2"
+#define LPFC_DRIVER_VERSION "11.2.0.4"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index c27f4b7..e18bbc6 100644 (file)
@@ -537,6 +537,12 @@ enable_vport(struct fc_vport *fc_vport)
 
        spin_lock_irq(shost->host_lock);
        vport->load_flag |= FC_LOADING;
+       if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+               spin_unlock_irq(shost->host_lock);
+               lpfc_issue_init_vpi(vport);
+               goto out;
+       }
+
        vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
        spin_unlock_irq(shost->host_lock);
 
@@ -557,6 +563,8 @@ enable_vport(struct fc_vport *fc_vport)
        } else {
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        }
+
+out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                         "1827 Vport Enabled.\n");
        return VPORT_OK;
index ccb68d1..196acc7 100644 (file)
@@ -154,7 +154,7 @@ __asm__ __volatile__                                        \
 static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
                                 unsigned char *dst, int len)
 {
-       unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
+       u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
        unsigned char *d = dst;
        int n = len;
        int transferred;
@@ -257,7 +257,7 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
                                  unsigned char *src, int len)
 {
        unsigned char *s = src;
-       unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
+       u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
        int n = len;
        int transferred;
 
@@ -381,10 +381,10 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
 
        hostdata = shost_priv(instance);
        hostdata->base = pio_mem->start;
-       hostdata->io = (void *)pio_mem->start;
+       hostdata->io = (u8 __iomem *)pio_mem->start;
 
        if (pdma_mem && setup_use_pdma)
-               hostdata->pdma_io = (void *)pdma_mem->start;
+               hostdata->pdma_io = (u8 __iomem *)pdma_mem->start;
        else
                host_flags |= FLAG_NO_PSEUDO_DMA;
 
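
These hunks annotate the PDMA and register pointers as `u8 __iomem *`, which lets sparse flag any plain dereference of an MMIO address. A minimal sketch of the convention; STATUS_REG and CONTROL_REG are hypothetical offsets, not mac_scsi registers:

    /* Sketch of the __iomem convention with assumed register offsets. */
    static int poke_device(resource_size_t base)
    {
            u8 __iomem *regs = ioremap(base, 0x100);
            u8 v;

            if (!regs)
                    return -ENOMEM;
            v = readb(regs + STATUS_REG);           /* not: v = regs[STATUS_REG]; */
            writeb(v | 0x01, regs + CONTROL_REG);   /* accessors, no plain derefs */
            iounmap(regs);
            return 0;
    }
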
index fdd519c..e7e5974 100644 (file)
@@ -35,8 +35,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "06.812.07.00-rc1"
-#define MEGASAS_RELDATE                                "August 22, 2016"
+#define MEGASAS_VERSION                                "07.701.16.00-rc1"
+#define MEGASAS_RELDATE                                "February 2, 2017"
 
 /*
  * Device IDs
 #define PCI_DEVICE_ID_LSI_INTRUDER_24          0x00cf
 #define PCI_DEVICE_ID_LSI_CUTLASS_52           0x0052
 #define PCI_DEVICE_ID_LSI_CUTLASS_53           0x0053
+#define PCI_DEVICE_ID_LSI_VENTURA                  0x0014
+#define PCI_DEVICE_ID_LSI_HARPOON                  0x0016
+#define PCI_DEVICE_ID_LSI_TOMCAT                   0x0017
+#define PCI_DEVICE_ID_LSI_VENTURA_4PORT                0x001B
+#define PCI_DEVICE_ID_LSI_CRUSADER_4PORT       0x001C
 
 /*
  * Intel HBA SSDIDs
  */
 
 /*
- * MFI stands for  MegaRAID SAS FW Interface. This is just a moniker for 
+ * MFI stands for  MegaRAID SAS FW Interface. This is just a moniker for
  * protocol between the software and firmware. Commands are issued using
  * "message frames"
  */
@@ -690,6 +695,18 @@ struct  MR_PD_INFO {
        u8 reserved1[512-428];
 } __packed;
 
+/*
+ * Definition of structure used to expose attributes of VD or JBOD
+ * (this structure is to be filled by firmware when MR_DCMD_DRV_GET_TARGET_PROP
+ * is fired by driver)
+ */
+struct MR_TARGET_PROPERTIES {
+       u32    max_io_size_kb;
+       u32    device_qdepth;
+       u32    sector_size;
+       u8     reserved[500];
+} __packed;
+
  /*
  * defines the physical drive address structure
  */
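
Later hunks in megaraid_sas_base.c consume this structure after firing MR_DCMD_DRV_GET_TARGET_PROP; the fields come back little-endian from firmware. A consumer-side sketch using the names above (flow assumed; the real reader is in the megasas_set_static_target_properties() hunk further down):

    /* Sketch: reading the firmware-filled target-properties buffer. */
    static void read_target_props(struct megasas_instance *instance)
    {
            struct MR_TARGET_PROPERTIES *tp = instance->tgt_prop;
            u32 qd    = le32_to_cpu(tp->device_qdepth);     /* 0: no override   */
            u32 io_kb = le32_to_cpu(tp->max_io_size_kb);    /* non-zero on NVMe */
            /* ... apply qd / io_kb to the scsi_device ... */
    }
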
@@ -728,7 +745,6 @@ struct megasas_pd_list {
        u16             tid;
        u8             driveType;
        u8             driveState;
-       u8             interface;
 } __packed;
 
  /*
@@ -1312,7 +1328,55 @@ struct megasas_ctrl_info {
 #endif
        } adapterOperations3;
 
-       u8          pad[0x800-0x7EC];
+       struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u8 reserved:7;
+       /* Indicates whether the CPLD image is part of
+        *  the package and stored in flash
+        */
+       u8 cpld_in_flash:1;
+#else
+       u8 cpld_in_flash:1;
+       u8 reserved:7;
+#endif
+       u8 reserved1[3];
+       /* Null terminated string. Has the version
+        *  information if cpld_in_flash = FALSE
+        */
+       u8 userCodeDefinition[12];
+       } cpld;  /* Valid only if upgradableCPLD is TRUE */
+
+       struct {
+       #if defined(__BIG_ENDIAN_BITFIELD)
+               u16 reserved:8;
+               u16 fw_swaps_bbu_vpd_info:1;
+               u16 support_pd_map_target_id:1;
+               u16 support_ses_ctrl_in_multipathcfg:1;
+               u16 image_upload_supported:1;
+               u16 support_encrypted_mfc:1;
+               u16 supported_enc_algo:1;
+               u16 support_ibutton_less:1;
+               u16 ctrl_info_ext_supported:1;
+       #else
+
+               u16 ctrl_info_ext_supported:1;
+               u16 support_ibutton_less:1;
+               u16 supported_enc_algo:1;
+               u16 support_encrypted_mfc:1;
+               u16 image_upload_supported:1;
+               /* FW supports LUN based and target port based association
+                * for the SES device connected in multipath mode
+                */
+               u16 support_ses_ctrl_in_multipathcfg:1;
+               /* FW defines Jbod target Id within MR_PD_CFG_SEQ */
+               u16 support_pd_map_target_id:1;
+               /* FW swaps relevant fields in MR_BBU_VPD_INFO_FIXED to
+                *  provide the data in little endian order
+                */
+               u16 fw_swaps_bbu_vpd_info:1;
+               u16 reserved:8;
+       #endif
+               } adapter_operations4;
+       u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
 } __packed;
 
 /*
@@ -1339,12 +1403,15 @@ struct megasas_ctrl_info {
 
 #define MEGASAS_FW_BUSY                                1
 
-#define VD_EXT_DEBUG 0
+/* Driver's internal logging levels */
+#define OCR_LOGS    (1 << 0)
 
 #define SCAN_PD_CHANNEL        0x1
 #define SCAN_VD_CHANNEL        0x2
 
 #define MEGASAS_KDUMP_QUEUE_DEPTH               100
+#define MR_LARGE_IO_MIN_SIZE                   (32 * 1024)
+#define MR_R1_LDIO_PIGGYBACK_DEFAULT           4
 
 enum MR_SCSI_CMD_TYPE {
        READ_WRITE_LDIO = 0,
@@ -1391,7 +1458,7 @@ enum FW_BOOT_CONTEXT {
  */
 #define MEGASAS_INT_CMDS                       32
 #define MEGASAS_SKINNY_INT_CMDS                        5
-#define MEGASAS_FUSION_INTERNAL_CMDS           5
+#define MEGASAS_FUSION_INTERNAL_CMDS           8
 #define MEGASAS_FUSION_IOCTL_CMDS              3
 #define MEGASAS_MFI_IOCTL_CMDS                 27
 
@@ -1429,13 +1496,19 @@ enum FW_BOOT_CONTEXT {
 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
 #define MR_MAX_MSIX_REG_ARRAY                   16
 #define MR_RDPQ_MODE_OFFSET                    0X00800000
+
+#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT      16
+#define MR_MAX_RAID_MAP_SIZE_MASK              0x1FF
+#define MR_MIN_MAP_SIZE                                0x10000 /* 64k */
+
 #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET                0X01000000
 
 /*
 * register set for both 1068 and 1078 controllers
 * structure extended for 1078 registers
 */
+
 struct megasas_register_set {
        u32     doorbell;                       /*0000h*/
        u32     fusion_seq_offset;              /*0004h*/
@@ -1471,14 +1544,14 @@ struct megasas_register_set {
        u32     outbound_scratch_pad ;          /*00B0h*/
        u32     outbound_scratch_pad_2;         /*00B4h*/
        u32     outbound_scratch_pad_3;         /*00B8h*/
+       u32     outbound_scratch_pad_4;         /*00BCh*/
 
-       u32     reserved_4;                     /*00BCh*/
 
        u32     inbound_low_queue_port ;        /*00C0h*/
 
        u32     inbound_high_queue_port ;       /*00C4h*/
 
-       u32     reserved_5;                     /*00C8h*/
+       u32     inbound_single_queue_port;      /*00C8h*/
        u32     res_6[11];                      /*CCh*/
        u32     host_diag;
        u32     seq_offset;
@@ -1544,33 +1617,35 @@ union megasas_sgl_frame {
 typedef union _MFI_CAPABILITIES {
        struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-               u32     reserved:20;
-               u32     support_qd_throttling:1;
-               u32     support_fp_rlbypass:1;
-               u32     support_vfid_in_ioframe:1;
-               u32     support_ext_io_size:1;
-               u32     support_ext_queue_depth:1;
-               u32     security_protocol_cmds_fw:1;
-               u32     support_core_affinity:1;
-               u32     support_ndrive_r1_lb:1;
-               u32     support_max_255lds:1;
-               u32     support_fastpath_wb:1;
-               u32     support_additional_msix:1;
-               u32     support_fp_remote_lun:1;
+       u32     reserved:19;
+       u32     support_pd_map_target_id:1;
+       u32     support_qd_throttling:1;
+       u32     support_fp_rlbypass:1;
+       u32     support_vfid_in_ioframe:1;
+       u32     support_ext_io_size:1;
+       u32     support_ext_queue_depth:1;
+       u32     security_protocol_cmds_fw:1;
+       u32     support_core_affinity:1;
+       u32     support_ndrive_r1_lb:1;
+       u32     support_max_255lds:1;
+       u32     support_fastpath_wb:1;
+       u32     support_additional_msix:1;
+       u32     support_fp_remote_lun:1;
 #else
-               u32     support_fp_remote_lun:1;
-               u32     support_additional_msix:1;
-               u32     support_fastpath_wb:1;
-               u32     support_max_255lds:1;
-               u32     support_ndrive_r1_lb:1;
-               u32     support_core_affinity:1;
-               u32     security_protocol_cmds_fw:1;
-               u32     support_ext_queue_depth:1;
-               u32     support_ext_io_size:1;
-               u32     support_vfid_in_ioframe:1;
-               u32     support_fp_rlbypass:1;
-               u32     support_qd_throttling:1;
-               u32     reserved:20;
+       u32     support_fp_remote_lun:1;
+       u32     support_additional_msix:1;
+       u32     support_fastpath_wb:1;
+       u32     support_max_255lds:1;
+       u32     support_ndrive_r1_lb:1;
+       u32     support_core_affinity:1;
+       u32     security_protocol_cmds_fw:1;
+       u32     support_ext_queue_depth:1;
+       u32     support_ext_io_size:1;
+       u32     support_vfid_in_ioframe:1;
+       u32     support_fp_rlbypass:1;
+       u32     support_qd_throttling:1;
+       u32     support_pd_map_target_id:1;
+       u32     reserved:19;
 #endif
        } mfi_capabilities;
        __le32          reg;
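
The capability union above uses the kernel's mirrored-bitfield idiom: the same flags are declared in opposite order under __BIG_ENDIAN_BITFIELD so that the overlaid `reg` word has one wire format regardless of host endianness. A condensed sketch of the idiom:

    /* Sketch of the mirrored-bitfield idiom (example names, not MFI fields). */
    typedef union _EXAMPLE_CAPS {
            struct {
    #if defined(__BIG_ENDIAN_BITFIELD)
                    u32 reserved:31;
                    u32 feature_a:1;        /* bit 0 of the LE wire word */
    #else
                    u32 feature_a:1;
                    u32 reserved:31;
    #endif
            } bits;
            __le32 reg;                     /* single on-the-wire layout */
    } EXAMPLE_CAPS;
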
@@ -1803,6 +1878,8 @@ union megasas_frame {
 struct MR_PRIV_DEVICE {
        bool is_tm_capable;
        bool tm_busy;
+       atomic_t r1_ldio_hint;
+       u8   interface_type;
 };
 struct megasas_cmd;
 
@@ -1994,17 +2071,24 @@ struct MR_DRV_SYSTEM_INFO {
 };
 
 enum MR_PD_TYPE {
-                UNKNOWN_DRIVE = 0,
-                PARALLEL_SCSI = 1,
-                SAS_PD = 2,
-                SATA_PD = 3,
-                FC_PD = 4,
+       UNKNOWN_DRIVE = 0,
+       PARALLEL_SCSI = 1,
+       SAS_PD = 2,
+       SATA_PD = 3,
+       FC_PD = 4,
+       NVME_PD = 5,
 };
 
 /* JBOD Queue depth definitions */
 #define MEGASAS_SATA_QD        32
 #define MEGASAS_SAS_QD 64
 #define MEGASAS_DEFAULT_PD_QD  64
+#define MEGASAS_NVME_QD                32
+
+#define MR_DEFAULT_NVME_PAGE_SIZE      4096
+#define MR_DEFAULT_NVME_PAGE_SHIFT     12
+#define MR_DEFAULT_NVME_MDTS_KB                128
+#define MR_NVME_PAGE_SIZE_MASK         0x000000FF
 
 struct megasas_instance {
 
@@ -2022,6 +2106,8 @@ struct megasas_instance {
        dma_addr_t hb_host_mem_h;
        struct MR_PD_INFO *pd_info;
        dma_addr_t pd_info_h;
+       struct MR_TARGET_PROPERTIES *tgt_prop;
+       dma_addr_t tgt_prop_h;
 
        __le32 *reply_queue;
        dma_addr_t reply_queue_h;
@@ -2039,6 +2125,7 @@ struct megasas_instance {
        u32 crash_dump_drv_support;
        u32 crash_dump_app_support;
        u32 secure_jbod_support;
+       u32 support_morethan256jbod; /* FW support for more than 256 PD/JBOD */
        bool use_seqnum_jbod_fp;   /* Added for PD sequence */
        spinlock_t crashdump_lock;
 
@@ -2051,6 +2138,7 @@ struct megasas_instance {
 
        u16 max_num_sge;
        u16 max_fw_cmds;
+       u16 max_mpt_cmds;
        u16 max_mfi_cmds;
        u16 max_scsi_cmds;
        u16 ldio_threshold;
@@ -2065,6 +2153,7 @@ struct megasas_instance {
        /* used to sync fire the cmd to fw */
        spinlock_t hba_lock;
        /* used to synch producer, consumer ptrs in dpc */
+       spinlock_t stream_lock;
        spinlock_t completion_lock;
        struct dma_pool *frame_dma_pool;
        struct dma_pool *sense_dma_pool;
@@ -2087,6 +2176,11 @@ struct megasas_instance {
        atomic_t fw_outstanding;
        atomic_t ldio_outstanding;
        atomic_t fw_reset_no_pci_access;
+       atomic_t ieee_sgl;
+       atomic_t prp_sgl;
+       atomic_t sge_holes_type1;
+       atomic_t sge_holes_type2;
+       atomic_t sge_holes_type3;
 
        struct megasas_instance_template *instancet;
        struct tasklet_struct isr_tasklet;
@@ -2142,6 +2236,13 @@ struct megasas_instance {
        u8 is_rdpq;
        bool dev_handle;
        bool fw_sync_cache_support;
+       u32 mfi_frame_size;
+       bool is_ventura;
+       bool msix_combined;
+       u16 max_raid_mapsize;
+       /* preferred count to send as LDIO irrespective of FP capability */
+       u8  r1_ldio_hint_default;
+       u32 nvme_page_size;
 };
 struct MR_LD_VF_MAP {
        u32 size;
@@ -2230,12 +2331,12 @@ struct megasas_instance_template {
        u32 (*init_adapter)(struct megasas_instance *);
        u32 (*build_and_issue_cmd) (struct megasas_instance *,
                                    struct scsi_cmnd *);
-       int (*issue_dcmd)(struct megasas_instance *instance,
+       void (*issue_dcmd)(struct megasas_instance *instance,
                            struct megasas_cmd *cmd);
 };
 
-#define MEGASAS_IS_LOGICAL(scp)                                                \
-       ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+#define MEGASAS_IS_LOGICAL(sdev)                                       \
+       ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 
 #define MEGASAS_DEV_INDEX(scp)                                         \
        (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +   \
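
Re-keying MEGASAS_IS_LOGICAL on the scsi_device rather than the scsi_cmnd lets the slave_alloc/slave_configure paths, which only hold an sdev, share the macro with the I/O paths. A usage sketch with hypothetical helper names:

    /* Hypothetical helpers showing both call sites of the reworked macro. */
    static bool megasas_cmd_on_ld(struct scsi_cmnd *scmd)
    {
            return MEGASAS_IS_LOGICAL(scmd->device);        /* I/O path     */
    }

    static bool megasas_sdev_is_syspd(struct scsi_device *sdev)
    {
            return !MEGASAS_IS_LOGICAL(sdev);               /* slave_* path */
    }
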
@@ -2346,7 +2447,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                    struct IO_REQUEST_INFO *io_info,
                    struct RAID_CONTEXT *pRAID_Context,
                    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
 struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
 u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
 u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
@@ -2354,13 +2455,16 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
 u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
 
 __le16 get_updated_dev_handle(struct megasas_instance *instance,
-       struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+                             struct LD_LOAD_BALANCE_INFO *lbInfo,
+                             struct IO_REQUEST_INFO *in_info,
+                             struct MR_DRV_RAID_MAP_ALL *drv_map);
 void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
        struct LD_LOAD_BALANCE_INFO *lbInfo);
 int megasas_get_ctrl_info(struct megasas_instance *instance);
 /* PD sequence */
 int
 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend);
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev);
 int megasas_set_crash_dump_params(struct megasas_instance *instance,
        u8 crash_buf_state);
 void megasas_free_host_crash_buffer(struct megasas_instance *instance);
@@ -2382,4 +2486,7 @@ void megasas_update_sdev_properties(struct scsi_device *sdev);
 int megasas_reset_fusion(struct Scsi_Host *shost, int reason);
 int megasas_task_abort_fusion(struct scsi_cmnd *scmd);
 int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
+u32 mega_mod64(u64 dividend, u32 divisor);
+int megasas_alloc_fusion_context(struct megasas_instance *instance);
+void megasas_free_fusion_context(struct megasas_instance *instance);
 #endif                         /*LSI_MEGARAID_SAS_H */
index d5cf15e..7ac9a9e 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/uio.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/fs.h>
 #include <linux/compat.h>
 #include <linux/blkdev.h>
@@ -116,8 +117,10 @@ static int megasas_ld_list_query(struct megasas_instance *instance,
 static int megasas_issue_init_mfi(struct megasas_instance *instance);
 static int megasas_register_aen(struct megasas_instance *instance,
                                u32 seq_num, u32 class_locale_word);
-static int
-megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
+static void megasas_get_pd_info(struct megasas_instance *instance,
+                               struct scsi_device *sdev);
+static int megasas_get_target_prop(struct megasas_instance *instance,
+                                  struct scsi_device *sdev);
 /*
  * PCI ID table for all supported controllers
  */
@@ -155,6 +158,12 @@ static struct pci_device_id megasas_pci_table[] = {
        /* Intruder 24 port*/
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
        {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
+       /* VENTURA */
+       {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+       {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
+       {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
+       {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
+       {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
        {}
 };
 
@@ -196,12 +205,12 @@ void megasas_fusion_ocr_wq(struct work_struct *work);
 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
                                         int initial);
 
-int
+void
 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
        instance->instancet->fire_cmd(instance,
                cmd->frame_phys_addr, 0, instance->reg_set);
-       return 0;
+       return;
 }
 
 /**
@@ -259,6 +268,8 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
        cmd->scmd = NULL;
        cmd->frame_count = 0;
        cmd->flags = 0;
+       memset(cmd->frame, 0, instance->mfi_frame_size);
+       cmd->frame->io.context = cpu_to_le32(cmd->index);
        if (!fusion && reset_devices)
                cmd->frame->hdr.cmd = MFI_CMD_INVALID;
        list_add(&cmd->list, (&instance->cmd_pool)->next);
@@ -989,13 +1000,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
        frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
        frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
 
-       if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
-               (instance->instancet->issue_dcmd(instance, cmd))) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_NOT_FIRED;
        }
 
+       instance->instancet->issue_dcmd(instance, cmd);
+
        return wait_and_poll(instance, cmd, instance->requestorId ?
                        MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
 }
@@ -1017,13 +1029,14 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
        int ret = 0;
        cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 
-       if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
-               (instance->instancet->issue_dcmd(instance, cmd))) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_NOT_FIRED;
        }
 
+       instance->instancet->issue_dcmd(instance, cmd);
+
        if (timeout) {
                ret = wait_event_timeout(instance->int_cmd_wait_q,
                                cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
@@ -1081,13 +1094,14 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
        cmd->sync_cmd = 1;
        cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 
-       if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
-               (instance->instancet->issue_dcmd(instance, cmd))) {
+       if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
                dev_err(&instance->pdev->dev, "Failed from %s %d\n",
                        __func__, __LINE__);
                return DCMD_NOT_FIRED;
        }
 
+       instance->instancet->issue_dcmd(instance, cmd);
+
        if (timeout) {
                ret = wait_event_timeout(instance->abort_cmd_wait_q,
                                cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
@@ -1273,7 +1287,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
        u16 flags = 0;
        struct megasas_pthru_frame *pthru;
 
-       is_logical = MEGASAS_IS_LOGICAL(scp);
+       is_logical = MEGASAS_IS_LOGICAL(scp->device);
        device_id = MEGASAS_DEV_INDEX(scp);
        pthru = (struct megasas_pthru_frame *)cmd->frame;
 
@@ -1513,11 +1527,11 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd)
        case WRITE_6:
        case READ_16:
        case WRITE_16:
-               ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+               ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
                        READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
                break;
        default:
-               ret = (MEGASAS_IS_LOGICAL(cmd)) ?
+               ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
                        NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
        }
        return ret;
@@ -1537,7 +1551,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
        struct megasas_io_frame *ldio;
        struct megasas_pthru_frame *pthru;
        u32 sgcount;
-       u32 max_cmd = instance->max_fw_cmds;
+       u16 max_cmd = instance->max_fw_cmds;
 
        dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
        dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
@@ -1662,7 +1676,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        /* Check for an mpio path and adjust behavior */
        if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
                if (megasas_check_mpio_paths(instance, scmd) ==
-                   (DID_RESET << 16)) {
+                   (DID_REQUEUE << 16)) {
                        return SCSI_MLQUEUE_HOST_BUSY;
                } else {
                        scmd->result = DID_NO_CONNECT << 16;
@@ -1693,15 +1707,16 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 
        scmd->result = 0;
 
-       if (MEGASAS_IS_LOGICAL(scmd) &&
+       if (MEGASAS_IS_LOGICAL(scmd->device) &&
            (scmd->device->id >= instance->fw_supported_vd_count ||
                scmd->device->lun)) {
                scmd->result = DID_BAD_TARGET << 16;
                goto out_done;
        }
 
-       if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
-               (!instance->fw_sync_cache_support)) {
+       if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
+           MEGASAS_IS_LOGICAL(scmd->device) &&
+           (!instance->fw_sync_cache_support)) {
                scmd->result = DID_OK << 16;
                goto out_done;
        }
@@ -1728,16 +1743,21 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
 }
 
 /*
-* megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
+* megasas_set_dynamic_target_properties -
+* Device properties set by the driver may not be static, so they must be
+* updated after an OCR.
+*
+* set tm_capable.
+* set dma alignment (only for EEDP protection enabled VDs).
 *
 * @sdev: OS provided scsi device
 *
 * Returns void
 */
-void megasas_update_sdev_properties(struct scsi_device *sdev)
+void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
 {
-       u16 pd_index = 0;
-       u32 device_id, ld;
+       u16 pd_index = 0, ld;
+       u32 device_id;
        struct megasas_instance *instance;
        struct fusion_context *fusion;
        struct MR_PRIV_DEVICE *mr_device_priv_data;
@@ -1749,67 +1769,129 @@ void megasas_update_sdev_properties(struct scsi_device *sdev)
        fusion = instance->ctrl_context;
        mr_device_priv_data = sdev->hostdata;
 
-       if (!fusion)
+       if (!fusion || !mr_device_priv_data)
                return;
 
-       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
-               instance->use_seqnum_jbod_fp) {
-               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
-                       sdev->id;
-               pd_sync = (void *)fusion->pd_seq_sync
-                               [(instance->pd_seq_map_id - 1) & 1];
-               mr_device_priv_data->is_tm_capable =
-                       pd_sync->seq[pd_index].capability.tmCapable;
-       } else {
+       if (MEGASAS_IS_LOGICAL(sdev)) {
                device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
                                        + sdev->id;
                local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
                ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+               if (ld >= instance->fw_supported_vd_count)
+                       return;
                raid = MR_LdRaidGet(ld, local_map_ptr);
 
                if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
                blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+
                mr_device_priv_data->is_tm_capable =
                        raid->capability.tmCapable;
+       } else if (instance->use_seqnum_jbod_fp) {
+               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
+                       sdev->id;
+               pd_sync = (void *)fusion->pd_seq_sync
+                               [(instance->pd_seq_map_id - 1) & 1];
+               mr_device_priv_data->is_tm_capable =
+                       pd_sync->seq[pd_index].capability.tmCapable;
        }
 }
 
-static void megasas_set_device_queue_depth(struct scsi_device *sdev)
+/*
+ * megasas_set_nvme_device_properties -
+ * set nomerges=2
+ * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
+ * set maximum io transfer = MDTS of NVME device provided by MR firmware.
+ *
+ * MR firmware provides value in KB. Caller of this function converts
+ * kb into bytes.
+ *
+ * e.g. MDTS=5 means 2^5 * NVMe page size; with a 4K page size,
+ * MR firmware provides the value 128, as (32 * 4K) = 128K.
+ *
+ * @sdev:                              scsi device
+ * @max_io_size:                               maximum io transfer size
+ *
+ */
+static inline void
+megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
 {
-       u16                             pd_index = 0;
-       int             ret = DCMD_FAILED;
        struct megasas_instance *instance;
+       u32 mr_nvme_pg_size;
 
-       instance = megasas_lookup_instance(sdev->host->host_no);
+       instance = (struct megasas_instance *)sdev->host->hostdata;
+       mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+                               MR_DEFAULT_NVME_PAGE_SIZE);
 
-       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
-               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+       blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
 
-               if (instance->pd_info) {
-                       mutex_lock(&instance->hba_mutex);
-                       ret = megasas_get_pd_info(instance, pd_index);
-                       mutex_unlock(&instance->hba_mutex);
-               }
+       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
+       blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
+}
 
-               if (ret != DCMD_SUCCESS)
-                       return;
 
-               if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
+/*
+ * megasas_set_static_target_properties -
+ * Device properties set by the driver are static and do not need to be
+ * updated after an OCR.
+ *
+ * set io timeout
+ * set device queue depth
+ * set nvme device properties. see - megasas_set_nvme_device_properties
+ *
+ * @sdev:                              scsi device
+ * @is_target_prop                     true, if fw provided target properties.
+ */
+static void megasas_set_static_target_properties(struct scsi_device *sdev,
+                                                bool is_target_prop)
+{
+       u16     target_index = 0;
+       u8 interface_type;
+       u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
+       u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
+       u32 tgt_device_qd;
+       struct megasas_instance *instance;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
 
-                       switch (instance->pd_list[pd_index].interface) {
-                       case SAS_PD:
-                               scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
-                               break;
+       instance = megasas_lookup_instance(sdev->host->host_no);
+       mr_device_priv_data = sdev->hostdata;
+       interface_type  = mr_device_priv_data->interface_type;
 
-                       case SATA_PD:
-                               scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
-                               break;
+       /*
+        * The RAID firmware may require extended timeouts.
+        */
+       blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
 
-                       default:
-                               scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
-                       }
-               }
+       target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
+
+       switch (interface_type) {
+       case SAS_PD:
+               device_qd = MEGASAS_SAS_QD;
+               break;
+       case SATA_PD:
+               device_qd = MEGASAS_SATA_QD;
+               break;
+       case NVME_PD:
+               device_qd = MEGASAS_NVME_QD;
+               break;
+       }
+
+       if (is_target_prop) {
+               tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
+               if (tgt_device_qd &&
+                   (tgt_device_qd <= instance->host->can_queue))
+                       device_qd = tgt_device_qd;
+
+               /* max_io_size_kb will be set to non zero for
+                * nvme based vd and syspd.
+                */
+               max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
        }
+
+       if (instance->nvme_page_size && max_io_size_kb)
+               megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
+
+       scsi_change_queue_depth(sdev, device_qd);
+
 }
 
 
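
The MDTS comment in this hunk is easiest to see with numbers: firmware reports the transfer limit already converted to kilobytes, and the driver only shifts it into bytes and 512-byte sectors. A worked sketch with assumed values:

    /* Worked example (assumed values): MDTS=5 and a 4K NVMe page, so the
     * firmware reports (1 << 5) * 4096 / 1024 = 128 in max_io_size_kb. */
    u32 max_io_size_kb = 128;
    u32 bytes   = max_io_size_kb << 10;   /* 131072 bytes                        */
    u32 sectors = bytes / 512;            /* 256, fed to blk_queue_max_hw_sectors */
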
@@ -1817,11 +1899,12 @@ static int megasas_slave_configure(struct scsi_device *sdev)
 {
        u16 pd_index = 0;
        struct megasas_instance *instance;
+       int ret_target_prop = DCMD_FAILED;
+       bool is_target_prop = false;
 
        instance = megasas_lookup_instance(sdev->host->host_no);
        if (instance->pd_list_not_supported) {
-               if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
-                       sdev->type == TYPE_DISK) {
+               if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
                        pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
                                sdev->id;
                        if (instance->pd_list[pd_index].driveState !=
@@ -1829,14 +1912,25 @@ static int megasas_slave_configure(struct scsi_device *sdev)
                                return -ENXIO;
                }
        }
-       megasas_set_device_queue_depth(sdev);
-       megasas_update_sdev_properties(sdev);
 
-       /*
-        * The RAID firmware may require extended timeouts.
+       mutex_lock(&instance->hba_mutex);
+       /* Send DCMD to Firmware and cache the information */
+       if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
+               megasas_get_pd_info(instance, sdev);
+
+       /* Some Ventura firmware may not have instance->nvme_page_size set.
+        * In that case, do not send MR_DCMD_DRV_GET_TARGET_PROP.
         */
-       blk_queue_rq_timeout(sdev->request_queue,
-               scmd_timeout * HZ);
+       if ((instance->tgt_prop) && (instance->nvme_page_size))
+               ret_target_prop = megasas_get_target_prop(instance, sdev);
+
+       is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
+       megasas_set_static_target_properties(sdev, is_target_prop);
+
+       mutex_unlock(&instance->hba_mutex);
+
+       /* This sdev property may change post OCR */
+       megasas_set_dynamic_target_properties(sdev);
 
        return 0;
 }
@@ -1848,7 +1942,7 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
        struct MR_PRIV_DEVICE *mr_device_priv_data;
 
        instance = megasas_lookup_instance(sdev->host->host_no);
-       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+       if (!MEGASAS_IS_LOGICAL(sdev)) {
                /*
                 * Open the OS scan to the SYSTEM PD
                 */
@@ -2483,7 +2577,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
                                                struct megasas_cmd, list);
                        list_del_init(&reset_cmd->list);
                        if (reset_cmd->scmd) {
-                               reset_cmd->scmd->result = DID_RESET << 16;
+                               reset_cmd->scmd->result = DID_REQUEUE << 16;
                                dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
                                        reset_index, reset_cmd,
                                        reset_cmd->scmd->cmnd[0]);
@@ -2650,6 +2744,24 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
        return BLK_EH_RESET_TIMER;
 }
 
+/**
+ * megasas_dump_frame -        This function will dump MPT/MFI frame
+ * @mpi_request:       pointer to the frame to dump
+ * @sz:                        size of the frame in bytes
+ */
+static inline void
+megasas_dump_frame(void *mpi_request, int sz)
+{
+       int i;
+       __le32 *mfp = (__le32 *)mpi_request;
+
+       printk(KERN_INFO "IO request frame:\n\t");
+       for (i = 0; i < sz / sizeof(__le32); i++) {
+               if (i && ((i % 8) == 0))
+                       printk("\n\t");
+               printk("%08x ", le32_to_cpu(mfp[i]));
+       }
+       printk("\n");
+}
+
 /**
  * megasas_reset_bus_host -    Bus & host reset handler entry point
  */
@@ -2660,12 +2772,26 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
 
        instance = (struct megasas_instance *)scmd->device->host->hostdata;
 
+       scmd_printk(KERN_INFO, scmd,
+               "Controller reset is requested due to IO timeout\n"
+               "SCSI command pointer: (%p)\t SCSI host state: %d\t"
+               " SCSI host busy: %d\t FW outstanding: %d\n",
+               scmd, scmd->device->host->shost_state,
+               atomic_read((atomic_t *)&scmd->device->host->host_busy),
+               atomic_read(&instance->fw_outstanding));
+
        /*
         * First wait for all commands to complete
         */
-       if (instance->ctrl_context)
-               ret = megasas_reset_fusion(scmd->device->host, 1);
-       else
+       if (instance->ctrl_context) {
+               struct megasas_cmd_fusion *cmd;
+               cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
+               if (cmd)
+                       megasas_dump_frame(cmd->io_request,
+                               sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+               ret = megasas_reset_fusion(scmd->device->host,
+                               SCSIIO_TIMEOUT_OCR);
+       } else
                ret = megasas_generic_reset(scmd);
 
        return ret;
@@ -3343,7 +3469,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
 {
        struct megasas_cmd *cmd;
        int i;
-       u32 max_cmd = instance->max_fw_cmds;
+       u16 max_cmd = instance->max_fw_cmds;
        u32 defer_index;
        unsigned long flags;
 
@@ -3719,7 +3845,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
 {
        int i;
-       u32 max_cmd = instance->max_mfi_cmds;
+       u16 max_cmd = instance->max_mfi_cmds;
        struct megasas_cmd *cmd;
 
        if (!instance->frame_dma_pool)
@@ -3763,9 +3889,8 @@ static void megasas_teardown_frame_pool(struct megasas_instance *instance)
 static int megasas_create_frame_pool(struct megasas_instance *instance)
 {
        int i;
-       u32 max_cmd;
+       u16 max_cmd;
        u32 sge_sz;
-       u32 total_sz;
        u32 frame_count;
        struct megasas_cmd *cmd;
 
@@ -3793,12 +3918,13 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
         * Total 192 byte (3 MFI frame of 64 byte)
         */
        frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
-       total_sz = MEGAMFI_FRAME_SIZE * frame_count;
+       instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
        /*
         * Use DMA pool facility provided by PCI layer
         */
        instance->frame_dma_pool = pci_pool_create("megasas frame pool",
-                                       instance->pdev, total_sz, 256, 0);
+                                       instance->pdev, instance->mfi_frame_size,
+                                       256, 0);
 
        if (!instance->frame_dma_pool) {
                dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
@@ -3842,7 +3968,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
                        return -ENOMEM;
                }
 
-               memset(cmd->frame, 0, total_sz);
+               memset(cmd->frame, 0, instance->mfi_frame_size);
                cmd->frame->io.context = cpu_to_le32(cmd->index);
                cmd->frame->io.pad_0 = 0;
                if (!instance->ctrl_context && reset_devices)
@@ -3897,7 +4023,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
 {
        int i;
        int j;
-       u32 max_cmd;
+       u16 max_cmd;
        struct megasas_cmd *cmd;
        struct fusion_context *fusion;
 
@@ -3974,18 +4100,22 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
                return INITIATE_OCR;
 }
 
-static int
-megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
+static void
+megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
 {
        int ret;
        struct megasas_cmd *cmd;
        struct megasas_dcmd_frame *dcmd;
 
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
+       u16 device_id = 0;
+
+       device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
        cmd = megasas_get_cmd(instance);
 
        if (!cmd) {
                dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
-               return -ENOMEM;
+               return;
        }
 
        dcmd = &cmd->frame->dcmd;
@@ -4012,7 +4142,9 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
 
        switch (ret) {
        case DCMD_SUCCESS:
-               instance->pd_list[device_id].interface =
+               mr_device_priv_data = sdev->hostdata;
+               le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
+               mr_device_priv_data->interface_type =
                                instance->pd_info->state.ddf.pdType.intf;
                break;
 
@@ -4039,7 +4171,7 @@ megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
        if (ret != DCMD_TIMEOUT)
                megasas_return_cmd(instance, cmd);
 
-       return ret;
+       return;
 }
 /*
  * megasas_get_pd_list_info -  Returns FW's pd_list structure
@@ -4418,8 +4550,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
 {
        struct fusion_context *fusion;
-       u32 old_map_sz;
-       u32 new_map_sz;
+       u32 ventura_map_sz = 0;
 
        fusion = instance->ctrl_context;
        /* For MFI based controllers return dummy success */
@@ -4449,21 +4580,27 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
                instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
                "Legacy(64 VD) firmware");
 
-       old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
-                               (sizeof(struct MR_LD_SPAN_MAP) *
-                               (instance->fw_supported_vd_count - 1));
-       new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
-       fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
-                               (sizeof(struct MR_LD_SPAN_MAP) *
-                               (instance->drv_supported_vd_count - 1));
-
-       fusion->max_map_sz = max(old_map_sz, new_map_sz);
+       if (instance->max_raid_mapsize) {
+               ventura_map_sz = instance->max_raid_mapsize *
+                                               MR_MIN_MAP_SIZE; /* 64k */
+               fusion->current_map_sz = ventura_map_sz;
+               fusion->max_map_sz = ventura_map_sz;
+       } else {
+               fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
+                                       (sizeof(struct MR_LD_SPAN_MAP) *
+                                       (instance->fw_supported_vd_count - 1));
+               fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
 
+               fusion->max_map_sz =
+                       max(fusion->old_map_sz, fusion->new_map_sz);
 
-       if (instance->supportmax256vd)
-               fusion->current_map_sz = new_map_sz;
-       else
-               fusion->current_map_sz = old_map_sz;
+               if (instance->supportmax256vd)
+                       fusion->current_map_sz = fusion->new_map_sz;
+               else
+                       fusion->current_map_sz = fusion->old_map_sz;
+       }
+       /* Irrespective of the FW RAID maps, the driver RAID map size is constant */
+       fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
 }
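To make the sizing rule above concrete, a small worked sketch, assuming MR_MIN_MAP_SIZE is 64K (per the inline comment) and a hypothetical firmware-reported max_raid_mapsize of 4:

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

#define MR_MIN_MAP_SIZE 0x10000 /* 64K, per the inline comment above */

int main(void)
{
	unsigned int max_raid_mapsize = 4;  /* hypothetical scratch-pad value */
	unsigned int ventura_map_sz = max_raid_mapsize * MR_MIN_MAP_SIZE;

	/* Ventura: current and max map size are both 4 * 64K = 256K;
	 * pre-Ventura falls back to max(old_map_sz, new_map_sz) as before.
	 */
	printf("ventura_map_sz = %u bytes\n", ventura_map_sz);
	return 0;
}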
 
 /**
@@ -4533,6 +4670,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
                le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
                le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
                le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+               le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
 
                /* Update the latest Ext VD info.
                 * From Init path, store current firmware details.
@@ -4542,6 +4680,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
                megasas_update_ext_vd_details(instance);
                instance->use_seqnum_jbod_fp =
                        ctrl_info->adapterOperations3.useSeqNumJbodFP;
+               instance->support_morethan256jbod =
+                       ctrl_info->adapter_operations4.support_pd_map_target_id;
 
                /*Check whether controller is iMR or MR */
                instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
@@ -4989,13 +5129,13 @@ skip_alloc:
 static int megasas_init_fw(struct megasas_instance *instance)
 {
        u32 max_sectors_1;
-       u32 max_sectors_2;
-       u32 tmp_sectors, msix_enable, scratch_pad_2;
+       u32 max_sectors_2, tmp_sectors, msix_enable;
+       u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
        resource_size_t base_addr;
        struct megasas_register_set __iomem *reg_set;
        struct megasas_ctrl_info *ctrl_info = NULL;
        unsigned long bar_list;
-       int i, loop, fw_msix_count = 0;
+       int i, j, loop, fw_msix_count = 0;
        struct IOV_111 *iovPtr;
        struct fusion_context *fusion;
 
@@ -5020,34 +5160,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
        reg_set = instance->reg_set;
 
-       switch (instance->pdev->device) {
-       case PCI_DEVICE_ID_LSI_FUSION:
-       case PCI_DEVICE_ID_LSI_PLASMA:
-       case PCI_DEVICE_ID_LSI_INVADER:
-       case PCI_DEVICE_ID_LSI_FURY:
-       case PCI_DEVICE_ID_LSI_INTRUDER:
-       case PCI_DEVICE_ID_LSI_INTRUDER_24:
-       case PCI_DEVICE_ID_LSI_CUTLASS_52:
-       case PCI_DEVICE_ID_LSI_CUTLASS_53:
+       if (fusion)
                instance->instancet = &megasas_instance_template_fusion;
-               break;
-       case PCI_DEVICE_ID_LSI_SAS1078R:
-       case PCI_DEVICE_ID_LSI_SAS1078DE:
-               instance->instancet = &megasas_instance_template_ppc;
-               break;
-       case PCI_DEVICE_ID_LSI_SAS1078GEN2:
-       case PCI_DEVICE_ID_LSI_SAS0079GEN2:
-               instance->instancet = &megasas_instance_template_gen2;
-               break;
-       case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
-       case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
-               instance->instancet = &megasas_instance_template_skinny;
-               break;
-       case PCI_DEVICE_ID_LSI_SAS1064R:
-       case PCI_DEVICE_ID_DELL_PERC5:
-       default:
-               instance->instancet = &megasas_instance_template_xscale;
-               break;
+       else {
+               switch (instance->pdev->device) {
+               case PCI_DEVICE_ID_LSI_SAS1078R:
+               case PCI_DEVICE_ID_LSI_SAS1078DE:
+                       instance->instancet = &megasas_instance_template_ppc;
+                       break;
+               case PCI_DEVICE_ID_LSI_SAS1078GEN2:
+               case PCI_DEVICE_ID_LSI_SAS0079GEN2:
+                       instance->instancet = &megasas_instance_template_gen2;
+                       break;
+               case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
+               case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
+                       instance->instancet = &megasas_instance_template_skinny;
+                       break;
+               case PCI_DEVICE_ID_LSI_SAS1064R:
+               case PCI_DEVICE_ID_DELL_PERC5:
+               default:
+                       instance->instancet = &megasas_instance_template_xscale;
+                       instance->pd_list_not_supported = 1;
+                       break;
+               }
        }
 
        if (megasas_transition_to_ready(instance, 0)) {
@@ -5066,13 +5201,13 @@ static int megasas_init_fw(struct megasas_instance *instance)
                        goto fail_ready_state;
        }
 
-       /*
-        * MSI-X host index 0 is common for all adapter.
-        * It is used for all MPT based Adapters.
-        */
-       instance->reply_post_host_index_addr[0] =
-               (u32 __iomem *)((u8 __iomem *)instance->reg_set +
-               MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+       if (instance->is_ventura) {
+               scratch_pad_3 =
+                       readl(&instance->reg_set->outbound_scratch_pad_3);
+               instance->max_raid_mapsize = ((scratch_pad_3 >>
+                       MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
+                       MR_MAX_RAID_MAP_SIZE_MASK);
+       }
 
        /* Check if MSI-X is supported while in ready state */
        msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
@@ -5092,6 +5227,9 @@ static int megasas_init_fw(struct megasas_instance *instance)
                                instance->msix_vectors = ((scratch_pad_2
                                        & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
                                        >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
+                               if (instance->msix_vectors > 16)
+                                       instance->msix_combined = true;
+
                                if (rdpq_enable)
                                        instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
                                                                1 : 0;
@@ -5125,6 +5263,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
                else
                        instance->msix_vectors = 0;
        }
+       /*
+        * MSI-X host index 0 is common for all adapter.
+        * It is used for all MPT based Adapters.
+        */
+       if (instance->msix_combined) {
+               instance->reply_post_host_index_addr[0] =
+                               (u32 __iomem *)((u8 __iomem *)instance->reg_set +
+                               MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
+       } else {
+               instance->reply_post_host_index_addr[0] =
+                       (u32 __iomem *)((u8 __iomem *)instance->reg_set +
+                       MPI2_REPLY_POST_HOST_INDEX_OFFSET);
+       }
+
        i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
        if (i < 0)
                goto fail_setup_irqs;
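The register choice above depends only on the vector count; a minimal sketch of the selection, where the classic MPI2 host-index offset is 0x6C per the MPI2 spec and the supplemental offset value here is an assumption for illustration:

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>

#define MPI2_REPLY_POST_HOST_INDEX_OFFSET     0x6C /* MPI2 spec */
#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET 0x30 /* assumed value */

/* Adapters exposing more than 16 MSI-X vectors (msix_combined) use the
 * supplemental reply-post host index register; all others keep the
 * classic MPI2 offset.
 */
static uint32_t host_index_offset(unsigned int msix_vectors)
{
	return msix_vectors > 16 ? MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
				 : MPI2_REPLY_POST_HOST_INDEX_OFFSET;
}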
@@ -5155,6 +5307,18 @@ static int megasas_init_fw(struct megasas_instance *instance)
        if (instance->instancet->init_adapter(instance))
                goto fail_init_adapter;
 
+       if (instance->is_ventura) {
+               scratch_pad_4 =
+                       readl(&instance->reg_set->outbound_scratch_pad_4);
+               if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
+                       MR_DEFAULT_NVME_PAGE_SHIFT)
+                       instance->nvme_page_size =
+                               (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
+
+               dev_info(&instance->pdev->dev,
+                        "NVME page size\t: (%d)\n", instance->nvme_page_size);
+       }
+
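The page-size decode above is a power-of-two shift gated by MR_DEFAULT_NVME_PAGE_SHIFT; a sketch with hypothetical constant values:

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

#define MR_NVME_PAGE_SIZE_MASK      0xFF /* assumed mask value */
#define MR_DEFAULT_NVME_PAGE_SHIFT  12   /* assumed: 4K default */

int main(void)
{
	unsigned int scratch_pad_4 = 0x0C; /* low bits = log2(page size) */
	unsigned int nvme_page_size = 0;

	if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
	    MR_DEFAULT_NVME_PAGE_SHIFT)
		nvme_page_size = 1U << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);

	printf("NVMe page size = %u bytes\n", nvme_page_size); /* 4096 */
	return 0;
}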
        if (instance->msix_vectors ?
                megasas_setup_irqs_msix(instance, 1) :
                megasas_setup_irqs_ioapic(instance))
@@ -5173,13 +5337,43 @@ static int megasas_init_fw(struct megasas_instance *instance)
                (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
        if (megasas_get_pd_list(instance) < 0) {
                dev_err(&instance->pdev->dev, "failed to get PD list\n");
-               goto fail_get_pd_list;
+               goto fail_get_ld_pd_list;
        }
 
        memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+
+       /* stream detection initialization */
+       if (instance->is_ventura && fusion) {
+               fusion->stream_detect_by_ld =
+                       kzalloc(sizeof(struct LD_STREAM_DETECT *)
+                       * MAX_LOGICAL_DRIVES_EXT,
+                       GFP_KERNEL);
+               if (!fusion->stream_detect_by_ld) {
+                       dev_err(&instance->pdev->dev,
+                               "unable to allocate stream detection for pool of LDs\n");
+                       goto fail_get_ld_pd_list;
+               }
+               for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
+                       fusion->stream_detect_by_ld[i] =
+                               kmalloc(sizeof(struct LD_STREAM_DETECT),
+                               GFP_KERNEL);
+                       if (!fusion->stream_detect_by_ld[i]) {
+                               dev_err(&instance->pdev->dev,
+                                       "unable to allocate stream detect by LD\n ");
+                               for (j = 0; j < i; ++j)
+                                       kfree(fusion->stream_detect_by_ld[j]);
+                               kfree(fusion->stream_detect_by_ld);
+                               fusion->stream_detect_by_ld = NULL;
+                               goto fail_get_ld_pd_list;
+                       }
+                       fusion->stream_detect_by_ld[i]->mru_bit_map
+                               = MR_STREAM_BITMAP;
+               }
+       }
+
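The stream-detection setup above is the usual allocate-N-or-unwind idiom; a self-contained sketch of the same pattern with generic names (not the driver's types):

/* Illustrative sketch only; generic allocate-or-unwind idiom. */
#include <stdlib.h>

struct item { unsigned long mru_bitmap; };

struct item **alloc_items(int n)
{
	struct item **arr = calloc(n, sizeof(*arr));
	int i, j;

	if (!arr)
		return NULL;
	for (i = 0; i < n; i++) {
		arr[i] = malloc(sizeof(*arr[i]));
		if (!arr[i]) {
			/* unwind everything allocated so far */
			for (j = 0; j < i; j++)
				free(arr[j]);
			free(arr);
			return NULL;
		}
		arr[i]->mru_bitmap = 0;
	}
	return arr;
}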
        if (megasas_ld_list_query(instance,
                                  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
-               megasas_get_ld_list(instance);
+               goto fail_get_ld_pd_list;
 
        /*
         * Compute the max allowed sectors per IO: The controller info has two
@@ -5296,7 +5490,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
        return 0;
 
-fail_get_pd_list:
+fail_get_ld_pd_list:
        instance->instancet->disable_intr(instance);
 fail_init_adapter:
        megasas_destroy_irqs(instance);
@@ -5309,9 +5503,11 @@ fail_ready_state:
        instance->ctrl_info = NULL;
        iounmap(instance->reg_set);
 
-      fail_ioremap:
+fail_ioremap:
        pci_release_selected_regions(instance->pdev, 1<<instance->bar);
 
+       dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+               __func__, __LINE__);
        return -EINVAL;
 }
 
@@ -5531,6 +5727,98 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
        return 0;
 }
 
+/* megasas_get_target_prop - Send DCMD with below details to firmware.
+ *
+ * This DCMD fetches a few properties of the LD/system PD defined
+ * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
+ *
+ * The DCMD is sent by the driver whenever a new target is added to the OS.
+ *
+ * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
+ * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
+ *                       0 = system PD, 1 = LD.
+ * dcmd.mbox.s[1]      - TargetID for LD/system PD.
+ * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
+ *
+ * @instance:          Adapter soft state
+ * @sdev:              OS provided scsi device
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int
+megasas_get_target_prop(struct megasas_instance *instance,
+                       struct scsi_device *sdev)
+{
+       int ret;
+       struct megasas_cmd *cmd;
+       struct megasas_dcmd_frame *dcmd;
+       u16 targetId = (sdev->channel % 2) + sdev->id;
+
+       cmd = megasas_get_cmd(instance);
+
+       if (!cmd) {
+               dev_err(&instance->pdev->dev,
+                       "Failed to get cmd %s\n", __func__);
+               return -ENOMEM;
+       }
+
+       dcmd = &cmd->frame->dcmd;
+
+       memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
+       memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+       dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
+
+       dcmd->mbox.s[1] = cpu_to_le16(targetId);
+       dcmd->cmd = MFI_CMD_DCMD;
+       dcmd->cmd_status = 0xFF;
+       dcmd->sge_count = 1;
+       dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+       dcmd->timeout = 0;
+       dcmd->pad_0 = 0;
+       dcmd->data_xfer_len =
+               cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
+       dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
+       dcmd->sgl.sge32[0].phys_addr =
+               cpu_to_le32(instance->tgt_prop_h);
+       dcmd->sgl.sge32[0].length =
+               cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
+
+       if (instance->ctrl_context && !instance->mask_interrupts)
+               ret = megasas_issue_blocked_cmd(instance,
+                                               cmd, MFI_IO_TIMEOUT_SECS);
+       else
+               ret = megasas_issue_polled(instance, cmd);
+
+       switch (ret) {
+       case DCMD_TIMEOUT:
+               switch (dcmd_timeout_ocr_possible(instance)) {
+               case INITIATE_OCR:
+                       cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+                       megasas_reset_fusion(instance->host,
+                                            MFI_IO_TIMEOUT_OCR);
+                       break;
+               case KILL_ADAPTER:
+                       megaraid_sas_kill_hba(instance);
+                       break;
+               case IGNORE_TIMEOUT:
+                       dev_info(&instance->pdev->dev,
+                                "Ignore DCMD timeout: %s %d\n",
+                                __func__, __LINE__);
+                       break;
+               }
+               break;
+
+       default:
+               megasas_return_cmd(instance, cmd);
+       }
+       if (ret != DCMD_SUCCESS)
+               dev_err(&instance->pdev->dev,
+                       "return from %s %d return value %d\n",
+                       __func__, __LINE__, ret);
+
+       return ret;
+}
+
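The mailbox setup above reduces to two values: a logical/physical flag in mbox.b[0] and the target id in mbox.s[1]. A self-contained sketch with hypothetical numbers; the channel threshold is an assumption mirroring MEGASAS_IS_LOGICAL:

/* Illustrative sketch only; mirrors how the DCMD mailbox above is
 * filled for an LD vs. a system PD (values hypothetical).
 */
#include <stdio.h>

int main(void)
{
	unsigned int channel = 3, id = 2;        /* hypothetical LD sdev */
	unsigned char is_logical = channel >= 2; /* assumed MEGASAS_IS_LOGICAL rule */
	unsigned short target_id = (channel % 2) + id;

	printf("mbox.b[0] = %u (1 = LD, 0 = system PD), mbox.s[1] = %u\n",
	       is_logical, target_id);
	return 0;
}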
 /**
  * megasas_start_aen - Subscribes to AEN during driver load time
  * @instance:          Adapter soft state
@@ -5714,6 +6002,12 @@ static int megasas_probe_one(struct pci_dev *pdev,
        instance->pdev = pdev;
 
        switch (instance->pdev->device) {
+       case PCI_DEVICE_ID_LSI_VENTURA:
+       case PCI_DEVICE_ID_LSI_HARPOON:
+       case PCI_DEVICE_ID_LSI_TOMCAT:
+       case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+       case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+               instance->is_ventura = true;
+               /* fall through - Ventura adapters share the Fusion setup below */
        case PCI_DEVICE_ID_LSI_FUSION:
        case PCI_DEVICE_ID_LSI_PLASMA:
        case PCI_DEVICE_ID_LSI_INVADER:
@@ -5723,21 +6017,17 @@ static int megasas_probe_one(struct pci_dev *pdev,
        case PCI_DEVICE_ID_LSI_CUTLASS_52:
        case PCI_DEVICE_ID_LSI_CUTLASS_53:
        {
-               instance->ctrl_context_pages =
-                       get_order(sizeof(struct fusion_context));
-               instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
-                               instance->ctrl_context_pages);
-               if (!instance->ctrl_context) {
-                       dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
-                              "memory for Fusion context info\n");
+               if (megasas_alloc_fusion_context(instance)) {
+                       megasas_free_fusion_context(instance);
                        goto fail_alloc_dma_buf;
                }
                fusion = instance->ctrl_context;
-               memset(fusion, 0,
-                       ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
+
                if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
                        (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
                        fusion->adapter_type = THUNDERBOLT_SERIES;
+               else if (instance->is_ventura)
+                       fusion->adapter_type = VENTURA_SERIES;
                else
                        fusion->adapter_type = INVADER_SERIES;
        }
@@ -5799,9 +6089,17 @@ static int megasas_probe_one(struct pci_dev *pdev,
                instance->pd_info = pci_alloc_consistent(pdev,
                        sizeof(struct MR_PD_INFO), &instance->pd_info_h);
 
+               instance->tgt_prop = pci_alloc_consistent(pdev,
+                       sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+
                if (!instance->pd_info)
                        dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
 
+               if (!instance->tgt_prop)
+                       dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+
                instance->crash_dump_buf = pci_alloc_consistent(pdev,
                                                CRASH_DMA_BUF_SIZE,
                                                &instance->crash_dump_h);
@@ -5823,6 +6121,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 
        spin_lock_init(&instance->mfi_pool_lock);
        spin_lock_init(&instance->hba_lock);
+       spin_lock_init(&instance->stream_lock);
        spin_lock_init(&instance->completion_lock);
 
        mutex_init(&instance->reset_mutex);
@@ -5945,6 +6244,10 @@ fail_alloc_dma_buf:
                pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
                                        instance->pd_info,
                                        instance->pd_info_h);
+       if (instance->tgt_prop)
+               pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+                                       instance->tgt_prop,
+                                       instance->tgt_prop_h);
        if (instance->producer)
                pci_free_consistent(pdev, sizeof(u32), instance->producer,
                                    instance->producer_h);
@@ -6217,6 +6520,10 @@ fail_init_mfi:
                pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
                                        instance->pd_info,
                                        instance->pd_info_h);
+       if (instance->tgt_prop)
+               pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+                                       instance->tgt_prop,
+                                       instance->tgt_prop_h);
        if (instance->producer)
                pci_free_consistent(pdev, sizeof(u32), instance->producer,
                                instance->producer_h);
@@ -6330,6 +6637,14 @@ skip_firing_dcmds:
        if (instance->msix_vectors)
                pci_free_irq_vectors(instance->pdev);
 
+       if (instance->is_ventura) {
+               for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
+                       kfree(fusion->stream_detect_by_ld[i]);
+               kfree(fusion->stream_detect_by_ld);
+               fusion->stream_detect_by_ld = NULL;
+       }
+
        if (instance->ctrl_context) {
                megasas_release_fusion(instance);
                        pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
@@ -6350,8 +6665,7 @@ skip_firing_dcmds:
                                        fusion->pd_seq_sync[i],
                                        fusion->pd_seq_phys[i]);
                }
-               free_pages((ulong)instance->ctrl_context,
-                       instance->ctrl_context_pages);
+               megasas_free_fusion_context(instance);
        } else {
                megasas_release_mfi(instance);
                pci_free_consistent(pdev, sizeof(u32),
@@ -6367,11 +6681,14 @@ skip_firing_dcmds:
        if (instance->evt_detail)
                pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
                                instance->evt_detail, instance->evt_detail_h);
-
        if (instance->pd_info)
                pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
                                        instance->pd_info,
                                        instance->pd_info_h);
+       if (instance->tgt_prop)
+               pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+                                       instance->tgt_prop,
+                                       instance->tgt_prop_h);
        if (instance->vf_affiliation)
                pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
                                    sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6570,6 +6887,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
                                               MFI_FRAME_SGL64 |
                                               MFI_FRAME_SENSE64));
 
+       if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) {
+               if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
+                       megasas_return_cmd(instance, cmd);
+                       return -1;
+               }
+       }
+
        if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
                error = megasas_set_crash_dump_params_ioctl(cmd);
                megasas_return_cmd(instance, cmd);
@@ -6678,7 +7002,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
                sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
                                ioc->sense_off);
 
-               if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
+               if (copy_to_user((void __user *)((unsigned long)
+                                get_unaligned((unsigned long *)sense_ptr)),
                                 sense, ioc->sense_len)) {
                        dev_err(&instance->pdev->dev, "Failed to copy out to user "
                                        "sense data\n");
@@ -7047,6 +7372,13 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
                megasas_sysfs_set_dbg_lvl);
 
+static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
+{
+       sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
+       scsi_remove_device(sdev);
+       scsi_device_put(sdev);
+}
+
 static void
 megasas_aen_polling(struct work_struct *work)
 {
@@ -7151,10 +7483,8 @@ megasas_aen_polling(struct work_struct *work)
                                        else
                                                scsi_device_put(sdev1);
                                } else {
-                                       if (sdev1) {
-                                               scsi_remove_device(sdev1);
-                                               scsi_device_put(sdev1);
-                                       }
+                                       if (sdev1)
+                                               megasas_remove_scsi_device(sdev1);
                                }
                        }
                }
@@ -7171,10 +7501,8 @@ megasas_aen_polling(struct work_struct *work)
                                        else
                                                scsi_device_put(sdev1);
                                } else {
-                                       if (sdev1) {
-                                               scsi_remove_device(sdev1);
-                                               scsi_device_put(sdev1);
-                                       }
+                                       if (sdev1)
+                                               megasas_remove_scsi_device(sdev1);
                                }
                        }
                }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index f237d00..62affa7 100644
@@ -77,7 +77,6 @@ MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
 #endif
 #define TRUE 1
 
-#define SPAN_DEBUG 0
 #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize)
 #define SPAN_ROW_DATA_SIZE(map_, ld, index_)   (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize)
 #define SPAN_INVALID  0xff
@@ -155,12 +154,17 @@ __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
        return map->raidMap.devHndlInfo[pd].curDevHdl;
 }
 
+static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
+{
+       return map->raidMap.devHndlInfo[pd].interfaceType;
+}
+
 u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
 {
        return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
 }
 
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
+u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
 {
        return map->raidMap.ldTgtIdToLd[ldTgtId];
 }
@@ -179,18 +183,108 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
        struct fusion_context *fusion = instance->ctrl_context;
        struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
        struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
-       int i;
+       int i, j;
        u16 ld_count;
+       struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
+       struct MR_FW_RAID_MAP_EXT *fw_map_ext;
+       struct MR_RAID_MAP_DESC_TABLE *desc_table;
 
 
        struct MR_DRV_RAID_MAP_ALL *drv_map =
                        fusion->ld_drv_map[(instance->map_id & 1)];
        struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+       void *raid_map_data = NULL;
+
+       memset(drv_map, 0, fusion->drv_map_sz);
+       memset(pDrvRaidMap->ldTgtIdToLd,
+              0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
+
+       if (instance->max_raid_mapsize) {
+               fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+               desc_table = (struct MR_RAID_MAP_DESC_TABLE *)
+                       ((void *)fw_map_dyn +
+                        le32_to_cpu(fw_map_dyn->desc_table_offset));
+               if (desc_table != fw_map_dyn->raid_map_desc_table)
+                       dev_dbg(&instance->pdev->dev, "desc table offsets do not match: computed %p, original %p\n",
+                               desc_table, fw_map_dyn->raid_map_desc_table);
+
+               ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
+               pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+               pDrvRaidMap->fpPdIoTimeoutSec =
+                       fw_map_dyn->fp_pd_io_timeout_sec;
+               pDrvRaidMap->totalSize =
+                       cpu_to_le32(sizeof(struct MR_DRV_RAID_MAP_ALL));
+               /* point to actual data starting point*/
+               raid_map_data = (void *)fw_map_dyn +
+                       le32_to_cpu(fw_map_dyn->desc_table_offset) +
+                       le32_to_cpu(fw_map_dyn->desc_table_size);
+
+               for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
+                       switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
+                       case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
+                               fw_map_dyn->dev_hndl_info =
+                               (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+                               memcpy(pDrvRaidMap->devHndlInfo,
+                                       fw_map_dyn->dev_hndl_info,
+                                       sizeof(struct MR_DEV_HANDLE_INFO) *
+                                       le32_to_cpu(desc_table->raid_map_desc_elements));
+                       break;
+                       case RAID_MAP_DESC_TYPE_TGTID_INFO:
+                               fw_map_dyn->ld_tgt_id_to_ld =
+                                       (u16 *)(raid_map_data +
+                                       le32_to_cpu(desc_table->raid_map_desc_offset));
+                               for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
+                                       pDrvRaidMap->ldTgtIdToLd[j] =
+                                               le16_to_cpu(fw_map_dyn->ld_tgt_id_to_ld[j]);
+                               }
+                       break;
+                       case RAID_MAP_DESC_TYPE_ARRAY_INFO:
+                               fw_map_dyn->ar_map_info =
+                                       (struct MR_ARRAY_INFO *)
+                                       (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+                               memcpy(pDrvRaidMap->arMapInfo,
+                                      fw_map_dyn->ar_map_info,
+                                      sizeof(struct MR_ARRAY_INFO) *
+                                      le32_to_cpu(desc_table->raid_map_desc_elements));
+                       break;
+                       case RAID_MAP_DESC_TYPE_SPAN_INFO:
+                               fw_map_dyn->ld_span_map =
+                                       (struct MR_LD_SPAN_MAP *)
+                                       (raid_map_data +
+                                       le32_to_cpu(desc_table->raid_map_desc_offset));
+                               memcpy(pDrvRaidMap->ldSpanMap,
+                                      fw_map_dyn->ld_span_map,
+                                      sizeof(struct MR_LD_SPAN_MAP) *
+                                      le32_to_cpu(desc_table->raid_map_desc_elements));
+                       break;
+                       default:
+                               dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
+                                       fw_map_dyn->desc_table_num_elements);
+                       }
+                       ++desc_table;
+               }
+
+       } else if (instance->supportmax256vd) {
+               fw_map_ext =
+                       (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+               ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
+               if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+                       dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
+                       return;
+               }
+
+               pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+               pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
+               for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
+                       pDrvRaidMap->ldTgtIdToLd[i] =
+                               (u16)fw_map_ext->ldTgtIdToLd[i];
+               memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
+                      sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+               memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
+                      sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+               memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
+                      sizeof(struct MR_DEV_HANDLE_INFO) *
+                      MAX_RAIDMAP_PHYSICAL_DEVICES);
 
-       if (instance->supportmax256vd) {
-               memcpy(fusion->ld_drv_map[instance->map_id & 1],
-                       fusion->ld_map[instance->map_id & 1],
-                       fusion->current_map_sz);
                /* New Raid map will not set totalSize, so keep expected value
                 * for legacy code in ValidateMapInfo
                 */
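The dynamic-map branch earlier in this hunk walks a firmware descriptor table by offset; a compact sketch of the same layout logic, with simplified stand-in structures (not the firmware ABI):

/* Illustrative sketch only; simplified descriptor-table walk. */
#include <stdint.h>
#include <string.h>

struct desc {
	uint32_t type;     /* which section this descriptor names */
	uint32_t offset;   /* byte offset relative to the data area */
	uint32_t elements; /* element count in the section */
};

struct map_hdr {
	uint32_t desc_table_offset; /* from start of the raw map */
	uint32_t desc_table_size;
	uint32_t desc_table_num_elements;
};

/* Copy each described section out of the raw firmware map; dst[] and
 * elem_sz[] are indexed by descriptor type.
 */
void walk_map(const uint8_t *raw, void *dst[], const size_t elem_sz[])
{
	const struct map_hdr *hdr = (const struct map_hdr *)raw;
	const struct desc *d =
		(const struct desc *)(raw + hdr->desc_table_offset);
	const uint8_t *data =
		raw + hdr->desc_table_offset + hdr->desc_table_size;
	uint32_t i;

	for (i = 0; i < hdr->desc_table_num_elements; i++, d++)
		memcpy(dst[d->type], data + d->offset,
		       (size_t)d->elements * elem_sz[d->type]);
}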
@@ -201,50 +295,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
                        fusion->ld_map[(instance->map_id & 1)];
                pFwRaidMap = &fw_map_old->raidMap;
                ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
-
-#if VD_EXT_DEBUG
-               for (i = 0; i < ld_count; i++) {
-                       dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
-                               "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
-                               instance->unique_id, i,
-                               fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
-                               fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
-                               fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
-               }
-#endif
-
-               memset(drv_map, 0, fusion->drv_map_sz);
                pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
                pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
                pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
                for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
                        pDrvRaidMap->ldTgtIdToLd[i] =
                                (u8)pFwRaidMap->ldTgtIdToLd[i];
-               for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
-                       i < MAX_LOGICAL_DRIVES_EXT; i++)
-                       pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
                for (i = 0; i < ld_count; i++) {
                        pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
-#if VD_EXT_DEBUG
-                       dev_dbg(&instance->pdev->dev,
-                               "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
-                               "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
-                               "size 0x%x\n", i, i,
-                               pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
-                               pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
-                               (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
-                       dev_dbg(&instance->pdev->dev,
-                               "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
-                               "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
-                               "size 0x%x\n", i, i,
-                               pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
-                               pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
-                               (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
-                       dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
-                               "raid map %p LD RAID MAP %p/%p\n", drv_map,
-                               pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
-                               &pDrvRaidMap->ldSpanMap[i].ldRaid);
-#endif
                }
                memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
                        sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
@@ -265,7 +323,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
        struct LD_LOAD_BALANCE_INFO *lbInfo;
        PLD_SPAN_INFO ldSpanInfo;
        struct MR_LD_RAID         *raid;
-       u16 ldCount, num_lds;
+       u16 num_lds, i;
        u16 ld;
        u32 expected_size;
 
@@ -279,7 +337,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
        lbInfo = fusion->load_balance_info;
        ldSpanInfo = fusion->log_to_span;
 
-       if (instance->supportmax256vd)
+       if (instance->max_raid_mapsize)
+               expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
+       else if (instance->supportmax256vd)
                expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
        else
                expected_size =
@@ -287,8 +347,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
                        (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
 
        if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
-               dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
-                      (unsigned int) expected_size);
+               dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
+                       le32_to_cpu(pDrvRaidMap->totalSize));
+               dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
+                       (unsigned int)expected_size);
                dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
                        (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
                        le32_to_cpu(pDrvRaidMap->totalSize));
@@ -298,15 +360,23 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
        if (instance->UnevenSpanSupport)
                mr_update_span_set(drv_map, ldSpanInfo);
 
-       mr_update_load_balance_params(drv_map, lbInfo);
+       if (lbInfo)
+               mr_update_load_balance_params(drv_map, lbInfo);
 
        num_lds = le16_to_cpu(drv_map->raidMap.ldCount);
 
        /*Convert Raid capability values to CPU arch */
-       for (ldCount = 0; ldCount < num_lds; ldCount++) {
-               ld = MR_TargetIdToLdGet(ldCount, drv_map);
+       for (i = 0; (num_lds > 0) && (i < MAX_LOGICAL_DRIVES_EXT); i++) {
+               ld = MR_TargetIdToLdGet(i, drv_map);
+
+               /* For non-existing VDs, move on to the next VD */
+               if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
+                       continue;
+
                raid = MR_LdRaidGet(ld, drv_map);
                le32_to_cpus((u32 *)&raid->capability);
+
+               num_lds--;
        }
 
        return 1;
@@ -345,91 +415,6 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
        return SPAN_INVALID;
 }
 
-/*
-******************************************************************************
-*
-* Function to print info about span set created in driver from FW raid map
-*
-* Inputs :
-* map    - LD map
-* ldSpanInfo - ldSpanInfo per HBA instance
-*/
-#if SPAN_DEBUG
-static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
-       PLD_SPAN_INFO ldSpanInfo)
-{
-
-       u8   span;
-       u32    element;
-       struct MR_LD_RAID *raid;
-       LD_SPAN_SET *span_set;
-       struct MR_QUAD_ELEMENT    *quad;
-       int ldCount;
-       u16 ld;
-
-       for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
-               ld = MR_TargetIdToLdGet(ldCount, map);
-                       if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1))
-                               continue;
-               raid = MR_LdRaidGet(ld, map);
-               dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
-                       ld, raid->spanDepth);
-               for (span = 0; span < raid->spanDepth; span++)
-                       dev_dbg(&instance->pdev->dev, "Span=%x,"
-                       " number of quads=%x\n", span,
-                       le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
-                       block_span_info.noElements));
-               for (element = 0; element < MAX_QUAD_DEPTH; element++) {
-                       span_set = &(ldSpanInfo[ld].span_set[element]);
-                       if (span_set->span_row_data_width == 0)
-                               break;
-
-                       dev_dbg(&instance->pdev->dev, "Span Set %x:"
-                               "width=%x, diff=%x\n", element,
-                               (unsigned int)span_set->span_row_data_width,
-                               (unsigned int)span_set->diff);
-                       dev_dbg(&instance->pdev->dev, "logical LBA"
-                               "start=0x%08lx, end=0x%08lx\n",
-                               (long unsigned int)span_set->log_start_lba,
-                               (long unsigned int)span_set->log_end_lba);
-                       dev_dbg(&instance->pdev->dev, "span row start=0x%08lx,"
-                               " end=0x%08lx\n",
-                               (long unsigned int)span_set->span_row_start,
-                               (long unsigned int)span_set->span_row_end);
-                       dev_dbg(&instance->pdev->dev, "data row start=0x%08lx,"
-                               " end=0x%08lx\n",
-                               (long unsigned int)span_set->data_row_start,
-                               (long unsigned int)span_set->data_row_end);
-                       dev_dbg(&instance->pdev->dev, "data strip start=0x%08lx,"
-                               " end=0x%08lx\n",
-                               (long unsigned int)span_set->data_strip_start,
-                               (long unsigned int)span_set->data_strip_end);
-
-                       for (span = 0; span < raid->spanDepth; span++) {
-                               if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
-                                       block_span_info.noElements) >=
-                                       element + 1) {
-                                       quad = &map->raidMap.ldSpanMap[ld].
-                                               spanBlock[span].block_span_info.
-                                               quad[element];
-                               dev_dbg(&instance->pdev->dev, "Span=%x,"
-                                       "Quad=%x, diff=%x\n", span,
-                                       element, le32_to_cpu(quad->diff));
-                               dev_dbg(&instance->pdev->dev,
-                                       "offset_in_span=0x%08lx\n",
-                                       (long unsigned int)le64_to_cpu(quad->offsetInSpan));
-                               dev_dbg(&instance->pdev->dev,
-                                       "logical start=0x%08lx, end=0x%08lx\n",
-                                       (long unsigned int)le64_to_cpu(quad->logStart),
-                                       (long unsigned int)le64_to_cpu(quad->logEnd));
-                               }
-                       }
-               }
-       }
-       return 0;
-}
-#endif
-
 /*
 ******************************************************************************
 *
@@ -543,19 +528,7 @@ static u64  get_row_from_strip(struct megasas_instance *instance,
                                else
                                        break;
                        }
-#if SPAN_DEBUG
-               dev_info(&instance->pdev->dev, "Strip 0x%llx,"
-                       "span_set_Strip 0x%llx, span_set_Row 0x%llx"
-                       "data width 0x%llx span offset 0x%x\n", strip,
-                       (unsigned long long)span_set_Strip,
-                       (unsigned long long)span_set_Row,
-                       (unsigned long long)span_set->span_row_data_width,
-                       span_offset);
-               dev_info(&instance->pdev->dev, "For strip 0x%llx"
-                       "row is 0x%llx\n", strip,
-                       (unsigned long long) span_set->data_row_start +
-                       (unsigned long long) span_set_Row + (span_offset - 1));
-#endif
+
                retval = (span_set->data_row_start + span_set_Row +
                                (span_offset - 1));
                return retval;
@@ -672,11 +645,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
                                else
                                        break;
                        }
-#if SPAN_DEBUG
-               dev_info(&instance->pdev->dev, "get_arm_from_strip:"
-                       "for ld=0x%x strip=0x%lx arm is  0x%x\n", ld,
-                       (long unsigned int)strip, (strip_offset - span_offset));
-#endif
+
                retval = (strip_offset - span_offset);
                return retval;
        }
@@ -737,16 +706,18 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
                struct MR_DRV_RAID_MAP_ALL *map)
 {
        struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
-       u32     pd, arRef;
+       u32     pd, arRef, r1_alt_pd;
        u8      physArm, span;
        u64     row;
        u8      retval = TRUE;
        u64     *pdBlock = &io_info->pdBlock;
        __le16  *pDevHandle = &io_info->devHandle;
+       u8      *pPdInterface = &io_info->pd_interface;
        u32     logArm, rowMod, armQ, arm;
        struct fusion_context *fusion;
 
        fusion = instance->ctrl_context;
+       *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
 
        /*Get row and span from io_info for Uneven Span IO.*/
        row         = io_info->start_row;
@@ -772,27 +743,46 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
        arRef       = MR_LdSpanArrayGet(ld, span, map);
        pd          = MR_ArPdGet(arRef, physArm, map);
 
-       if (pd != MR_PD_INVALID)
+       if (pd != MR_PD_INVALID) {
                *pDevHandle = MR_PdDevHandleGet(pd, map);
-       else {
-               *pDevHandle = cpu_to_le16(MR_PD_INVALID);
+               *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+               /* get second pd also for raid 1/10 fast path writes*/
+               if (instance->is_ventura &&
+                   (raid->level == 1) &&
+                   !io_info->isRead) {
+                       r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+                       if (r1_alt_pd != MR_PD_INVALID)
+                               io_info->r1_alt_dev_handle =
+                               MR_PdDevHandleGet(r1_alt_pd, map);
+               }
+       } else {
                if ((raid->level >= 5) &&
                        ((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
                        ((fusion->adapter_type == INVADER_SERIES) &&
                        (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
-                       pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+                       pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
                else if (raid->level == 1) {
                        physArm = physArm + 1;
                        pd = MR_ArPdGet(arRef, physArm, map);
-                       if (pd != MR_PD_INVALID)
+                       if (pd != MR_PD_INVALID) {
                                *pDevHandle = MR_PdDevHandleGet(pd, map);
+                               *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+                       }
                }
        }
 
        *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-       pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
-                                       physArm;
-       io_info->span_arm = pRAID_Context->spanArm;
+       if (instance->is_ventura) {
+               ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
+                       (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+               io_info->span_arm =
+                       (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+       } else {
+               pRAID_Context->span_arm =
+                       (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+               io_info->span_arm = pRAID_Context->span_arm;
+       }
+       io_info->pd_after_lb = pd;
        return retval;
 }
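Both paths above pack the span and arm into a single span_arm byte; a sketch of the encode/decode, where the shift and mask values are assumptions matching the field names:

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>

#define RAID_CTX_SPANARM_SPAN_SHIFT 5    /* assumed value */
#define RAID_CTX_SPANARM_ARM_MASK   0x1f /* assumed value */

int main(void)
{
	unsigned char span = 2, arm = 3;
	unsigned char span_arm =
		(span << RAID_CTX_SPANARM_SPAN_SHIFT) | arm;

	printf("encoded 0x%02x -> span %u, arm %u\n", span_arm,
	       span_arm >> RAID_CTX_SPANARM_SPAN_SHIFT,
	       span_arm & RAID_CTX_SPANARM_ARM_MASK);
	return 0;
}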
 
@@ -819,16 +809,17 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
                struct MR_DRV_RAID_MAP_ALL *map)
 {
        struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
-       u32         pd, arRef;
+       u32         pd, arRef, r1_alt_pd;
        u8          physArm, span;
        u64         row;
        u8          retval = TRUE;
        u64         *pdBlock = &io_info->pdBlock;
        __le16      *pDevHandle = &io_info->devHandle;
+       u8          *pPdInterface = &io_info->pd_interface;
        struct fusion_context *fusion;
 
        fusion = instance->ctrl_context;
-
+       *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID);
 
        row =  mega_div64_32(stripRow, raid->rowDataSize);
 
@@ -867,31 +858,49 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
        arRef       = MR_LdSpanArrayGet(ld, span, map);
        pd          = MR_ArPdGet(arRef, physArm, map); /* Get the pd */
 
-       if (pd != MR_PD_INVALID)
+       if (pd != MR_PD_INVALID) {
                /* Get dev handle from Pd. */
                *pDevHandle = MR_PdDevHandleGet(pd, map);
-       else {
-               /* set dev handle as invalid. */
-               *pDevHandle = cpu_to_le16(MR_PD_INVALID);
+               *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+               /* get second pd also for raid 1/10 fast path writes*/
+               if (instance->is_ventura &&
+                   (raid->level == 1) &&
+                   !io_info->isRead) {
+                       r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
+                       if (r1_alt_pd != MR_PD_INVALID)
+                               io_info->r1_alt_dev_handle =
+                                       MR_PdDevHandleGet(r1_alt_pd, map);
+               }
+       } else {
                if ((raid->level >= 5) &&
                        ((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
                        ((fusion->adapter_type == INVADER_SERIES) &&
                        (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
-                       pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+                       pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
                else if (raid->level == 1) {
                        /* Get alternate Pd. */
                        physArm = physArm + 1;
                        pd = MR_ArPdGet(arRef, physArm, map);
-                       if (pd != MR_PD_INVALID)
+                       if (pd != MR_PD_INVALID) {
                                /* Get dev handle from Pd */
                                *pDevHandle = MR_PdDevHandleGet(pd, map);
+                               *pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+                       }
                }
        }
 
        *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-       pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
-               physArm;
-       io_info->span_arm = pRAID_Context->spanArm;
+       if (instance->is_ventura) {
+               ((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
+                               (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+               io_info->span_arm =
+                               (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+       } else {
+               pRAID_Context->span_arm =
+                       (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+               io_info->span_arm = pRAID_Context->span_arm;
+       }
+       io_info->pd_after_lb = pd;
        return retval;
 }
 
@@ -912,7 +921,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 {
        struct fusion_context *fusion;
        struct MR_LD_RAID  *raid;
-       u32         ld, stripSize, stripe_mask;
+       u32         stripSize, stripe_mask;
        u64         endLba, endStrip, endRow, start_row, start_strip;
        u64         regStart;
        u32         regSize;
@@ -924,6 +933,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
        u8          retval = 0;
        u8          startlba_span = SPAN_INVALID;
        u64 *pdBlock = &io_info->pdBlock;
+       u16         ld;
 
        ldStartBlock = io_info->ldStartBlock;
        numBlocks = io_info->numBlocks;
@@ -935,6 +945,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 
        ld = MR_TargetIdToLdGet(ldTgtId, map);
        raid = MR_LdRaidGet(ld, map);
+       /*check read ahead bit*/
+       io_info->ra_capable = raid->capability.ra_capable;
 
        /*
         * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
@@ -996,17 +1008,6 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                }
                io_info->start_span     = startlba_span;
                io_info->start_row      = start_row;
-#if SPAN_DEBUG
-               dev_dbg(&instance->pdev->dev, "Check Span number from %s %d"
-                       "for row 0x%llx, start strip 0x%llx end strip 0x%llx"
-                       " span 0x%x\n", __func__, __LINE__,
-                       (unsigned long long)start_row,
-                       (unsigned long long)start_strip,
-                       (unsigned long long)endStrip, startlba_span);
-               dev_dbg(&instance->pdev->dev, "start_row 0x%llx endRow 0x%llx"
-                       "Start span 0x%x\n", (unsigned long long)start_row,
-                       (unsigned long long)endRow, startlba_span);
-#endif
        } else {
                start_row = mega_div64_32(start_strip, raid->rowDataSize);
                endRow    = mega_div64_32(endStrip, raid->rowDataSize);
@@ -1093,20 +1094,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                        regSize += stripSize;
        }
 
-       pRAID_Context->timeoutValue =
+       pRAID_Context->timeout_value =
                cpu_to_le16(raid->fpIoTimeoutForLd ?
                            raid->fpIoTimeoutForLd :
                            map->raidMap.fpPdIoTimeoutSec);
        if (fusion->adapter_type == INVADER_SERIES)
-               pRAID_Context->regLockFlags = (isRead) ?
+               pRAID_Context->reg_lock_flags = (isRead) ?
                        raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
-       else
-               pRAID_Context->regLockFlags = (isRead) ?
+       else if (!instance->is_ventura)
+               pRAID_Context->reg_lock_flags = (isRead) ?
                        REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
-       pRAID_Context->VirtualDiskTgtId = raid->targetId;
-       pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
-       pRAID_Context->regLockLength    = cpu_to_le32(regSize);
-       pRAID_Context->configSeqNum     = raid->seqNum;
+       pRAID_Context->virtual_disk_tgt_id = raid->targetId;
+       pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
+       pRAID_Context->reg_lock_length    = cpu_to_le32(regSize);
+       pRAID_Context->config_seq_num   = raid->seqNum;
        /* save pointer to raid->LUN array */
        *raidLUN = raid->LUN;
 
@@ -1122,7 +1123,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                                        ref_in_start_stripe, io_info,
                                        pRAID_Context, map);
                /* If IO on an invalid Pd, then FP is not possible.*/
-               if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
+               if (io_info->devHandle == MR_DEVHANDLE_INVALID)
                        io_info->fpOkForIo = FALSE;
                return retval;
        } else if (isRead) {
@@ -1140,12 +1141,6 @@ MR_BuildRaidContext(struct megasas_instance *instance,
                                return TRUE;
                }
        }
-
-#if SPAN_DEBUG
-       /* Just for testing what arm we get for strip.*/
-       if (io_info->IoforUnevenSpan)
-               get_arm_from_strip(instance, ld, start_strip, map);
-#endif
        return TRUE;
 }
 
@@ -1259,10 +1254,6 @@ void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
                        break;
            }
        }
-#if SPAN_DEBUG
-       getSpanInfo(map, ldSpanInfo);
-#endif
-
 }
 
 void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
@@ -1293,11 +1284,12 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
 }
 
 u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
-       struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+                          struct LD_LOAD_BALANCE_INFO *lbInfo,
+                          struct IO_REQUEST_INFO *io_info,
+                          struct MR_DRV_RAID_MAP_ALL *drv_map)
 {
-       struct fusion_context *fusion;
        struct MR_LD_RAID  *raid;
-       struct MR_DRV_RAID_MAP_ALL *drv_map;
+       u16     pd1_dev_handle;
        u16     pend0, pend1, ld;
        u64     diff0, diff1;
        u8      bestArm, pd0, pd1, span, arm;
@@ -1310,9 +1302,6 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
                        >> RAID_CTX_SPANARM_SPAN_SHIFT);
        arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
 
-
-       fusion = instance->ctrl_context;
-       drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
        ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
        raid = MR_LdRaidGet(ld, drv_map);
        span_row_size = instance->UnevenSpanSupport ?
@@ -1323,47 +1312,52 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
        pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
                (arm + 1 - span_row_size) : arm + 1, drv_map);
 
-       /* get the pending cmds for the data and mirror arms */
-       pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
-       pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+       /* Get PD1 Dev Handle */
+
+       pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
 
-       /* Determine the disk whose head is nearer to the req. block */
-       diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
-       diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
-       bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+       if (pd1_dev_handle == MR_DEVHANDLE_INVALID) {
+               bestArm = arm;
+       } else {
+               /* get the pending cmds for the data and mirror arms */
+               pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+               pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
 
-       if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
-                       (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
-               bestArm ^= 1;
+               /* Determine the disk whose head is nearer to the req. block */
+               diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+               diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+               bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+
+               /* Make the balance count 4 instead of 16 to
+                * keep the driver in sync with the firmware.
+                */
+               if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
+                   (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+                       bestArm ^= 1;
+
+               /* Update the last accessed block on the correct pd */
+               io_info->span_arm =
+                       (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+               io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+       }
 
-       /* Update the last accessed block on the correct pd */
-       io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
        lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
-       io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
-#if SPAN_DEBUG
-       if (arm != bestArm)
-               dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
-                       "occur - span 0x%x arm 0x%x bestArm 0x%x "
-                       "io_info->span_arm 0x%x\n",
-                       span, arm, bestArm, io_info->span_arm);
-#endif
        return io_info->pd_after_lb;
 }
 
 __le16 get_updated_dev_handle(struct megasas_instance *instance,
-       struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
+                             struct LD_LOAD_BALANCE_INFO *lbInfo,
+                             struct IO_REQUEST_INFO *io_info,
+                             struct MR_DRV_RAID_MAP_ALL *drv_map)
 {
        u8 arm_pd;
        __le16 devHandle;
-       struct fusion_context *fusion;
-       struct MR_DRV_RAID_MAP_ALL *drv_map;
-
-       fusion = instance->ctrl_context;
-       drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
 
        /* get best new arm (PD ID) */
-       arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+       arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info, drv_map);
        devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+       io_info->pd_interface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
        atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
+
        return devHandle;
 }
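
A minimal sketch of the arm-selection heuristic used by megasas_get_best_arm_pd()
above, assuming lb_pending_cmds is the threshold the comment says is now kept at
4 (a hypothetical standalone helper, not the driver function itself):

    /* Pick the mirror arm: nearest head wins unless it is too busy. */
    static unsigned char best_arm(unsigned char arm,
                                  unsigned int pend0, unsigned int pend1,
                                  unsigned long long diff0,
                                  unsigned long long diff1,
                                  unsigned int lb_pending_cmds)
    {
            unsigned char best = (diff0 <= diff1) ? arm : arm ^ 1;

            /* Override if the nearer arm is too busy relative to its peer. */
            if ((best == arm && pend0 > pend1 + lb_pending_cmds) ||
                (best != arm && pend1 > pend0 + lb_pending_cmds))
                    best ^= 1;
            return best;
    }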
index 24778ba..29650ba 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
 #include <linux/poll.h>
+#include <linux/vmalloc.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -181,32 +182,44 @@ inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
        struct megasas_cmd_fusion *cmd)
 {
        cmd->scmd = NULL;
-       memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
+       memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
+       cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+       cmd->cmd_completed = false;
 }
 
 /**
  * megasas_fire_cmd_fusion -   Sends command to the FW
+ * @instance:                  Adapter soft state
+ * @req_desc:                  32bit or 64bit Request descriptor
+ *
+ * Perform the PCI write. Ventura supports a 32-bit descriptor;
+ * pre-Ventura (12G) MR controllers use a 64-bit descriptor.
  */
+
 static void
 megasas_fire_cmd_fusion(struct megasas_instance *instance,
                union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
 {
+       if (instance->is_ventura)
+               writel(le32_to_cpu(req_desc->u.low),
+                       &instance->reg_set->inbound_single_queue_port);
+       else {
 #if defined(writeq) && defined(CONFIG_64BIT)
-       u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
-                       le32_to_cpu(req_desc->u.low));
+               u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
+                               le32_to_cpu(req_desc->u.low));
 
-       writeq(req_data, &instance->reg_set->inbound_low_queue_port);
+               writeq(req_data, &instance->reg_set->inbound_low_queue_port);
 #else
-       unsigned long flags;
-
-       spin_lock_irqsave(&instance->hba_lock, flags);
-       writel(le32_to_cpu(req_desc->u.low),
-               &instance->reg_set->inbound_low_queue_port);
-       writel(le32_to_cpu(req_desc->u.high),
-               &instance->reg_set->inbound_high_queue_port);
-       mmiowb();
-       spin_unlock_irqrestore(&instance->hba_lock, flags);
+               unsigned long flags;
+               spin_lock_irqsave(&instance->hba_lock, flags);
+               writel(le32_to_cpu(req_desc->u.low),
+                       &instance->reg_set->inbound_low_queue_port);
+               writel(le32_to_cpu(req_desc->u.high),
+                       &instance->reg_set->inbound_high_queue_port);
+               mmiowb();
+               spin_unlock_irqrestore(&instance->hba_lock, flags);
 #endif
+       }
 }
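
How the two descriptor halves combine on the 64-bit path above can be shown in
isolation; the struct below is a hypothetical stand-in for
union MEGASAS_REQUEST_DESCRIPTOR_UNION, not the driver's type:

    #include <stdint.h>

    struct req_desc_u { uint32_t low; uint32_t high; };

    /* Pack the halves exactly as the writeq path does. */
    static uint64_t pack_req_desc(const struct req_desc_u *d)
    {
            return ((uint64_t)d->high << 32) | d->low;
    }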
 
 /**
@@ -229,7 +242,10 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
 
        reg_set = instance->reg_set;
 
-       cur_max_fw_cmds = readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
+       /* Ventura FW does not fill outbound_scratch_pad_3 with the queue depth */
+       if (!instance->is_ventura)
+               cur_max_fw_cmds =
+               readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
 
        if (dual_qdepth_disable || !cur_max_fw_cmds)
                cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
@@ -243,7 +259,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
 
        if (fw_boot_context == OCR_CONTEXT) {
                cur_max_fw_cmds = cur_max_fw_cmds - 1;
-               if (cur_max_fw_cmds <= instance->max_fw_cmds) {
+               if (cur_max_fw_cmds < instance->max_fw_cmds) {
                        instance->cur_can_queue =
                                cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
                                                MEGASAS_FUSION_IOCTL_CMDS);
@@ -255,7 +271,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
                instance->ldio_threshold = ldio_threshold;
 
                if (!instance->is_rdpq)
-                       instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
+                       instance->max_fw_cmds =
+                               min_t(u16, instance->max_fw_cmds, 1024);
 
                if (reset_devices)
                        instance->max_fw_cmds = min(instance->max_fw_cmds,
@@ -271,7 +288,14 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
                                (MEGASAS_FUSION_INTERNAL_CMDS +
                                MEGASAS_FUSION_IOCTL_CMDS);
                instance->cur_can_queue = instance->max_scsi_cmds;
+               instance->host->can_queue = instance->cur_can_queue;
        }
+
+       if (instance->is_ventura)
+               instance->max_mpt_cmds =
+               instance->max_fw_cmds * RAID_1_PEER_CMDS;
+       else
+               instance->max_mpt_cmds = instance->max_fw_cmds;
 }
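
The queue scaling introduced here reduces to a one-liner. The sketch below assumes
RAID_1_PEER_CMDS == 2, the value this series sets, since Ventura reserves a peer
command frame for each potential RAID-1 fast-path write:

    /* Hypothetical helper mirroring the max_mpt_cmds computation above. */
    static unsigned int mpt_cmd_pool_size(unsigned int max_fw_cmds,
                                          int is_ventura)
    {
            return is_ventura ? max_fw_cmds * 2 : max_fw_cmds;
    }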
 /**
  * megasas_free_cmds_fusion -  Free all the cmds in the free cmd pool
@@ -285,7 +309,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
        struct megasas_cmd_fusion *cmd;
 
        /* SG, Sense */
-       for (i = 0; i < instance->max_fw_cmds; i++) {
+       for (i = 0; i < instance->max_mpt_cmds; i++) {
                cmd = fusion->cmd_list[i];
                if (cmd) {
                        if (cmd->sg_frame)
@@ -329,7 +353,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 
 
        /* cmd_list */
-       for (i = 0; i < instance->max_fw_cmds; i++)
+       for (i = 0; i < instance->max_mpt_cmds; i++)
                kfree(fusion->cmd_list[i]);
 
        kfree(fusion->cmd_list);
@@ -343,7 +367,7 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
 static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 {
        int i;
-       u32 max_cmd;
+       u16 max_cmd;
        struct fusion_context *fusion;
        struct megasas_cmd_fusion *cmd;
 
@@ -353,7 +377,8 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
 
        fusion->sg_dma_pool =
                        pci_pool_create("mr_sg", instance->pdev,
-                               instance->max_chain_frame_sz, 4, 0);
+                               instance->max_chain_frame_sz,
+                               MR_DEFAULT_NVME_PAGE_SIZE, 0);
        /* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
        fusion->sense_dma_pool =
                        pci_pool_create("mr_sense", instance->pdev,
@@ -381,33 +406,47 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
                        return -ENOMEM;
                }
        }
+
+       /* Create sense buffers for the RAID 1/10 fast-path commands */
+       for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
+               cmd = fusion->cmd_list[i];
+               cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
+                       GFP_KERNEL, &cmd->sense_phys_addr);
+               if (!cmd->sense) {
+                       dev_err(&instance->pdev->dev,
+                               "Failed from %s %d\n",  __func__, __LINE__);
+                       return -ENOMEM;
+               }
+       }
+
        return 0;
 }
 
 int
 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
 {
-       u32 max_cmd, i;
+       u32 max_mpt_cmd, i;
        struct fusion_context *fusion;
 
        fusion = instance->ctrl_context;
 
-       max_cmd = instance->max_fw_cmds;
+       max_mpt_cmd = instance->max_mpt_cmds;
 
        /*
         * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
         * Allocate the dynamic array first and then allocate individual
         * commands.
         */
-       fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) * max_cmd,
-                                               GFP_KERNEL);
+       fusion->cmd_list =
+               kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd,
+                       GFP_KERNEL);
        if (!fusion->cmd_list) {
                dev_err(&instance->pdev->dev,
                        "Failed from %s %d\n",  __func__, __LINE__);
                return -ENOMEM;
        }
 
-       for (i = 0; i < max_cmd; i++) {
+       for (i = 0; i < max_mpt_cmd; i++) {
                fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
                                              GFP_KERNEL);
                if (!fusion->cmd_list[i]) {
@@ -539,7 +578,7 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
                }
 
                fusion->rdpq_virt[i].RDPQBaseAddress =
-                       fusion->reply_frames_desc_phys[i];
+                       cpu_to_le64(fusion->reply_frames_desc_phys[i]);
 
                reply_desc = fusion->reply_frames_desc[i];
                for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
@@ -642,13 +681,14 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
         */
 
        /* SMID 0 is reserved. Set SMID/index from 1 */
-       for (i = 0; i < instance->max_fw_cmds; i++) {
+       for (i = 0; i < instance->max_mpt_cmds; i++) {
                cmd = fusion->cmd_list[i];
                offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
                memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
                cmd->index = i + 1;
                cmd->scmd = NULL;
-               cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
+               cmd->sync_cmd_idx =
+               (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
                                (i - instance->max_scsi_cmds) :
                                (u32)ULONG_MAX; /* Set to Invalid */
                cmd->instance = instance;
@@ -658,6 +698,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
                memset(cmd->io_request, 0,
                       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
                cmd->io_request_phys_addr = io_req_base_phys + offset;
+               cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
        }
 
        if (megasas_create_sg_sense_fusion(instance))
@@ -725,6 +766,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
        const char *sys_info;
        MFI_CAPABILITIES *drv_ops;
        u32 scratch_pad_2;
+       unsigned long flags;
 
        fusion = instance->ctrl_context;
 
@@ -781,6 +823,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                        MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
        IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
        IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
+       IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
        init_frame = (struct megasas_init_frame *)cmd->frame;
        memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
 
@@ -796,7 +839,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
        drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
 
        /* driver support Extended MSIX */
-       if (fusion->adapter_type == INVADER_SERIES)
+       if (fusion->adapter_type >= INVADER_SERIES)
                drv_ops->mfi_capabilities.support_additional_msix = 1;
        /* driver supports HA / Remote LUN over Fast Path interface */
        drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -813,6 +856,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
 
        drv_ops->mfi_capabilities.support_qd_throttling = 1;
+       drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
        /* Convert capability to LE32 */
        cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
@@ -850,7 +894,14 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                        break;
        }
 
-       megasas_fire_cmd_fusion(instance, &req_desc);
+       /* IOC INIT requires a 64-bit descriptor write, even on Ventura. */
+       spin_lock_irqsave(&instance->hba_lock, flags);
+       writel(le32_to_cpu(req_desc.u.low),
+              &instance->reg_set->inbound_low_queue_port);
+       writel(le32_to_cpu(req_desc.u.high),
+              &instance->reg_set->inbound_high_queue_port);
+       mmiowb();
+       spin_unlock_irqrestore(&instance->hba_lock, flags);
 
        wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
 
@@ -1009,11 +1060,6 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
 
        memset(ci, 0, fusion->max_map_sz);
        memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-#if VD_EXT_DEBUG
-       dev_dbg(&instance->pdev->dev,
-               "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
-               __func__, cpu_to_le32(size_map_info));
-#endif
        dcmd->cmd = MFI_CMD_DCMD;
        dcmd->cmd_status = 0xFF;
        dcmd->sge_count = 1;
@@ -1065,10 +1111,11 @@ megasas_get_map_info(struct megasas_instance *instance)
 int
 megasas_sync_map_info(struct megasas_instance *instance)
 {
-       int ret = 0, i;
+       int i;
        struct megasas_cmd *cmd;
        struct megasas_dcmd_frame *dcmd;
-       u32 size_sync_info, num_lds;
+       u16 num_lds;
+       u32 size_sync_info;
        struct fusion_context *fusion;
        struct MR_LD_TARGET_SYNC *ci = NULL;
        struct MR_DRV_RAID_MAP_ALL *map;
@@ -1134,7 +1181,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
 
        instance->instancet->issue_dcmd(instance, cmd);
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -1220,7 +1267,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 {
        struct megasas_register_set __iomem *reg_set;
        struct fusion_context *fusion;
-       u32 max_cmd, scratch_pad_2;
+       u16 max_cmd;
+       u32 scratch_pad_2;
        int i = 0, count;
 
        fusion = instance->ctrl_context;
@@ -1229,13 +1277,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
 
        megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
 
-       /*
-        * Reduce the max supported cmds by 1. This is to ensure that the
-        * reply_q_sz (1 more than the max cmd that driver may send)
-        * does not exceed max cmds that the FW can support
-        */
-       instance->max_fw_cmds = instance->max_fw_cmds-1;
-
        /*
         * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
         */
@@ -1247,12 +1288,12 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
        fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
 
        fusion->request_alloc_sz =
-               sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
+       sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
        fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
                *(fusion->reply_q_depth);
        fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
-               (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
-                (max_cmd + 1)); /* Extra 1 for SMID 0 */
+               (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+               * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
 
        scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
        /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1302,7 +1343,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
                fusion->last_reply_idx[i] = 0;
 
        /*
-        * For fusion adapters, 3 commands for IOCTL and 5 commands
+        * For fusion adapters, 3 commands for IOCTL and 8 commands
         * for driver's internal DCMDs.
         */
        instance->max_scsi_cmds = instance->max_fw_cmds -
@@ -1331,6 +1372,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
        }
 
        instance->flag_ieee = 1;
+       instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
        fusion->fast_path_io = 0;
 
        fusion->drv_map_pages = get_order(fusion->drv_map_sz);
@@ -1388,96 +1430,348 @@ fail_alloc_mfi_cmds:
  */
 
 void
-map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
+map_cmd_status(struct fusion_context *fusion,
+               struct scsi_cmnd *scmd, u8 status, u8 ext_status,
+               u32 data_length, u8 *sense)
 {
+       u8 cmd_type;
+       int resid;
 
+       cmd_type = megasas_cmd_type(scmd);
        switch (status) {
 
        case MFI_STAT_OK:
-               cmd->scmd->result = DID_OK << 16;
+               scmd->result = DID_OK << 16;
                break;
 
        case MFI_STAT_SCSI_IO_FAILED:
        case MFI_STAT_LD_INIT_IN_PROGRESS:
-               cmd->scmd->result = (DID_ERROR << 16) | ext_status;
+               scmd->result = (DID_ERROR << 16) | ext_status;
                break;
 
        case MFI_STAT_SCSI_DONE_WITH_ERROR:
 
-               cmd->scmd->result = (DID_OK << 16) | ext_status;
+               scmd->result = (DID_OK << 16) | ext_status;
                if (ext_status == SAM_STAT_CHECK_CONDITION) {
-                       memset(cmd->scmd->sense_buffer, 0,
+                       memset(scmd->sense_buffer, 0,
                               SCSI_SENSE_BUFFERSIZE);
-                       memcpy(cmd->scmd->sense_buffer, cmd->sense,
+                       memcpy(scmd->sense_buffer, sense,
                               SCSI_SENSE_BUFFERSIZE);
-                       cmd->scmd->result |= DRIVER_SENSE << 24;
+                       scmd->result |= DRIVER_SENSE << 24;
                }
+
+               /*
+                * If the IO request is partially completed, the MR FW
+                * updates the "io_request->DataLength" field with the
+                * actual number of bytes transferred. The driver then
+                * sets the residual byte count in the SCSI command
+                * structure.
+                */
+               resid = (scsi_bufflen(scmd) - data_length);
+               scsi_set_resid(scmd, resid);
+
+               if (resid &&
+                       ((cmd_type == READ_WRITE_LDIO) ||
+                       (cmd_type == READ_WRITE_SYSPDIO)))
+                       scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
+                               " requested/completed 0x%x/0x%x\n",
+                               status, scsi_bufflen(scmd), data_length);
                break;
 
        case MFI_STAT_LD_OFFLINE:
        case MFI_STAT_DEVICE_NOT_FOUND:
-               cmd->scmd->result = DID_BAD_TARGET << 16;
+               scmd->result = DID_BAD_TARGET << 16;
                break;
        case MFI_STAT_CONFIG_SEQ_MISMATCH:
-               cmd->scmd->result = DID_IMM_RETRY << 16;
+               scmd->result = DID_IMM_RETRY << 16;
                break;
        default:
-               dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
-               cmd->scmd->result = DID_ERROR << 16;
+               scmd->result = DID_ERROR << 16;
                break;
        }
 }
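
A worked example of the residual bookkeeping above, with hypothetical values: the
FW reports the bytes it actually moved, and the driver hands the shortfall back to
the midlayer via scsi_set_resid().

    #include <stdio.h>

    int main(void)
    {
            unsigned int bufflen = 0x10000;     /* bytes requested */
            unsigned int data_length = 0xc000;  /* bytes the FW moved */

            printf("resid = %u\n", bufflen - data_length); /* 16384 */
            return 0;
    }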
 
+/**
+ * megasas_is_prp_possible -
+ * Checks if native NVMe PRPs can be built for the IO
+ *
+ * @instance:          Adapter soft state
+ * @scmd:              SCSI command from the mid-layer
+ * @sge_count:         scatter gather element count.
+ *
+ * Returns:            true: PRPs can be built
+ *                     false: IEEE SGLs need to be built
+ */
+static bool
+megasas_is_prp_possible(struct megasas_instance *instance,
+                       struct scsi_cmnd *scmd, int sge_count)
+{
+       struct fusion_context *fusion;
+       int i;
+       u32 data_length = 0;
+       struct scatterlist *sg_scmd;
+       bool build_prp = false;
+       u32 mr_nvme_pg_size;
+
+       mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+                               MR_DEFAULT_NVME_PAGE_SIZE);
+       fusion = instance->ctrl_context;
+       data_length = scsi_bufflen(scmd);
+       sg_scmd = scsi_sglist(scmd);
+
+       /*
+        * NVMe uses one PRP for each page (or part of a page), so decide
+        * from the data length: if 4 pages or less, IEEE is OK;
+        * if more than 5 pages, we need to build a native SGL;
+        * if more than 4 and at most 5 pages, check the 1st SG entry:
+        * if its size is >= the residual beyond 4 pages, use IEEE,
+        * otherwise use a native SGL.
+        */
+
+       if (data_length > (mr_nvme_pg_size * 5)) {
+               build_prp = true;
+       } else if ((data_length > (mr_nvme_pg_size * 4)) &&
+                       (data_length <= (mr_nvme_pg_size * 5)))  {
+               /* check if 1st SG entry size is < residual beyond 4 pages */
+               if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
+                       build_prp = true;
+       }
+
+/*
+ * The code below detects gaps/holes in IO data buffers.
+ * What do holes/gaps mean?
+ * Any SGE other than the first one in an SGL that starts at an address
+ * not aligned to the NVMe page size, or any SGE other than the last one
+ * in an SGL that ends off an NVMe page size boundary.
+ *
+ * The driver has already told the block layer to merge bios only at
+ * NVMe page size boundaries by calling the kernel API
+ * blk_queue_virt_boundary() inside slave_config.
+ * IOs with holes can still reach the driver because of merging done
+ * by the IO scheduler.
+ *
+ * With SCSI BLK MQ enabled there is no IO scheduling, hence no IO
+ * merging and no IOs with holes.
+ *
+ * With SCSI BLK MQ disabled, the IO scheduler may attempt to merge
+ * IOs and then send IOs with holes.
+ *
+ * The driver can ask the block layer to disable IO merging by calling
+ * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue),
+ * but the user may tune the nomerges sysfs parameter back to 0 or 1.
+ *
+ * If IO scheduling is enabled with SCSI BLK MQ in the future, this
+ * hole-detection algorithm will be required for that case as well.
+ */
+       scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
+               if ((i != 0) && (i != (sge_count - 1))) {
+                       if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
+                           mega_mod64(sg_dma_address(sg_scmd),
+                                      mr_nvme_pg_size)) {
+                               build_prp = false;
+                               atomic_inc(&instance->sge_holes_type1);
+                               break;
+                       }
+               }
+
+               if ((sge_count > 1) && (i == 0)) {
+                       if ((mega_mod64((sg_dma_address(sg_scmd) +
+                                       sg_dma_len(sg_scmd)),
+                                       mr_nvme_pg_size))) {
+                               build_prp = false;
+                               atomic_inc(&instance->sge_holes_type2);
+                               break;
+                       }
+               }
+
+               if ((sge_count > 1) && (i == (sge_count - 1))) {
+                       if (mega_mod64(sg_dma_address(sg_scmd),
+                                      mr_nvme_pg_size)) {
+                               build_prp = false;
+                               atomic_inc(&instance->sge_holes_type3);
+                               break;
+                       }
+               }
+       }
+
+       return build_prp;
+}
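
The page-count decision above condenses to the sketch below (hypothetical helper;
pg is the NVMe page size, len the transfer length, first_sge the DMA length of
the first scatter element; the subsequent hole scan can still veto PRPs):

    #include <stdbool.h>
    #include <stdint.h>

    static bool prefer_prp(uint32_t len, uint32_t first_sge, uint32_t pg)
    {
            if (len > 5 * pg)
                    return true;              /* > 5 pages: build PRPs */
            if (len > 4 * pg)                 /* (4, 5] pages: check 1st SGE */
                    return first_sge < (len - 4 * pg);
            return false;                     /* <= 4 pages: IEEE SGL is fine */
    }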
+
+/**
+ * megasas_make_prp_nvme -
+ * Prepare PRPs (Physical Region Pages), the SGL format specific to NVMe drives
+ *
+ * @instance:          Adapter soft state
+ * @scmd:              SCSI command from the mid-layer
+ * @sgl_ptr:           SGL to be filled in
+ * @cmd:               Fusion command frame
+ * @sge_count:         scatter gather element count.
+ *
+ * Returns:            true: PRPs are built
+ *                     false: IEEE SGLs need to be built
+ */
+static bool
+megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
+                     struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
+                     struct megasas_cmd_fusion *cmd, int sge_count)
+{
+       int sge_len, offset, num_prp_in_chain = 0;
+       struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
+       u64 *ptr_sgl;
+       dma_addr_t ptr_sgl_phys;
+       u64 sge_addr;
+       u32 page_mask, page_mask_result;
+       struct scatterlist *sg_scmd;
+       u32 first_prp_len;
+       bool build_prp = false;
+       int data_len = scsi_bufflen(scmd);
+       struct fusion_context *fusion;
+       u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
+                                       MR_DEFAULT_NVME_PAGE_SIZE);
+
+       fusion = instance->ctrl_context;
+
+       build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
+
+       if (!build_prp)
+               return false;
+
+       /*
+        * NVMe has a fairly convoluted PRP format: one PRP is required
+        * for each page or partial page. The driver needs to split up
+        * OS sg_list entries that are longer than one page or that
+        * cross a page boundary. The driver also has to insert a PRP
+        * list pointer entry as the last entry in each physical page
+        * of the PRP list.
+        *
+        * NOTE: The first PRP "entry" is actually placed in the first
+        * SGL entry in the main message, in IEEE 64 format. The 2nd
+        * entry in the main message is the chain element, and the rest
+        * of the PRP entries are built in the contiguous PCIe buffer.
+        */
+       page_mask = mr_nvme_pg_size - 1;
+       ptr_sgl = (u64 *)cmd->sg_frame;
+       ptr_sgl_phys = cmd->sg_frame_phys_addr;
+       memset(ptr_sgl, 0, instance->max_chain_frame_sz);
+
+       /* Build the chain frame element, which holds all PRPs except the first */
+       main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
+           ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
+
+       main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
+       main_chain_element->NextChainOffset = 0;
+       main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+                                       IEEE_SGE_FLAGS_SYSTEM_ADDR |
+                                       MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
+
+       /* Build the first PRP; the SGE need not be page aligned */
+       ptr_first_sgl = sgl_ptr;
+       sg_scmd = scsi_sglist(scmd);
+       sge_addr = sg_dma_address(sg_scmd);
+       sge_len = sg_dma_len(sg_scmd);
+
+       offset = (u32)(sge_addr & page_mask);
+       first_prp_len = mr_nvme_pg_size - offset;
+
+       ptr_first_sgl->Address = cpu_to_le64(sge_addr);
+       ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
+
+       data_len -= first_prp_len;
+
+       if (sge_len > first_prp_len) {
+               sge_addr += first_prp_len;
+               sge_len -= first_prp_len;
+       } else if (sge_len == first_prp_len) {
+               sg_scmd = sg_next(sg_scmd);
+               sge_addr = sg_dma_address(sg_scmd);
+               sge_len = sg_dma_len(sg_scmd);
+       }
+
+       for (;;) {
+               offset = (u32)(sge_addr & page_mask);
+
+               /* Insert a PRP list pointer at the page boundary */
+               page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
+               if (unlikely(!page_mask_result)) {
+                       scmd_printk(KERN_NOTICE,
+                                   scmd, "page boundary ptr_sgl: 0x%p\n",
+                                   ptr_sgl);
+                       ptr_sgl_phys += 8;
+                       *ptr_sgl = cpu_to_le64(ptr_sgl_phys);
+                       ptr_sgl++;
+                       num_prp_in_chain++;
+               }
+
+               *ptr_sgl = cpu_to_le64(sge_addr);
+               ptr_sgl++;
+               ptr_sgl_phys += 8;
+               num_prp_in_chain++;
+
+               sge_addr += mr_nvme_pg_size;
+               sge_len -= mr_nvme_pg_size;
+               data_len -= mr_nvme_pg_size;
+
+               if (data_len <= 0)
+                       break;
+
+               if (sge_len > 0)
+                       continue;
+
+               sg_scmd = sg_next(sg_scmd);
+               sge_addr = sg_dma_address(sg_scmd);
+               sge_len = sg_dma_len(sg_scmd);
+       }
+
+       main_chain_element->Length =
+                       cpu_to_le32(num_prp_in_chain * sizeof(u64));
+
+       atomic_inc(&instance->prp_sgl);
+       return build_prp;
+}
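
The first-PRP split described in the comment above can be illustrated with
hypothetical values; the first entry may start mid-page, so its length only runs
to the end of that NVMe page:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pg = 4096;            /* assumed NVMe page size */
            uint64_t sge_addr = 0x1000a00; /* hypothetical, unaligned */
            uint32_t offset = (uint32_t)(sge_addr & (pg - 1));
            uint32_t first_prp_len = (uint32_t)pg - offset;

            /* prints offset=2560 first_prp_len=1536 */
            printf("offset=%u first_prp_len=%u\n", offset, first_prp_len);
            return 0;
    }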
+
 /**
  * megasas_make_sgl_fusion -   Prepares 32-bit SGL
  * @instance:          Adapter soft state
  * @scp:               SCSI command from the mid-layer
  * @sgl_ptr:           SGL to be filled in
  * @cmd:               cmd we are working on
+ * @sge_count:         sge count
  *
- * If successful, this function returns the number of SG elements.
  */
-static int
+static void
 megasas_make_sgl_fusion(struct megasas_instance *instance,
                        struct scsi_cmnd *scp,
                        struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
-                       struct megasas_cmd_fusion *cmd)
+                       struct megasas_cmd_fusion *cmd, int sge_count)
 {
-       int i, sg_processed, sge_count;
+       int i, sg_processed;
        struct scatterlist *os_sgl;
        struct fusion_context *fusion;
 
        fusion = instance->ctrl_context;
 
-       if (fusion->adapter_type == INVADER_SERIES) {
+       if (fusion->adapter_type >= INVADER_SERIES) {
                struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
                sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
                sgl_ptr_end->Flags = 0;
        }
 
-       sge_count = scsi_dma_map(scp);
-
-       BUG_ON(sge_count < 0);
-
-       if (sge_count > instance->max_num_sge || !sge_count)
-               return sge_count;
-
        scsi_for_each_sg(scp, os_sgl, sge_count, i) {
                sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
                sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
                sgl_ptr->Flags = 0;
-               if (fusion->adapter_type == INVADER_SERIES)
+               if (fusion->adapter_type >= INVADER_SERIES)
                        if (i == sge_count - 1)
                                sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
                sgl_ptr++;
-
                sg_processed = i + 1;
 
                if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
                    (sge_count > fusion->max_sge_in_main_msg)) {
 
                        struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
-                       if (fusion->adapter_type == INVADER_SERIES) {
+                       if (fusion->adapter_type >= INVADER_SERIES) {
                                if ((le16_to_cpu(cmd->io_request->IoFlags) &
                                        MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
                                        MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1493,7 +1787,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
                        sg_chain = sgl_ptr;
                        /* Prepare chain element */
                        sg_chain->NextChainOffset = 0;
-                       if (fusion->adapter_type == INVADER_SERIES)
+                       if (fusion->adapter_type >= INVADER_SERIES)
                                sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
                        else
                                sg_chain->Flags =
@@ -1507,6 +1801,45 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
                        memset(sgl_ptr, 0, instance->max_chain_frame_sz);
                }
        }
+       atomic_inc(&instance->ieee_sgl);
+}
+
+/**
+ * megasas_make_sgl -  Build Scatter Gather Lists (SGLs)
+ * @scp:               SCSI command pointer
+ * @instance:          Soft instance of controller
+ * @cmd:               Fusion command pointer
+ *
+ * This function builds SGLs based on the device type.
+ * NVMe drives use a different, native way of building SGLs:
+ * PRPs (Physical Region Pages).
+ *
+ * Returns the number of SG entries actually mapped, zero if there
+ * was nothing to map, or -ENOMEM if the mapping failed.
+ */
+static
+int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
+                    struct megasas_cmd_fusion *cmd)
+{
+       int sge_count;
+       bool build_prp = false;
+       struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
+
+       sge_count = scsi_dma_map(scp);
+
+       if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
+               return sge_count;
+
+       sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
+       if ((le16_to_cpu(cmd->io_request->IoFlags) &
+           MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
+           (cmd->pd_interface == NVME_PD))
+               build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
+                                                 cmd, sge_count);
+
+       if (!build_prp)
+               megasas_make_sgl_fusion(instance, scp, sgl_chain64,
+                                       cmd, sge_count);
 
        return sge_count;
 }
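
Given the return contract documented above, a caller-side check would look roughly
like this (a hypothetical helper, not the driver's actual call site):

    #include <stdbool.h>

    /* Negative: DMA mapping failed. Zero: nothing to map. Above
     * max_num_sge: the SGL cannot be expressed in one frame. */
    static bool sgl_result_ok(int sge_count, int max_num_sge)
    {
            return sge_count > 0 && sge_count <= max_num_sge;
    }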
@@ -1525,7 +1858,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
 {
        struct MR_LD_RAID *raid;
-       u32 ld;
+       u16 ld;
        u64 start_blk = io_info->pdBlock;
        u8 *cdb = io_request->CDB.CDB32;
        u32 num_blocks = io_info->numBlocks;
@@ -1574,6 +1907,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
+                               MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
                                MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
                } else {
                        io_request->EEDPFlags = cpu_to_le16(
@@ -1687,6 +2021,166 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
        }
 }
 
+/**
+ * megasas_stream_detect -     stream detection on read and write IOs
+ * @instance:          Adapter soft state
+ * @cmd:               Command to be prepared
+ * @io_info:           IO Request info
+ */
+static void megasas_stream_detect(struct megasas_instance *instance,
+                                 struct megasas_cmd_fusion *cmd,
+                                 struct IO_REQUEST_INFO *io_info)
+{
+       struct fusion_context *fusion = instance->ctrl_context;
+       u32 device_id = io_info->ldTgtId;
+       struct LD_STREAM_DETECT *current_ld_sd
+               = fusion->stream_detect_by_ld[device_id];
+       u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
+       u32 shifted_values, unshifted_values;
+       u32 index_value_mask, shifted_values_mask;
+       int i;
+       bool is_read_ahead = false;
+       struct STREAM_DETECT *current_sd;
+       /* find possible stream */
+       for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
+               stream_num = (*track_stream >>
+                       (i * BITS_PER_INDEX_STREAM)) &
+                       STREAM_MASK;
+               current_sd = &current_ld_sd->stream_track[stream_num];
+               /* If we found a stream, update the RAID
+                * context and also update the mruBitMap.
+                */
+               /* boundary condition */
+               if ((current_sd->next_seq_lba) &&
+                   (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
+                   (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
+                   (current_sd->is_read == io_info->isRead)) {
+
+                       if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
+                           ((!io_info->isRead) || (!is_read_ahead)))
+                               /*
+                                * Once the API is available we need to change this.
+                                * At this point we are not allowing any gap.
+                                */
+                               continue;
+
+                       SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
+                       current_sd->next_seq_lba =
+                       io_info->ldStartBlock + io_info->numBlocks;
+                       /*
+                        *      update the mruBitMap LRU
+                        */
+                       shifted_values_mask =
+                               (1 <<  i * BITS_PER_INDEX_STREAM) - 1;
+                       shifted_values = ((*track_stream & shifted_values_mask)
+                                               << BITS_PER_INDEX_STREAM);
+                       index_value_mask =
+                               STREAM_MASK << i * BITS_PER_INDEX_STREAM;
+                       unshifted_values =
+                               *track_stream & ~(shifted_values_mask |
+                               index_value_mask);
+                       *track_stream =
+                               unshifted_values | shifted_values | stream_num;
+                       return;
+               }
+       }
+       /*
+        * if we did not find any stream, create a new one
+        * from the least recently used
+        */
+       stream_num = (*track_stream >>
+               ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
+               STREAM_MASK;
+       current_sd = &current_ld_sd->stream_track[stream_num];
+       current_sd->is_read = io_info->isRead;
+       current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
+       *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
+       return;
+}
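
The MRU bit-map rotation in the middle of megasas_stream_detect() deserves an
isolated sketch. Assuming BITS_PER_INDEX_STREAM == 4 and STREAM_MASK == 0xF, a
u32 tracks eight 4-bit stream indices; promoting entry i to most-recently-used
shifts every newer entry back one slot:

    #include <stdint.h>

    static uint32_t mru_promote(uint32_t track_stream, int i,
                                uint32_t stream_num)
    {
            uint32_t below = (1u << (i * 4)) - 1;  /* entries newer than i */
            uint32_t shifted = (track_stream & below) << 4;
            uint32_t kept = track_stream & ~(below | (0xFu << (i * 4)));

            return kept | shifted | stream_num;    /* stream_num is now MRU */
    }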
+
+/**
+ * megasas_set_raidflag_cpu_affinity - This function sets the CPU
+ * affinity (CPU of the controller) and raid_flags in the RAID context
+ * based on the IO type.
+ *
+ * @praid_context:     IO RAID context
+ * @raid:              LD raid map
+ * @fp_possible:       Is fast path possible?
+ * @is_read:           Is read IO?
+ * @scsi_buff_len:     SCSI transfer length, used to select the LDIO
+ *                     bandwidth-limit sub-type for large RAID-1 writes
+ *
+ */
+static void
+megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
+                                 struct MR_LD_RAID *raid, bool fp_possible,
+                                 u8 is_read, u32 scsi_buff_len)
+{
+       u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
+       struct RAID_CONTEXT_G35 *rctx_g35;
+
+       rctx_g35 = &praid_context->raid_context_g35;
+       if (fp_possible) {
+               if (is_read) {
+                       if ((raid->cpuAffinity.pdRead.cpu0) &&
+                           (raid->cpuAffinity.pdRead.cpu1))
+                               cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+                       else if (raid->cpuAffinity.pdRead.cpu1)
+                               cpu_sel = MR_RAID_CTX_CPUSEL_1;
+               } else {
+                       if ((raid->cpuAffinity.pdWrite.cpu0) &&
+                           (raid->cpuAffinity.pdWrite.cpu1))
+                               cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+                       else if (raid->cpuAffinity.pdWrite.cpu1)
+                               cpu_sel = MR_RAID_CTX_CPUSEL_1;
+                       /* Fast path cache by pass capable R0/R1 VD */
+                       if ((raid->level <= 1) &&
+                           (raid->capability.fp_cache_bypass_capable)) {
+                               rctx_g35->routing_flags |=
+                                       (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
+                               rctx_g35->raid_flags =
+                                       (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
+                                       << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+                       }
+               }
+       } else {
+               if (is_read) {
+                       if ((raid->cpuAffinity.ldRead.cpu0) &&
+                           (raid->cpuAffinity.ldRead.cpu1))
+                               cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+                       else if (raid->cpuAffinity.ldRead.cpu1)
+                               cpu_sel = MR_RAID_CTX_CPUSEL_1;
+               } else {
+                       if ((raid->cpuAffinity.ldWrite.cpu0) &&
+                           (raid->cpuAffinity.ldWrite.cpu1))
+                               cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
+                       else if (raid->cpuAffinity.ldWrite.cpu1)
+                               cpu_sel = MR_RAID_CTX_CPUSEL_1;
+
+                       if (is_stream_detected(rctx_g35) &&
+                           (raid->level == 5) &&
+                           (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
+                           (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
+                               cpu_sel = MR_RAID_CTX_CPUSEL_0;
+               }
+       }
+
+       rctx_g35->routing_flags |=
+               (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+
+       /* Always give MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT priority
+        * over MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS; the IO sub-type
+        * field is not a bitmap.
+        */
+       if ((raid->level == 1) && (!is_read)) {
+               if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+                       praid_context->raid_context_g35.raid_flags =
+                               (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
+                               << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+       }
+}
+
 /**
  * megasas_build_ldio_fusion - Prepares IOs to devices
  * @instance:          Adapter soft state
@@ -1701,29 +2195,36 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                          struct scsi_cmnd *scp,
                          struct megasas_cmd_fusion *cmd)
 {
-       u8 fp_possible;
+       bool fp_possible;
+       u16 ld;
        u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+       u32 scsi_buff_len;
        struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
        union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
        struct IO_REQUEST_INFO io_info;
        struct fusion_context *fusion;
        struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
        u8 *raidLUN;
+       unsigned long spinlock_flags;
+       union RAID_CONTEXT_UNION *praid_context;
+       struct MR_LD_RAID *raid = NULL;
+       struct MR_PRIV_DEVICE *mrdev_priv;
 
        device_id = MEGASAS_DEV_INDEX(scp);
 
        fusion = instance->ctrl_context;
 
        io_request = cmd->io_request;
-       io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
-       io_request->RaidContext.status = 0;
-       io_request->RaidContext.exStatus = 0;
+       io_request->RaidContext.raid_context.virtual_disk_tgt_id =
+               cpu_to_le16(device_id);
+       io_request->RaidContext.raid_context.status = 0;
+       io_request->RaidContext.raid_context.ex_status = 0;
 
        req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
 
        start_lba_lo = 0;
        start_lba_hi = 0;
-       fp_possible = 0;
+       fp_possible = false;
 
        /*
         * 6-byte READ(0x08) or WRITE(0x0A) cdb
@@ -1779,22 +2280,27 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
        io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
        io_info.numBlocks = datalength;
        io_info.ldTgtId = device_id;
-       io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
+       io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+       scsi_buff_len = scsi_bufflen(scp);
+       io_request->DataLength = cpu_to_le32(scsi_buff_len);
 
        if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
                io_info.isRead = 1;
 
        local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+       ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
 
-       if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
-               instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
-               io_request->RaidContext.regLockFlags  = 0;
-               fp_possible = 0;
+       if (ld < instance->fw_supported_vd_count)
+               raid = MR_LdRaidGet(ld, local_map_ptr);
+
+       if (!raid || (!fusion->fast_path_io)) {
+               io_request->RaidContext.raid_context.reg_lock_flags  = 0;
+               fp_possible = false;
        } else {
                if (MR_BuildRaidContext(instance, &io_info,
-                                       &io_request->RaidContext,
+                                       &io_request->RaidContext.raid_context,
                                        local_map_ptr, &raidLUN))
-                       fp_possible = io_info.fpOkForIo;
+                       fp_possible = (io_info.fpOkForIo > 0) ? true : false;
        }
 
        /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
@@ -1803,6 +2309,54 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
        cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
                raw_smp_processor_id() % instance->msix_vectors : 0;
 
+       praid_context = &io_request->RaidContext;
+
+       if (instance->is_ventura) {
+               spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
+               megasas_stream_detect(instance, cmd, &io_info);
+               spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
+               /* In Ventura, if a stream is detected for a read and the LD
+                * is read-ahead capable, route this IO as an LDIO.
+                */
+               if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
+                   io_info.isRead && io_info.ra_capable)
+                       fp_possible = false;
+
+               /* FP for Optimal raid level 1.
+                * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
+                * are built by the driver as LD I/Os.
+                * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
+                * (there is never a reason to process these as buffered writes)
+                * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
+                * with the SLD bit asserted.
+                */
+               if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+                       mrdev_priv = scp->device->hostdata;
+
+                       if (atomic_inc_return(&instance->fw_outstanding) >
+                               (instance->host->can_queue)) {
+                               fp_possible = false;
+                               atomic_dec(&instance->fw_outstanding);
+                       } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
+                                  atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) {
+                               fp_possible = false;
+                               atomic_dec(&instance->fw_outstanding);
+                               if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
+                                       atomic_set(&mrdev_priv->r1_ldio_hint,
+                                                  instance->r1_ldio_hint_default);
+                       }
+               }
+
+               /* If raid is NULL, set CPU affinity to default CPU0 */
+               if (raid)
+                       megasas_set_raidflag_cpu_affinity(praid_context,
+                               raid, fp_possible, io_info.isRead,
+                               scsi_buff_len);
+               else
+                       praid_context->raid_context_g35.routing_flags |=
+                               (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
+       }
+
        if (fp_possible) {
                megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
                                   local_map_ptr, start_lba_lo);
@@ -1811,29 +2365,52 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                        (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
                         << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                if (fusion->adapter_type == INVADER_SERIES) {
-                       if (io_request->RaidContext.regLockFlags ==
+                       if (io_request->RaidContext.raid_context.reg_lock_flags ==
                            REGION_TYPE_UNUSED)
                                cmd->request_desc->SCSIIO.RequestFlags =
                                        (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-                       io_request->RaidContext.Type = MPI2_TYPE_CUDA;
-                       io_request->RaidContext.nseg = 0x1;
+                       io_request->RaidContext.raid_context.type
+                               = MPI2_TYPE_CUDA;
+                       io_request->RaidContext.raid_context.nseg = 0x1;
                        io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-                       io_request->RaidContext.regLockFlags |=
+                       io_request->RaidContext.raid_context.reg_lock_flags |=
                          (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
                           MR_RL_FLAGS_SEQ_NUM_ENABLE);
+               } else if (instance->is_ventura) {
+                       io_request->RaidContext.raid_context_g35.nseg_type |=
+                                               (1 << RAID_CONTEXT_NSEG_SHIFT);
+                       io_request->RaidContext.raid_context_g35.nseg_type |=
+                                               (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+                       io_request->RaidContext.raid_context_g35.routing_flags |=
+                                               (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+                       io_request->IoFlags |=
+                               cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
                }
-               if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
-                   (io_info.isRead)) {
+               if (fusion->load_balance_info &&
+                       (fusion->load_balance_info[device_id].loadBalanceFlag) &&
+                       (io_info.isRead)) {
                        io_info.devHandle =
                                get_updated_dev_handle(instance,
                                        &fusion->load_balance_info[device_id],
-                                       &io_info);
+                                       &io_info, local_map_ptr);
                        scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
                        cmd->pd_r1_lb = io_info.pd_after_lb;
+                       if (instance->is_ventura)
+                               io_request->RaidContext.raid_context_g35.span_arm
+                                       = io_info.span_arm;
+                       else
+                               io_request->RaidContext.raid_context.span_arm
+                                       = io_info.span_arm;
+
                } else
                        scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 
+               if (instance->is_ventura)
+                       cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
+               else
+                       cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
+
                if ((raidLUN[0] == 1) &&
                        (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
                        instance->dev_handle = !(instance->dev_handle);
@@ -1843,28 +2420,39 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
 
                cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
                io_request->DevHandle = io_info.devHandle;
+               cmd->pd_interface = io_info.pd_interface;
                /* populate the LUN field */
                memcpy(io_request->LUN, raidLUN, 8);
        } else {
-               io_request->RaidContext.timeoutValue =
+               io_request->RaidContext.raid_context.timeout_value =
                        cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
                cmd->request_desc->SCSIIO.RequestFlags =
                        (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
                         << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
                if (fusion->adapter_type == INVADER_SERIES) {
                        if (io_info.do_fp_rlbypass ||
-                               (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED))
+                       (io_request->RaidContext.raid_context.reg_lock_flags
+                                       == REGION_TYPE_UNUSED))
                                cmd->request_desc->SCSIIO.RequestFlags =
                                        (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                                        MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-                       io_request->RaidContext.Type = MPI2_TYPE_CUDA;
-                       io_request->RaidContext.regLockFlags |=
+                       io_request->RaidContext.raid_context.type
+                               = MPI2_TYPE_CUDA;
+                       io_request->RaidContext.raid_context.reg_lock_flags |=
                                (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
                                 MR_RL_FLAGS_SEQ_NUM_ENABLE);
-                       io_request->RaidContext.nseg = 0x1;
+                       io_request->RaidContext.raid_context.nseg = 0x1;
+               } else if (instance->is_ventura) {
+                       io_request->RaidContext.raid_context_g35.routing_flags |=
+                                       (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+                       io_request->RaidContext.raid_context_g35.nseg_type |=
+                                       (1 << RAID_CONTEXT_NSEG_SHIFT);
+                       io_request->RaidContext.raid_context_g35.nseg_type |=
+                                       (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
                }
                io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
                io_request->DevHandle = cpu_to_le16(device_id);
+
        } /* Not FP */
 }
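 /*
  * Ventura (gen3.5) controllers take the sequence-number enable and
  * nseg/type hints through raid_context_g35.routing_flags and nseg_type,
  * while Thunderbolt/Invader controllers keep them in the legacy
  * RAID_CONTEXT reg_lock_flags, type and nseg fields, hence the paired
  * branches above.
  */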
 
@@ -1881,27 +2469,26 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
 {
        u32 device_id;
        struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
-       u16 pd_index = 0;
+       u16 ld;
        struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
        struct fusion_context *fusion = instance->ctrl_context;
        u8                          span, physArm;
        __le16                      devHandle;
-       u32                         ld, arRef, pd;
+       u32                         arRef, pd;
        struct MR_LD_RAID                  *raid;
        struct RAID_CONTEXT                *pRAID_Context;
        u8 fp_possible = 1;
 
        io_request = cmd->io_request;
        device_id = MEGASAS_DEV_INDEX(scmd);
-       pd_index = MEGASAS_PD_INDEX(scmd);
        local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
        io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
        /* get RAID_Context pointer */
-       pRAID_Context = &io_request->RaidContext;
+       pRAID_Context = &io_request->RaidContext.raid_context;
        /* Check with FW team */
-       pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-       pRAID_Context->regLockRowLBA    = 0;
-       pRAID_Context->regLockLength    = 0;
+       pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+       pRAID_Context->reg_lock_row_lba    = 0;
+       pRAID_Context->reg_lock_length    = 0;
 
        if (fusion->fast_path_io && (
                device_id < instance->fw_supported_vd_count)) {
@@ -1909,10 +2496,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
                ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
                if (ld >= instance->fw_supported_vd_count)
                        fp_possible = 0;
-
-               raid = MR_LdRaidGet(ld, local_map_ptr);
-               if (!(raid->capability.fpNonRWCapable))
-                       fp_possible = 0;
+               else {
+                       raid = MR_LdRaidGet(ld, local_map_ptr);
+                       if (!(raid->capability.fpNonRWCapable))
+                               fp_possible = 0;
+               }
        } else
                fp_possible = 0;
 
@@ -1920,7 +2508,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
                io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
                io_request->DevHandle = cpu_to_le16(device_id);
                io_request->LUN[1] = scmd->device->lun;
-               pRAID_Context->timeoutValue =
+               pRAID_Context->timeout_value =
                        cpu_to_le16 (scmd->request->timeout / HZ);
                cmd->request_desc->SCSIIO.RequestFlags =
                        (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
@@ -1928,9 +2516,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
        } else {
 
                /* set RAID context values */
-               pRAID_Context->configSeqNum = raid->seqNum;
-               pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
-               pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
+               pRAID_Context->config_seq_num = raid->seqNum;
+               if (!instance->is_ventura)
+                       pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
+               pRAID_Context->timeout_value =
+                       cpu_to_le16(raid->fpIoTimeoutForLd);
 
                /* get the DevHandle for the PD (since this is
                   fpNonRWCapable, this is a single disk RAID0) */
@@ -1965,7 +2555,8 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
  */
 static void
 megasas_build_syspd_fusion(struct megasas_instance *instance,
-       struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
+       struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
+       bool fp_possible)
 {
        u32 device_id;
        struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
@@ -1975,22 +2566,25 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
        struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
        struct RAID_CONTEXT     *pRAID_Context;
        struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
        struct fusion_context *fusion = instance->ctrl_context;
        pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
 
        device_id = MEGASAS_DEV_INDEX(scmd);
        pd_index = MEGASAS_PD_INDEX(scmd);
        os_timeout_value = scmd->request->timeout / HZ;
+       mr_device_priv_data = scmd->device->hostdata;
+       cmd->pd_interface = mr_device_priv_data->interface_type;
 
        io_request = cmd->io_request;
        /* get RAID_Context pointer */
-       pRAID_Context = &io_request->RaidContext;
-       pRAID_Context->regLockFlags = 0;
-       pRAID_Context->regLockRowLBA = 0;
-       pRAID_Context->regLockLength = 0;
+       pRAID_Context = &io_request->RaidContext.raid_context;
+       pRAID_Context->reg_lock_flags = 0;
+       pRAID_Context->reg_lock_row_lba = 0;
+       pRAID_Context->reg_lock_length = 0;
        io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
        io_request->LUN[1] = scmd->device->lun;
-       pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+       pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
                << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 
        /* If FW supports PD sequence number */
@@ -1999,24 +2593,38 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
                /* TgtId must be incremented by 255 as jbod seq number is index
                 * below raid map
                 */
-               pRAID_Context->VirtualDiskTgtId =
-                       cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
-               pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
+                /* More than 256 PD/JBOD support for Ventura */
+               if (instance->support_morethan256jbod)
+                       pRAID_Context->virtual_disk_tgt_id =
+                               pd_sync->seq[pd_index].pd_target_id;
+               else
+                       pRAID_Context->virtual_disk_tgt_id =
+                               cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
+               pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
                io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
-               pRAID_Context->regLockFlags |=
-                       (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
-               pRAID_Context->Type = MPI2_TYPE_CUDA;
-               pRAID_Context->nseg = 0x1;
+               if (instance->is_ventura) {
+                       io_request->RaidContext.raid_context_g35.routing_flags |=
+                               (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
+                       io_request->RaidContext.raid_context_g35.nseg_type |=
+                                                       (1 << RAID_CONTEXT_NSEG_SHIFT);
+                       io_request->RaidContext.raid_context_g35.nseg_type |=
+                                                       (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
+               } else {
+                       pRAID_Context->type = MPI2_TYPE_CUDA;
+                       pRAID_Context->nseg = 0x1;
+                       pRAID_Context->reg_lock_flags |=
+                               (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+               }
        } else if (fusion->fast_path_io) {
-               pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-               pRAID_Context->configSeqNum = 0;
+               pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+               pRAID_Context->config_seq_num = 0;
                local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
                io_request->DevHandle =
                        local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
        } else {
                /* Want to send all IO via FW path */
-               pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-               pRAID_Context->configSeqNum = 0;
+               pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+               pRAID_Context->config_seq_num = 0;
                io_request->DevHandle = cpu_to_le16(0xFFFF);
        }
 
@@ -2032,17 +2640,17 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
                cmd->request_desc->SCSIIO.RequestFlags =
                        (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
                                MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-               pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
-               pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+               pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
+               pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
        } else {
                /* system pd Fast Path */
                io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
                timeout_limit = (scmd->device->type == TYPE_DISK) ?
                                255 : 0xFFFF;
-               pRAID_Context->timeoutValue =
+               pRAID_Context->timeout_value =
                        cpu_to_le16((os_timeout_value > timeout_limit) ?
                        timeout_limit : os_timeout_value);
-               if (fusion->adapter_type == INVADER_SERIES)
+               if (fusion->adapter_type >= INVADER_SERIES)
                        io_request->IoFlags |=
                                cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
 
@@ -2066,9 +2674,11 @@ megasas_build_io_fusion(struct megasas_instance *instance,
                        struct scsi_cmnd *scp,
                        struct megasas_cmd_fusion *cmd)
 {
-       u16 sge_count;
+       int sge_count;
        u8  cmd_type;
        struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
+       struct MR_PRIV_DEVICE *mr_device_priv_data;
+       mr_device_priv_data = scp->device->hostdata;
 
        /* Zero out some fields so they don't get reused */
        memset(io_request->LUN, 0x0, 8);
@@ -2078,9 +2688,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
        io_request->Control = 0;
        io_request->EEDPBlockSize = 0;
        io_request->ChainOffset = 0;
-       io_request->RaidContext.RAIDFlags = 0;
-       io_request->RaidContext.Type = 0;
-       io_request->RaidContext.nseg = 0;
+       io_request->RaidContext.raid_context.raid_flags = 0;
+       io_request->RaidContext.raid_context.type = 0;
+       io_request->RaidContext.raid_context.nseg = 0;
 
        memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
        /*
@@ -2097,12 +2707,14 @@ megasas_build_io_fusion(struct megasas_instance *instance,
                megasas_build_ld_nonrw_fusion(instance, scp, cmd);
                break;
        case READ_WRITE_SYSPDIO:
+               megasas_build_syspd_fusion(instance, scp, cmd, true);
+               break;
        case NON_READ_WRITE_SYSPDIO:
-               if (instance->secure_jbod_support &&
-                       (cmd_type == NON_READ_WRITE_SYSPDIO))
-                       megasas_build_syspd_fusion(instance, scp, cmd, 0);
+               if (instance->secure_jbod_support ||
+                   mr_device_priv_data->is_tm_capable)
+                       megasas_build_syspd_fusion(instance, scp, cmd, false);
                else
-                       megasas_build_syspd_fusion(instance, scp, cmd, 1);
+                       megasas_build_syspd_fusion(instance, scp, cmd, true);
                break;
        default:
                break;
@@ -2112,23 +2724,27 @@ megasas_build_io_fusion(struct megasas_instance *instance,
         * Construct SGL
         */
 
-       sge_count =
-               megasas_make_sgl_fusion(instance, scp,
-                                       (struct MPI25_IEEE_SGE_CHAIN64 *)
-                                       &io_request->SGL, cmd);
+       sge_count = megasas_make_sgl(instance, scp, cmd);
 
-       if (sge_count > instance->max_num_sge) {
-               dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
-                      "max (0x%x) allowed\n", sge_count,
-                      instance->max_num_sge);
+       if (sge_count > instance->max_num_sge || (sge_count < 0)) {
+               dev_err(&instance->pdev->dev,
+                       "%s %d sge_count (%d) is out of range. Range is: 0-%d\n",
+                       __func__, __LINE__, sge_count, instance->max_num_sge);
                return 1;
        }
 
-       /* numSGE store lower 8 bit of sge_count.
-        * numSGEExt store higher 8 bit of sge_count
-        */
-       io_request->RaidContext.numSGE = sge_count;
-       io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
+       if (instance->is_ventura) {
+               set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
+               cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
+               cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
+       } else {
+               /* numSGE store lower 8 bit of sge_count.
+                * numSGEExt store higher 8 bit of sge_count
+                */
+               io_request->RaidContext.raid_context.num_sge = sge_count;
+               io_request->RaidContext.raid_context.num_sge_ext =
+                       (u8)(sge_count >> 8);
+       }
 
        io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 
@@ -2149,25 +2765,61 @@ megasas_build_io_fusion(struct megasas_instance *instance,
        return 0;
 }
 
-union MEGASAS_REQUEST_DESCRIPTOR_UNION *
+static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
 {
        u8 *p;
        struct fusion_context *fusion;
 
-       if (index >= instance->max_fw_cmds) {
-               dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
-                      "descriptor for scsi%d\n", index,
-                       instance->host->host_no);
-               return NULL;
-       }
        fusion = instance->ctrl_context;
-       p = fusion->req_frames_desc
-               +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
+       p = fusion->req_frames_desc +
+               sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
 
        return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
 }
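 
 /*
  * The index passed in is always SMID - 1 of a command taken from the
  * driver's own pool, so it is inherently within max_fw_cmds and the
  * lookup above cannot fail, which is why callers use the returned
  * descriptor without a NULL check.
  */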
 
+
+/* megasas_prepare_secondRaid1_IO
+ *  Prepares the second IO of a RAID 1 fast path write pair
+ */
+void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+                           struct megasas_cmd_fusion *cmd,
+                           struct megasas_cmd_fusion *r1_cmd)
+{
+       union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
+       struct fusion_context *fusion;
+       fusion = instance->ctrl_context;
+       req_desc = cmd->request_desc;
+       /* copy the io request frame as well as 8 SGEs of data for the r1 command */
+       memcpy(r1_cmd->io_request, cmd->io_request,
+              (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
+       memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+              (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
+       /* the sense buffer is different for the r1 command */
+       r1_cmd->io_request->SenseBufferLowAddress =
+                       cpu_to_le32(r1_cmd->sense_phys_addr);
+       r1_cmd->scmd = cmd->scmd;
+       req_desc2 = megasas_get_request_descriptor(instance,
+                                                  (r1_cmd->index - 1));
+       req_desc2->Words = 0;
+       r1_cmd->request_desc = req_desc2;
+       req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
+       req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
+       r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
+       r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
+       r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
+       cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+                       cpu_to_le16(r1_cmd->index);
+       r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
+                       cpu_to_le16(cmd->index);
+       /* MSIxIndex of both commands' request descriptors must be the same */
+       r1_cmd->request_desc->SCSIIO.MSIxIndex =
+                       cmd->request_desc->SCSIIO.MSIxIndex;
+       /* span_arm is different for the r1 cmd */
+       r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
+                       cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
+}
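+
+/*
+ * After megasas_prepare_secondRaid1_IO() the two commands are cross
+ * linked: each one's raid_context_g35.smid.peer_smid names the other,
+ * both request descriptors share the same MSI-x index, and the clone
+ * targets the alternate device handle with span_arm advanced by one to
+ * address the peer arm of the mirror.
+ */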
+
 /**
  * megasas_build_and_issue_cmd_fusion -Main routine for building and
  *                                     issuing non IOCTL cmd
@@ -2178,7 +2830,7 @@ static u32
 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
                                   struct scsi_cmnd *scmd)
 {
-       struct megasas_cmd_fusion *cmd;
+       struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
        union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
        u32 index;
        struct fusion_context *fusion;
@@ -2193,13 +2845,22 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
                return SCSI_MLQUEUE_DEVICE_BUSY;
        }
 
+       if (atomic_inc_return(&instance->fw_outstanding) >
+                       instance->host->can_queue) {
+               atomic_dec(&instance->fw_outstanding);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
        cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
 
+       if (!cmd) {
+               atomic_dec(&instance->fw_outstanding);
+               return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
        index = cmd->index;
 
        req_desc = megasas_get_request_descriptor(instance, index-1);
-       if (!req_desc)
-               return SCSI_MLQUEUE_HOST_BUSY;
 
        req_desc->Words = 0;
        cmd->request_desc = req_desc;
@@ -2208,6 +2869,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
                megasas_return_cmd_fusion(instance, cmd);
                dev_err(&instance->pdev->dev, "Error building command\n");
                cmd->request_desc = NULL;
+               atomic_dec(&instance->fw_outstanding);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
@@ -2218,17 +2880,91 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
            cmd->io_request->ChainOffset != 0xF)
                dev_err(&instance->pdev->dev, "The chain offset value is not "
                       "correct : %x\n", cmd->io_request->ChainOffset);
+       /*
+        *      If the LD is RAID 1/10 fast path write capable, try to get
+        *      a second command from the pool and construct it: FW has
+        *      confirmed that the LBA values of the two PDs backing a
+        *      single R1/10 LD are always the same.
+        *
+        *      The driver-side count always stays below max_fw_cmds, so a
+        *      second command is available.
+        */
+       if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
+               r1_cmd = megasas_get_cmd_fusion(instance,
+                               (scmd->request->tag + instance->max_fw_cmds));
+               megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
+       }
+
 
        /*
         * Issue the command to the FW
         */
-       atomic_inc(&instance->fw_outstanding);
 
        megasas_fire_cmd_fusion(instance, req_desc);
 
+       if (r1_cmd)
+               megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
+
        return 0;
 }
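 
 /*
  * A slot against host->can_queue is reserved with atomic_inc_return()
  * before the command is built, and every failure path drops it again,
  * so fw_outstanding cannot leak on the error exits above.
  */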
 
+/**
+ * megasas_complete_r1_command -
+ * completes R1 FP write commands which have a valid peer SMID
+ * @instance:                  Adapter soft state
+ * @cmd:                       MPT command frame
+ *
+ */
+static inline void
+megasas_complete_r1_command(struct megasas_instance *instance,
+                           struct megasas_cmd_fusion *cmd)
+{
+       u8 *sense, status, ex_status;
+       u32 data_length;
+       u16 peer_smid;
+       struct fusion_context *fusion;
+       struct megasas_cmd_fusion *r1_cmd = NULL;
+       struct scsi_cmnd *scmd_local = NULL;
+       struct RAID_CONTEXT_G35 *rctx_g35;
+
+       rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
+       fusion = instance->ctrl_context;
+       peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
+
+       r1_cmd = fusion->cmd_list[peer_smid - 1];
+       scmd_local = cmd->scmd;
+       status = rctx_g35->status;
+       ex_status = rctx_g35->ex_status;
+       data_length = cmd->io_request->DataLength;
+       sense = cmd->sense;
+
+       cmd->cmd_completed = true;
+
+       /* Check whether the peer command has completed */
+       if (r1_cmd->cmd_completed) {
+               rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
+               if (rctx_g35->status != MFI_STAT_OK) {
+                       status = rctx_g35->status;
+                       ex_status = rctx_g35->ex_status;
+                       data_length = r1_cmd->io_request->DataLength;
+                       sense = r1_cmd->sense;
+               }
+
+               megasas_return_cmd_fusion(instance, r1_cmd);
+               map_cmd_status(fusion, scmd_local, status, ex_status,
+                              le32_to_cpu(data_length), sense);
+               if (instance->ldio_threshold &&
+                   megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+                       atomic_dec(&instance->ldio_outstanding);
+               scmd_local->SCp.ptr = NULL;
+               megasas_return_cmd_fusion(instance, cmd);
+               scsi_dma_unmap(scmd_local);
+               scmd_local->scsi_done(scmd_local);
+       }
+}
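+
+/*
+ * Whichever half of an R1 fast path write pair completes first only
+ * marks itself done above; the second completion returns both fusion
+ * commands and calls scsi_done() exactly once, reporting the peer's
+ * status and sense data whenever the peer did not finish with
+ * MFI_STAT_OK.
+ */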
+
 /**
  * complete_cmd_fusion -       Completes command
  * @instance:                  Adapter soft state
@@ -2244,8 +2980,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
        struct megasas_cmd *cmd_mfi;
        struct megasas_cmd_fusion *cmd_fusion;
        u16 smid, num_completed;
-       u8 reply_descript_type;
-       u32 status, extStatus, device_id;
+       u8 reply_descript_type, *sense, status, extStatus;
+       u32 device_id, data_length;
        union desc_value d_val;
        struct LD_LOAD_BALANCE_INFO *lbinfo;
        int threshold_reply_count = 0;
@@ -2275,20 +3011,17 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 
        while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
               d_val.u.high != cpu_to_le32(UINT_MAX)) {
-               smid = le16_to_cpu(reply_desc->SMID);
 
+               smid = le16_to_cpu(reply_desc->SMID);
                cmd_fusion = fusion->cmd_list[smid - 1];
-
-               scsi_io_req =
-                       (struct MPI2_RAID_SCSI_IO_REQUEST *)
-                 cmd_fusion->io_request;
-
-               if (cmd_fusion->scmd)
-                       cmd_fusion->scmd->SCp.ptr = NULL;
+               scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
+                                               cmd_fusion->io_request;
 
                scmd_local = cmd_fusion->scmd;
-               status = scsi_io_req->RaidContext.status;
-               extStatus = scsi_io_req->RaidContext.exStatus;
+               status = scsi_io_req->RaidContext.raid_context.status;
+               extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
+               sense = cmd_fusion->sense;
+               data_length = scsi_io_req->DataLength;
 
                switch (scsi_io_req->Function) {
                case MPI2_FUNCTION_SCSI_TASK_MGMT:
@@ -2303,37 +3036,33 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
                        break;
                case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
                        /* Update load balancing info */
-                       device_id = MEGASAS_DEV_INDEX(scmd_local);
-                       lbinfo = &fusion->load_balance_info[device_id];
-                       if (cmd_fusion->scmd->SCp.Status &
-                           MEGASAS_LOAD_BALANCE_FLAG) {
+                       if (fusion->load_balance_info &&
+                           (cmd_fusion->scmd->SCp.Status &
+                           MEGASAS_LOAD_BALANCE_FLAG)) {
+                               device_id = MEGASAS_DEV_INDEX(scmd_local);
+                               lbinfo = &fusion->load_balance_info[device_id];
                                atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
-                               cmd_fusion->scmd->SCp.Status &=
-                                       ~MEGASAS_LOAD_BALANCE_FLAG;
+                               cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
                        }
-                       if (reply_descript_type ==
-                           MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
-                               if (megasas_dbg_lvl == 5)
-                                       dev_err(&instance->pdev->dev, "\nFAST Path "
-                                              "IO Success\n");
-                       }
-                       /* Fall thru and complete IO */
+                       /* Fall thru and complete IO */
                case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
-                       /* Map the FW Cmd Status */
-                       map_cmd_status(cmd_fusion, status, extStatus);
-                       scsi_io_req->RaidContext.status = 0;
-                       scsi_io_req->RaidContext.exStatus = 0;
-                       if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
-                               atomic_dec(&instance->ldio_outstanding);
-                       megasas_return_cmd_fusion(instance, cmd_fusion);
-                       scsi_dma_unmap(scmd_local);
-                       scmd_local->scsi_done(scmd_local);
                        atomic_dec(&instance->fw_outstanding);
-
+                       if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
+                               map_cmd_status(fusion, scmd_local, status,
+                                              extStatus, le32_to_cpu(data_length),
+                                              sense);
+                               if (instance->ldio_threshold &&
+                                   (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
+                                       atomic_dec(&instance->ldio_outstanding);
+                               scmd_local->SCp.ptr = NULL;
+                               megasas_return_cmd_fusion(instance, cmd_fusion);
+                               scsi_dma_unmap(scmd_local);
+                               scmd_local->scsi_done(scmd_local);
+                       } else  /* Optimal VD - R1 FP command completion. */
+                               megasas_complete_r1_command(instance, cmd_fusion);
                        break;
                case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
                        cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
-
                        /* Poll mode. Dummy free.
                         * In case of Interrupt mode, caller has reverse check.
                         */
@@ -2376,7 +3105,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
                 * pending to be completed
                 */
                if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
-                       if (fusion->adapter_type == INVADER_SERIES)
+                       if (instance->msix_combined)
                                writel(((MSIxIndex & 0x7) << 24) |
                                        fusion->last_reply_idx[MSIxIndex],
                                        instance->reply_post_host_index_addr[MSIxIndex/8]);
@@ -2392,7 +3121,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
                return IRQ_NONE;
 
        wmb();
-       if (fusion->adapter_type == INVADER_SERIES)
+       if (instance->msix_combined)
                writel(((MSIxIndex & 0x7) << 24) |
                        fusion->last_reply_idx[MSIxIndex],
                        instance->reply_post_host_index_addr[MSIxIndex/8]);
@@ -2404,6 +3133,22 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
        return IRQ_HANDLED;
 }
 
+/**
+ * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
+ * @instance_addr:             Adapter soft state address
+ */
+void megasas_sync_irqs(unsigned long instance_addr)
+{
+       u32 count, i;
+       struct megasas_instance *instance =
+               (struct megasas_instance *)instance_addr;
+
+       count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
+       for (i = 0; i < count; i++)
+               synchronize_irq(pci_irq_vector(instance->pdev, i));
+}
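+
+/*
+ * synchronize_irq() only returns once any handler currently executing on
+ * the given vector has finished, so after this loop reply-queue
+ * processing is quiescent and the caller may poll completions itself.
+ */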
+
 /**
  * megasas_complete_cmd_dpc_fusion -   Completes command
  * @instance:                  Adapter soft state
@@ -2489,7 +3234,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
  * mfi_cmd:                    megasas_cmd pointer
  *
  */
-u8
+void
 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
                        struct megasas_cmd *mfi_cmd)
 {
@@ -2518,7 +3263,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 
        io_req = cmd->io_request;
 
-       if (fusion->adapter_type == INVADER_SERIES) {
+       if (fusion->adapter_type >= INVADER_SERIES) {
                struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
                        (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
                sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -2539,8 +3284,6 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
                MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 
        mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
-
-       return 0;
 }
 
 /**
@@ -2552,21 +3295,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
-       union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+       union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
        u16 index;
 
-       if (build_mpt_mfi_pass_thru(instance, cmd)) {
-               dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
-               return NULL;
-       }
-
+       build_mpt_mfi_pass_thru(instance, cmd);
        index = cmd->context.smid;
 
        req_desc = megasas_get_request_descriptor(instance, index - 1);
 
-       if (!req_desc)
-               return NULL;
-
        req_desc->Words = 0;
        req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
                                         MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
@@ -2582,21 +3318,16 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
  * @cmd:                       mfi cmd pointer
  *
  */
-int
+void
 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
                          struct megasas_cmd *cmd)
 {
        union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 
        req_desc = build_mpt_cmd(instance, cmd);
-       if (!req_desc) {
-               dev_info(&instance->pdev->dev, "Failed from %s %d\n",
-                                       __func__, __LINE__);
-               return DCMD_NOT_FIRED;
-       }
 
        megasas_fire_cmd_fusion(instance, req_desc);
-       return DCMD_SUCCESS;
+       return;
 }
 
 /**
@@ -2771,6 +3502,14 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
                               " will reset adapter scsi%d.\n",
                                instance->host->host_no);
                        megasas_complete_cmd_dpc_fusion((unsigned long)instance);
+                       if (instance->requestorId && reason) {
+                               dev_warn(&instance->pdev->dev, "SR-IOV: Found FW in FAULT"
+                               " state while polling during"
+                               " I/O timeout handling for scsi%d\n",
+                               instance->host->host_no);
+                               *convert = 1;
+                       }
+
                        retval = 1;
                        goto out;
                }
@@ -2790,7 +3529,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
                }
 
                /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
-               if (instance->requestorId && reason) {
+               if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
                        if (instance->hb_host_mem->HB.fwCounter !=
                            instance->hb_host_mem->HB.driverCounter) {
                                instance->hb_host_mem->HB.driverCounter =
@@ -3030,12 +3769,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
 
        req_desc = megasas_get_request_descriptor(instance,
                        (cmd_fusion->index - 1));
-       if (!req_desc) {
-               dev_err(&instance->pdev->dev, "Failed from %s %d\n",
-                       __func__, __LINE__);
-               megasas_return_cmd(instance, cmd_mfi);
-               return -ENOMEM;
-       }
 
        cmd_fusion->request_desc = req_desc;
        req_desc->Words = 0;
@@ -3092,7 +3825,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
                        break;
                else {
                        instance->instancet->disable_intr(instance);
-                       msleep(1000);
+                       megasas_sync_irqs((unsigned long)instance);
                        megasas_complete_cmd_dpc_fusion
                                        ((unsigned long)instance);
                        instance->instancet->enable_intr(instance);
@@ -3173,13 +3906,13 @@ static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
        instance = (struct megasas_instance *)sdev->host->hostdata;
        fusion = instance->ctrl_context;
 
-       if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
+       if (!MEGASAS_IS_LOGICAL(sdev)) {
                if (instance->use_seqnum_jbod_fp) {
-                               pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
-                                               sdev->id;
-                               pd_sync = (void *)fusion->pd_seq_sync
-                                               [(instance->pd_seq_map_id - 1) & 1];
-                               devhandle = pd_sync->seq[pd_index].devHandle;
+                       pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+                                   + sdev->id;
+                       pd_sync = (void *)fusion->pd_seq_sync
+                                       [(instance->pd_seq_map_id - 1) & 1];
+                       devhandle = pd_sync->seq[pd_index].devHandle;
                } else
                        sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
                                " without JBOD MAP support from %s %d\n", __func__, __LINE__);
@@ -3212,6 +3945,9 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
        instance = (struct megasas_instance *)scmd->device->host->hostdata;
        fusion = instance->ctrl_context;
 
+       scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
+       scsi_print_command(scmd);
+
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
                dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
                "SCSI host:%d\n", instance->host->host_no);
@@ -3292,6 +4028,9 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
        instance = (struct megasas_instance *)scmd->device->host->hostdata;
        fusion = instance->ctrl_context;
 
+       sdev_printk(KERN_INFO, scmd->device,
+                   "target reset called for scmd(%p)\n", scmd);
+
        if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
                dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
                "SCSI host:%d\n", instance->host->host_no);
@@ -3362,7 +4101,7 @@ int megasas_check_mpio_paths(struct megasas_instance *instance,
        struct scsi_cmnd *scmd)
 {
        struct megasas_instance *peer_instance = NULL;
-       int retval = (DID_RESET << 16);
+       int retval = (DID_REQUEUE << 16);
 
        if (instance->peerIsPresent) {
                peer_instance = megasas_get_peer_instance(instance);
@@ -3377,9 +4116,9 @@ int megasas_check_mpio_paths(struct megasas_instance *instance,
 /* Core fusion reset function */
 int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
 {
-       int retval = SUCCESS, i, convert = 0;
+       int retval = SUCCESS, i, j, convert = 0;
        struct megasas_instance *instance;
-       struct megasas_cmd_fusion *cmd_fusion;
+       struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
        struct fusion_context *fusion;
        u32 abs_state, status_reg, reset_adapter;
        u32 io_timeout_in_crash_mode = 0;
@@ -3440,7 +4179,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
        set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
        atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
        instance->instancet->disable_intr(instance);
-       msleep(1000);
+       megasas_sync_irqs((unsigned long)instance);
 
        /* First try waiting for commands to complete */
        if (megasas_wait_for_outstanding_fusion(instance, reason,
@@ -3451,23 +4190,40 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
                if (convert)
                        reason = 0;
 
+               if (megasas_dbg_lvl & OCR_LOGS)
+                       dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
+
                /* Now return commands back to the OS */
                for (i = 0 ; i < instance->max_scsi_cmds; i++) {
                        cmd_fusion = fusion->cmd_list[i];
+                       /* check for extra commands issued by the driver */
+                       if (instance->is_ventura) {
+                               r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
+                               megasas_return_cmd_fusion(instance, r1_cmd);
+                       }
                        scmd_local = cmd_fusion->scmd;
                        if (cmd_fusion->scmd) {
+                               if (megasas_dbg_lvl & OCR_LOGS) {
+                                       sdev_printk(KERN_INFO,
+                                               cmd_fusion->scmd->device, "SMID: 0x%x\n",
+                                               cmd_fusion->index);
+                                       scsi_print_command(cmd_fusion->scmd);
+                               }
+
                                scmd_local->result =
                                        megasas_check_mpio_paths(instance,
                                                        scmd_local);
-                               if (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
+                               if (instance->ldio_threshold &&
+                                       megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
                                        atomic_dec(&instance->ldio_outstanding);
                                megasas_return_cmd_fusion(instance, cmd_fusion);
                                scsi_dma_unmap(scmd_local);
                                scmd_local->scsi_done(scmd_local);
-                               atomic_dec(&instance->fw_outstanding);
                        }
                }
 
+               atomic_set(&instance->fw_outstanding, 0);
+
                status_reg = instance->instancet->read_fw_status_reg(
                        instance->reg_set);
                abs_state = status_reg & MFI_STATE_MASK;
@@ -3528,11 +4284,13 @@ transition_to_ready:
                                        __func__, __LINE__);
                                megaraid_sas_kill_hba(instance);
                                retval = FAILED;
+                               goto out;
                        }
                        /* Reset load balance info */
-                       memset(fusion->load_balance_info, 0,
-                              sizeof(struct LD_LOAD_BALANCE_INFO)
-                              *MAX_LOGICAL_DRIVES_EXT);
+                       if (fusion->load_balance_info)
+                               memset(fusion->load_balance_info, 0,
+                                      (sizeof(struct LD_LOAD_BALANCE_INFO) *
+                                      MAX_LOGICAL_DRIVES_EXT));
 
                        if (!megasas_get_map_info(instance))
                                megasas_sync_map_info(instance);
@@ -3540,7 +4298,17 @@ transition_to_ready:
                        megasas_setup_jbod_map(instance);
 
                        shost_for_each_device(sdev, shost)
-                               megasas_update_sdev_properties(sdev);
+                               megasas_set_dynamic_target_properties(sdev);
+
+                       /* reset stream detection array */
+                       if (instance->is_ventura) {
+                               for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
+                                       memset(fusion->stream_detect_by_ld[j],
+                                              0, sizeof(struct LD_STREAM_DETECT));
+                                       fusion->stream_detect_by_ld[j]->mru_bit_map
+                                               = MR_STREAM_BITMAP;
+                               }
+                       }
 
                        clear_bit(MEGASAS_FUSION_IN_RESET,
                                  &instance->reset_flags);
@@ -3676,6 +4444,64 @@ void megasas_fusion_ocr_wq(struct work_struct *work)
        megasas_reset_fusion(instance->host, 0);
 }
 
+/* Allocate fusion context */
+int
+megasas_alloc_fusion_context(struct megasas_instance *instance)
+{
+       struct fusion_context *fusion;
+
+       instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
+       instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+               instance->ctrl_context_pages);
+       if (!instance->ctrl_context) {
+               /* fall back to using vmalloc for fusion_context */
+               instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
+               if (!instance->ctrl_context) {
+                       dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
+                       return -ENOMEM;
+               }
+       }
+
+       fusion = instance->ctrl_context;
+
+       fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+               sizeof(struct LD_LOAD_BALANCE_INFO));
+       fusion->load_balance_info =
+               (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+               fusion->load_balance_info_pages);
+       if (!fusion->load_balance_info) {
+               fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+                       sizeof(struct LD_LOAD_BALANCE_INFO));
+               if (!fusion->load_balance_info)
+                       dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
+                               "continuing without Load Balance support\n");
+       }
+
+       return 0;
+}
+
+void
+megasas_free_fusion_context(struct megasas_instance *instance)
+{
+       struct fusion_context *fusion = instance->ctrl_context;
+
+       if (fusion) {
+               if (fusion->load_balance_info) {
+                       if (is_vmalloc_addr(fusion->load_balance_info))
+                               vfree(fusion->load_balance_info);
+                       else
+                               free_pages((ulong)fusion->load_balance_info,
+                                       fusion->load_balance_info_pages);
+               }
+
+               if (is_vmalloc_addr(fusion))
+                       vfree(fusion);
+               else
+                       free_pages((ulong)fusion,
+                               instance->ctrl_context_pages);
+       }
+}
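+
+/*
+ * megasas_alloc_fusion_context() prefers physically contiguous pages and
+ * falls back to vzalloc() under memory pressure; is_vmalloc_addr() lets
+ * this free path choose between vfree() and free_pages() without having
+ * to track which allocator succeeded.
+ */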
+
 struct megasas_instance_template megasas_instance_template_fusion = {
        .enable_intr = megasas_enable_intr_fusion,
        .disable_intr = megasas_disable_intr_fusion,
index e3bee04..d78d761 100644
@@ -59,6 +59,8 @@
 #define        MR_RL_FLAGS_GRANT_DESTINATION_CPU1          0x10
 #define        MR_RL_FLAGS_GRANT_DESTINATION_CUDA          0x80
 #define MR_RL_FLAGS_SEQ_NUM_ENABLE                 0x8
+#define MR_RL_WRITE_THROUGH_MODE                   0x00
+#define MR_RL_WRITE_BACK_MODE                      0x01
 
 /* T10 PI defines */
 #define MR_PROT_INFO_TYPE_CONTROLLER                0x8
 enum MR_RAID_FLAGS_IO_SUB_TYPE {
        MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
        MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
+       MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA     = 2,
+       MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P        = 3,
+       MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q        = 4,
+       MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
+       MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
 };
 
 /*
@@ -94,11 +101,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
 #define MEGASAS_FP_CMD_LEN     16
 #define MEGASAS_FUSION_IN_RESET 0
 #define THRESHOLD_REPLY_COUNT 50
+#define RAID_1_PEER_CMDS 2
 #define JBOD_MAPS_COUNT        2
 
 enum MR_FUSION_ADAPTER_TYPE {
        THUNDERBOLT_SERIES = 0,
        INVADER_SERIES = 1,
+       VENTURA_SERIES = 2,
 };
 
 /*
@@ -108,29 +117,133 @@ enum MR_FUSION_ADAPTER_TYPE {
 
 struct RAID_CONTEXT {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-       u8      nseg:4;
-       u8      Type:4;
+       u8 nseg:4;
+       u8 type:4;
 #else
-       u8      Type:4;
-       u8      nseg:4;
+       u8 type:4;
+       u8 nseg:4;
 #endif
-       u8      resvd0;
-       __le16  timeoutValue;
-       u8      regLockFlags;
-       u8      resvd1;
-       __le16  VirtualDiskTgtId;
-       __le64  regLockRowLBA;
-       __le32  regLockLength;
-       __le16  nextLMId;
-       u8      exStatus;
-       u8      status;
-       u8      RAIDFlags;
-       u8      numSGE;
-       __le16  configSeqNum;
-       u8      spanArm;
-       u8      priority;
-       u8      numSGEExt;
-       u8      resvd2;
+       u8 resvd0;
+       __le16 timeout_value;
+       u8 reg_lock_flags;
+       u8 resvd1;
+       __le16 virtual_disk_tgt_id;
+       __le64 reg_lock_row_lba;
+       __le32 reg_lock_length;
+       __le16 next_lmid;
+       u8 ex_status;
+       u8 status;
+       u8 raid_flags;
+       u8 num_sge;
+       __le16 config_seq_num;
+       u8 span_arm;
+       u8 priority;
+       u8 num_sge_ext;
+       u8 resvd2;
+};
+
+/*
+ * RAID context structure which describes Ventura MegaRAID specific
+ * IO parameters. This resides at offset 0x60 where the SGL normally
+ * starts in MPT IO frames.
+ */
+struct RAID_CONTEXT_G35 {
+       #define RAID_CONTEXT_NSEG_MASK  0x00F0
+       #define RAID_CONTEXT_NSEG_SHIFT 4
+       #define RAID_CONTEXT_TYPE_MASK  0x000F
+       #define RAID_CONTEXT_TYPE_SHIFT 0
+       u16 nseg_type;             /* 0x00 - 0x01 */
+       u16 timeout_value;         /* 0x02 - 0x03 */
+       u16 routing_flags;         /* 0x04 - 0x05 routing flags */
+       u16 virtual_disk_tgt_id;   /* 0x06 - 0x07 */
+       u64 reg_lock_row_lba;      /* 0x08 - 0x0F */
+       u32 reg_lock_length;       /* 0x10 - 0x13 */
+       union {
+               u16 next_lmid; /* 0x14 - 0x15 */
+               u16     peer_smid;      /* used for the raid 1/10 fp writes */
+       } smid;
+       u8 ex_status;       /* 0x16 : OUT */
+       u8 status;          /* 0x17 status */
+       u8 raid_flags;          /* 0x18 resvd[7:6], ioSubType[5:4],
+                                * resvd[3:1], preferredCpu[0]
+                                */
+       u8 span_arm;            /* 0x19 span[7:5], arm[4:0] */
+       u16     config_seq_num;         /* 0x1A - 0x1B */
+       union {
+               /*
+                * Bit format:
+                *       ---------------------------------
+                *       | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+                *       ---------------------------------
+                * Byte0 |    numSGE[7]- numSGE[0]       |
+                *       ---------------------------------
+                * Byte1 |SD | resvd     | numSGE 8-11   |
+                *        --------------------------------
+                */
+               #define NUM_SGE_MASK_LOWER      0xFF
+               #define NUM_SGE_MASK_UPPER      0x0F
+               #define NUM_SGE_SHIFT_UPPER     8
+               #define STREAM_DETECT_SHIFT     7
+               #define STREAM_DETECT_MASK      0x80
+               struct {
+#if   defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
+                       u16 stream_detected:1;
+                       u16 reserved:3;
+                       u16 num_sge:12;
+#else
+                       u16 num_sge:12;
+                       u16 reserved:3;
+                       u16 stream_detected:1;
+#endif
+               } bits;
+               u8 bytes[2];
+       } u;
+       u8 resvd2[2];          /* 0x1E-0x1F */
+};
+
+#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT     1
+#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT     2
+#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT     3
+#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT     4
+#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT     5
+#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT      6
+#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT     7
+#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT  8
+#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK   0x0F00
+#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT       12
+#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK        0xF000
+
+static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
+                              u16 sge_count)
+{
+       rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
+       rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
+                                                       & NUM_SGE_MASK_UPPER);
+}
+
+static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
+{
+       u16 sge_count;
+
+       sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
+                       << NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
+       return sge_count;
+}
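+
+/*
+ * Example of the 12-bit SGE count packing: sge_count = 0x123 stores
+ * bytes[0] = 0x23 (bits 7:0) and ORs 0x01 into the low nibble of
+ * bytes[1] (bits 11:8), leaving bit 7 of bytes[1] free for the
+ * stream-detected flag.
+ */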
+
+#define SET_STREAM_DETECTED(rctx_g35) \
+       (rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
+
+#define CLEAR_STREAM_DETECTED(rctx_g35) \
+       (rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))
+
+static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
+{
+       return (rctx_g35->u.bytes[1] & STREAM_DETECT_MASK);
+}
+
+union RAID_CONTEXT_UNION {
+       struct RAID_CONTEXT raid_context;
+       struct RAID_CONTEXT_G35 raid_context_g35;
 };
 
 #define RAID_CTX_SPANARM_ARM_SHIFT     (0)
@@ -139,6 +252,14 @@ struct RAID_CONTEXT {
 #define RAID_CTX_SPANARM_SPAN_SHIFT    (5)
 #define RAID_CTX_SPANARM_SPAN_MASK     (0xE0)
 
+/* number of bits per index in U32 TrackStream */
+#define BITS_PER_INDEX_STREAM          4
+#define INVALID_STREAM_NUM              16
+#define MR_STREAM_BITMAP               0x76543210
+#define STREAM_MASK                    ((1 << BITS_PER_INDEX_STREAM) - 1)
+#define ZERO_LAST_STREAM               0x0fffffff
+#define MAX_STREAMS_TRACKED            8
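+
+/*
+ * The mru_bit_map reset value 0x76543210 lays out eight 4-bit stream
+ * indices in MRU order with stream 0 in the least significant (most
+ * recently used) nibble; ZERO_LAST_STREAM drops the LRU nibble and
+ * STREAM_MASK extracts a single index.
+ */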
+
 /*
  * define region lock types
  */
@@ -175,6 +296,8 @@ enum REGION_TYPE {
 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG          (0x0200)
 #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD           (0x0100)
 #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP             (0x0004)
+/* EEDP escape mode */
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE  (0x0040)
 #define MPI2_FUNCTION_SCSI_IO_REQUEST               (0x00) /* SCSI IO */
 #define MPI2_FUNCTION_SCSI_TASK_MGMT                (0x01)
 #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY       (0x03)
@@ -407,7 +530,7 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
        u8                      LUN[8];                         /* 0x34 */
        __le32                  Control;                        /* 0x3C */
        union MPI2_SCSI_IO_CDB_UNION  CDB;                      /* 0x40 */
-       struct RAID_CONTEXT     RaidContext;                    /* 0x60 */
+       union RAID_CONTEXT_UNION RaidContext;  /* 0x60 */
        union MPI2_SGE_IO_UNION       SGL;                      /* 0x80 */
 };
 
@@ -563,7 +686,7 @@ struct MPI2_IOC_INIT_REQUEST {
        __le16                  HeaderVersion;                  /* 0x0E */
        u32                     Reserved5;                      /* 0x10 */
        __le16                  Reserved6;                      /* 0x14 */
-       u8                      Reserved7;                      /* 0x16 */
+       u8                      HostPageSize;                   /* 0x16 */
        u8                      HostMSIxVectors;                /* 0x17 */
        __le16                  Reserved8;                      /* 0x18 */
        __le16                  SystemRequestFrameSize;         /* 0x1A */
@@ -579,6 +702,7 @@ struct MPI2_IOC_INIT_REQUEST {
 
 /* mrpriv defines */
 #define MR_PD_INVALID 0xFFFF
+#define MR_DEVHANDLE_INVALID 0xFFFF
 #define MAX_SPAN_DEPTH 8
 #define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
 #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
@@ -586,16 +710,20 @@ struct MPI2_IOC_INIT_REQUEST {
 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
 #define MAX_LOGICAL_DRIVES 64
 #define MAX_LOGICAL_DRIVES_EXT 256
+#define MAX_LOGICAL_DRIVES_DYN 512
 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
 #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
 #define MAX_ARRAYS 128
 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
 #define MAX_ARRAYS_EXT 256
 #define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
+#define MAX_API_ARRAYS_DYN 512
 #define MAX_PHYSICAL_DEVICES 256
 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
+#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
 #define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO      0x0200e102
+#define MR_DCMD_DRV_GET_TARGET_PROP         0x0200e103
 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc*/
 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111   0x03200200
 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS       0x03150200
@@ -603,7 +731,7 @@ struct MPI2_IOC_INIT_REQUEST {
 struct MR_DEV_HANDLE_INFO {
        __le16  curDevHdl;
        u8      validHandles;
-       u8      reserved;
+       u8      interfaceType;
        __le16  devHandle[2];
 };
 
@@ -640,10 +768,56 @@ struct MR_SPAN_BLOCK_INFO {
        struct MR_SPAN_INFO block_span_info;
 };
 
+#define MR_RAID_CTX_CPUSEL_0           0
+#define MR_RAID_CTX_CPUSEL_1           1
+#define MR_RAID_CTX_CPUSEL_2           2
+#define MR_RAID_CTX_CPUSEL_3           3
+#define MR_RAID_CTX_CPUSEL_FCFS                0xF
+
+struct MR_CPU_AFFINITY_MASK {
+       union {
+               struct {
+#ifndef MFI_BIG_ENDIAN
+               u8 hw_path:1;
+               u8 cpu0:1;
+               u8 cpu1:1;
+               u8 cpu2:1;
+               u8 cpu3:1;
+               u8 reserved:3;
+#else
+               u8 reserved:3;
+               u8 cpu3:1;
+               u8 cpu2:1;
+               u8 cpu1:1;
+               u8 cpu0:1;
+               u8 hw_path:1;
+#endif
+               };
+               u8 core_mask;
+       };
+};
+
+struct MR_IO_AFFINITY {
+       union {
+               struct {
+                       struct MR_CPU_AFFINITY_MASK pdRead;
+                       struct MR_CPU_AFFINITY_MASK pdWrite;
+                       struct MR_CPU_AFFINITY_MASK ldRead;
+                       struct MR_CPU_AFFINITY_MASK ldWrite;
+               };
+               u32 word;
+       };
+       u8 maxCores;    /* Total cores + HW Path in ROC */
+       u8 reserved[3];
+};
+
 struct MR_LD_RAID {
        struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-               u32     reserved4:5;
+               u32 reserved4:2;
+               u32 fp_cache_bypass_capable:1;
+               u32 fp_rmw_capable:1;
+               u32 disable_coalescing:1;
                u32     fpBypassRegionLock:1;
                u32     tmCapable:1;
                u32     fpNonRWCapable:1;
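
The MR_CPU_AFFINITY_MASK/MR_IO_AFFINITY structures above are bitfield views over a single byte/word. A minimal sketch of how a consumer might test one of the hints (hypothetical helper, not part of this patch; it relies on the anonymous struct/union members):

	/* Hypothetical: may PD reads for this LD be steered to cpu0? */
	static bool mr_pd_read_on_cpu0(const struct MR_IO_AFFINITY *aff)
	{
		/* pdRead's bitfields alias core_mask, so this reads one
		 * bit of the same byte the firmware filled in. */
		return aff->pdRead.cpu0;
	}
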
@@ -654,11 +828,13 @@ struct MR_LD_RAID {
                u32     encryptionType:8;
                u32     pdPiMode:4;
                u32     ldPiMode:4;
-               u32     reserved5:3;
+               u32 reserved5:2;
+               u32 ra_capable:1;
                u32     fpCapable:1;
 #else
                u32     fpCapable:1;
-               u32     reserved5:3;
+               u32 ra_capable:1;
+               u32 reserved5:2;
                u32     ldPiMode:4;
                u32     pdPiMode:4;
                u32     encryptionType:8;
@@ -669,7 +845,10 @@ struct MR_LD_RAID {
                u32     fpNonRWCapable:1;
                u32     tmCapable:1;
                u32     fpBypassRegionLock:1;
-               u32     reserved4:5;
+               u32 disable_coalescing:1;
+               u32 fp_rmw_capable:1;
+               u32 fp_cache_bypass_capable:1;
+               u32 reserved4:2;
 #endif
        } capability;
        __le32     reserved6;
@@ -696,7 +875,36 @@ struct MR_LD_RAID {
 
        u8      LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
        u8      fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
-       u8      reserved3[0x80-0x2D]; /* 0x2D */
+       /* 0x2D This LD accepts priority boost of this type */
+       u8 ld_accept_priority_type;
+       u8 reserved2[2];                /* 0x2E - 0x2F */
+       /* 0x30 - 0x33, Logical block size for the LD */
+       u32 logical_block_length;
+       struct {
+#ifndef MFI_BIG_ENDIAN
+       /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+       u32 ld_pi_exp:4;
+       /* 0x34, LOGICAL BLOCKS PER PHYSICAL
+        *  BLOCK EXPONENT from READ CAPACITY 16
+        */
+       u32 ld_logical_block_exp:4;
+       u32 reserved1:24;           /* 0x34 */
+#else
+       u32 reserved1:24;           /* 0x34 */
+       /* 0x34, LOGICAL BLOCKS PER PHYSICAL
+        *  BLOCK EXPONENT from READ CAPACITY 16
+        */
+       u32 ld_logical_block_exp:4;
+       /* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+       u32 ld_pi_exp:4;
+#endif
+       };                               /* 0x34 - 0x37 */
+        /* 0x38 - 0x3f, This will determine which
+         *  core will process LD IO and PD IO.
+         */
+       struct MR_IO_AFFINITY cpuAffinity;
     /* Bit definitions are specified by MR_IO_AFFINITY */
+       u8 reserved3[0x80 - 0x40];    /* 0x40 - 0x7f */
 };
 
 struct MR_LD_SPAN_MAP {
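
The new logical_block_length and ld_logical_block_exp fields above carry READ CAPACITY(16) data per LD. A sketch of the implied relationship (hypothetical helper, assuming the standard 2^exponent scaling from SBC):

	static u32 mr_ld_physical_block_size(const struct MR_LD_RAID *raid)
	{
		/* physical block size = logical block length shifted by the
		 * LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT */
		return raid->logical_block_length << raid->ld_logical_block_exp;
	}
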
@@ -735,6 +943,7 @@ struct IO_REQUEST_INFO {
        u16 ldTgtId;
        u8 isRead;
        __le16 devHandle;
+       u8 pd_interface;
        u64 pdBlock;
        u8 fpOkForIo;
        u8 IoforUnevenSpan;
@@ -743,6 +952,8 @@ struct IO_REQUEST_INFO {
        u64 start_row;
        u8  span_arm;   /* span[7:5], arm[4:0] */
        u8  pd_after_lb;
+       u16 r1_alt_dev_handle; /* raid 1/10 only */
+       bool ra_capable;
 };
 
 struct MR_LD_TARGET_SYNC {
@@ -751,6 +962,91 @@ struct MR_LD_TARGET_SYNC {
        __le16 seqNum;
 };
 
+/*
+ * RAID Map descriptor Types.
+ * Each element should uniquely identify one data structure in the RAID map
+ */
+enum MR_RAID_MAP_DESC_TYPE {
+       /* MR_DEV_HANDLE_INFO data */
+       RAID_MAP_DESC_TYPE_DEVHDL_INFO    = 0x0,
+       /* target ID to LD number index map */
+       RAID_MAP_DESC_TYPE_TGTID_INFO     = 0x1,
+       /* MR_ARRAY_INFO data */
+       RAID_MAP_DESC_TYPE_ARRAY_INFO     = 0x2,
+       /* MR_LD_SPAN_MAP data */
+       RAID_MAP_DESC_TYPE_SPAN_INFO      = 0x3,
+       RAID_MAP_DESC_TYPE_COUNT,
+};
+
+/*
+ * This table defines the offset, size and number of elements of each descriptor
+ * type in the RAID Map buffer
+ */
+struct MR_RAID_MAP_DESC_TABLE {
+       /* Raid map descriptor type */
+       u32 raid_map_desc_type;
+       /* Offset into the RAID map buffer where
+        *  descriptor data is saved
+        */
+       u32 raid_map_desc_offset;
+       /* total size of the
+        * descriptor buffer
+        */
+       u32 raid_map_desc_buffer_size;
+       /* Number of elements contained in the
+        *  descriptor buffer
+        */
+       u32 raid_map_desc_elements;
+};
+
+/*
+ * Dynamic RAID map structure.
+ */
+struct MR_FW_RAID_MAP_DYNAMIC {
+       u32 raid_map_size;   /* total size of RAID Map structure */
+       u32 desc_table_offset;/* Offset of desc table into RAID map*/
+       u32 desc_table_size;  /* Total Size of desc table */
+       /* Total Number of elements in the desc table */
+       u32 desc_table_num_elements;
+       u64     reserved1;
+       u32     reserved2[3];   /* future use */
+       /* timeout value used by driver in FP IOs */
+       u8 fp_pd_io_timeout_sec;
+       u8 reserved3[3];
+       /* when this seqNum increments, driver needs to
+        *  release RMW buffers asap
+        */
+       u32 rmw_fp_seq_num;
+       u16 ld_count;   /* count of lds. */
+       u16 ar_count;   /* count of arrays */
+       u16 span_count; /* count of spans */
+       u16 reserved4[3];
+/*
+ * The below structure of pointers is only to be used by the driver.
+ * This is added in the API to reduce the amount of code changes
+ * needed in the driver to support the dynamic RAID map. Firmware should
+ * not update these pointers while preparing the raid map.
+ */
+       union {
+               struct {
+                       struct MR_DEV_HANDLE_INFO  *dev_hndl_info;
+                       u16 *ld_tgt_id_to_ld;
+                       struct MR_ARRAY_INFO *ar_map_info;
+                       struct MR_LD_SPAN_MAP *ld_span_map;
+               };
+               u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
+       };
+/*
+ * RAID Map descriptor table defines the layout of data in the RAID Map.
+ * The size of the descriptor table itself could change.
+ */
+       /* Variable Size descriptor Table. */
+       struct MR_RAID_MAP_DESC_TABLE
+                       raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
+       /* Variable Size buffer containing all data */
+       u32 raid_map_desc_data[1];
+}; /* Dynamically sized RAID map structure */
+
 #define IEEE_SGE_FLAGS_ADDR_MASK            (0x03)
 #define IEEE_SGE_FLAGS_SYSTEM_ADDR          (0x00)
 #define IEEE_SGE_FLAGS_IOCDDR_ADDR          (0x01)
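
A sketch of how the descriptor table in MR_FW_RAID_MAP_DYNAMIC above could be walked to locate one descriptor's payload (hypothetical helper; assumes raid_map_desc_offset is relative to the start of the map, as the field comments state, and omits endian conversion and bounds checking):

	static void *mr_get_desc_data(struct MR_FW_RAID_MAP_DYNAMIC *map,
				      u32 type, u32 *num_elements)
	{
		struct MR_RAID_MAP_DESC_TABLE *desc =
			(void *)((u8 *)map + map->desc_table_offset);
		u32 i;

		for (i = 0; i < map->desc_table_num_elements; i++, desc++) {
			if (desc->raid_map_desc_type != type)
				continue;
			*num_elements = desc->raid_map_desc_elements;
			return (u8 *)map + desc->raid_map_desc_offset;
		}
		return NULL;
	}
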
@@ -759,6 +1055,16 @@ struct MR_LD_TARGET_SYNC {
 #define IEEE_SGE_FLAGS_CHAIN_ELEMENT        (0x80)
 #define IEEE_SGE_FLAGS_END_OF_LIST          (0x40)
 
+#define MPI2_SGE_FLAGS_SHIFT                (0x02)
+#define IEEE_SGE_FLAGS_FORMAT_MASK          (0xC0)
+#define IEEE_SGE_FLAGS_FORMAT_IEEE          (0x00)
+#define IEEE_SGE_FLAGS_FORMAT_NVME          (0x02)
+
+#define MPI26_IEEE_SGE_FLAGS_NSF_MASK           (0x1C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE       (0x00)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP       (0x08)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL       (0x10)
+
 struct megasas_register_set;
 struct megasas_instance;
 
@@ -795,6 +1101,10 @@ struct megasas_cmd_fusion {
        u32 index;
        u8 pd_r1_lb;
        struct completion done;
+       u8 pd_interface;
+       u16 r1_alt_dev_handle; /* raid 1/10 only*/
+       bool cmd_completed;  /* raid 1/10 fp writes status holder */
+
 };
 
 struct LD_LOAD_BALANCE_INFO {
@@ -856,9 +1166,10 @@ struct MR_DRV_RAID_MAP {
        __le16                 spanCount;
        __le16                 reserve3;
 
-       struct MR_DEV_HANDLE_INFO  devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
-       u8                  ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
-       struct MR_ARRAY_INFO       arMapInfo[MAX_API_ARRAYS_EXT];
+       struct MR_DEV_HANDLE_INFO
+               devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
+       u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
+       struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
        struct MR_LD_SPAN_MAP      ldSpanMap[1];
 
 };
@@ -870,7 +1181,7 @@ struct MR_DRV_RAID_MAP {
 struct MR_DRV_RAID_MAP_ALL {
 
        struct MR_DRV_RAID_MAP raidMap;
-       struct MR_LD_SPAN_MAP      ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+       struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
 } __packed;
 
 
@@ -919,7 +1230,8 @@ struct MR_PD_CFG_SEQ {
                u8     reserved:7;
 #endif
        } capability;
-       u8  reserved[3];
+       u8  reserved;
+       u16 pd_target_id;
 } __packed;
 
 struct MR_PD_CFG_SEQ_NUM_SYNC {
@@ -928,6 +1240,30 @@ struct MR_PD_CFG_SEQ_NUM_SYNC {
        struct MR_PD_CFG_SEQ seq[1];
 } __packed;
 
+/* stream detection */
+struct STREAM_DETECT {
+       u64 next_seq_lba; /* next LBA to match sequential access */
+       struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
+       struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
+       u32 count_cmds_in_stream; /* count of host commands in this stream */
+       u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
+       u8 is_read; /* SCSI OpCode for this stream */
+       u8 group_depth; /* total number of host commands in group */
+       /* TRUE if cannot add any more commands to this group */
+       bool group_flush;
+       u8 reserved[7]; /* pad to 64-bit alignment */
+};
+
+struct LD_STREAM_DETECT {
+       bool write_back; /* TRUE if WB, FALSE if WT */
+       bool fp_write_enabled;
+       bool members_ssds;
+       bool fp_cache_bypass_capable;
+       u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
+       /* this is the array of stream detect structures (one per stream) */
+       struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
+};
+
 struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
        u64 RDPQBaseAddress;
        u32 Reserved1;
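
A minimal sketch of the sequential-stream test the STREAM_DETECT fields above support (hypothetical helper, simplified: an IO extends a stream only when it matches the stream's direction and begins exactly at next_seq_lba):

	static bool stream_extends(struct STREAM_DETECT *sd, u64 lba,
				   u32 num_blocks, bool is_read)
	{
		if (is_read != sd->is_read || lba != sd->next_seq_lba)
			return false;
		sd->next_seq_lba = lba + num_blocks;
		sd->count_cmds_in_stream++;
		return true;
	}
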
@@ -965,7 +1301,7 @@ struct fusion_context {
        u8      chain_offset_io_request;
        u8      chain_offset_mfi_pthru;
 
-       struct MR_FW_RAID_MAP_ALL *ld_map[2];
+       struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
        dma_addr_t ld_map_phys[2];
 
        /*Non dma-able memory. Driver local copy.*/
@@ -973,14 +1309,18 @@ struct fusion_context {
 
        u32 max_map_sz;
        u32 current_map_sz;
+       u32 old_map_sz;
+       u32 new_map_sz;
        u32 drv_map_sz;
        u32 drv_map_pages;
        struct MR_PD_CFG_SEQ_NUM_SYNC   *pd_seq_sync[JBOD_MAPS_COUNT];
        dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
        u8 fast_path_io;
-       struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+       struct LD_LOAD_BALANCE_INFO *load_balance_info;
+       u32 load_balance_info_pages;
        LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
        u8 adapter_type;
+       struct LD_STREAM_DETECT **stream_detect_by_ld;
 };
 
 union desc_value {
index 8bae305..af4be40 100644 (file)
@@ -624,6 +624,8 @@ typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
 
 /* defines for ReasonCode field */
 #define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER     (0x00)
+#define MPI26_EVENT_ACTIVE_CABLE_PRESENT                (0x01)
+#define MPI26_EVENT_ACTIVE_CABLE_DEGRADED               (0x02)
 
 /*Hard Reset Received Event data */
 
index f00ef88..a3fe1fb 100644 (file)
@@ -1040,6 +1040,25 @@ _base_interrupt(int irq, void *bus_id)
                    reply_q->reply_post_free[reply_q->reply_post_host_index].
                    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
                completed_cmds++;
+               /* Update the reply post host index after continuously
+                * processing the threshold number of Reply Descriptors,
+                * so that the FW can find enough entries to post the Reply
+                * Descriptors in the reply descriptor post queue.
+                */
+               if (completed_cmds > ioc->hba_queue_depth/3) {
+                       if (ioc->combined_reply_queue) {
+                               writel(reply_q->reply_post_host_index |
+                                               ((msix_index  & 7) <<
+                                                MPI2_RPHI_MSIX_INDEX_SHIFT),
+                                   ioc->replyPostRegisterIndex[msix_index/8]);
+                       } else {
+                               writel(reply_q->reply_post_host_index |
+                                               (msix_index <<
+                                                MPI2_RPHI_MSIX_INDEX_SHIFT),
+                                               &ioc->chip->ReplyPostHostIndex);
+                       }
+                       completed_cmds = 1;
+               }
                if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                        goto out;
                if (!reply_q->reply_post_host_index)
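
With this change the reply post host index is written back to the controller after every hba_queue_depth/3 consecutively processed descriptors, instead of only once when the loop exits. For example (illustrative number only), with hba_queue_depth = 1200 the firmware regains reply descriptor entries after every 400 completions during a long completion burst.
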
@@ -5522,6 +5541,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
                goto out_free_resources;
 
        ioc->non_operational_loop = 0;
+       ioc->got_task_abort_from_ioctl = 0;
        return 0;
 
  out_free_resources:
index dcb33f4..4ab634f 100644 (file)
@@ -73,9 +73,9 @@
 #define MPT3SAS_DRIVER_NAME            "mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION    "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION         "14.101.00.00"
-#define MPT3SAS_MAJOR_VERSION          14
-#define MPT3SAS_MINOR_VERSION          101
+#define MPT3SAS_DRIVER_VERSION         "15.100.00.00"
+#define MPT3SAS_MAJOR_VERSION          15
+#define MPT3SAS_MINOR_VERSION          100
 #define MPT3SAS_BUILD_VERSION          0
 #define MPT3SAS_RELEASE_VERSION        00
 
@@ -1000,6 +1000,7 @@ struct MPT3SAS_ADAPTER {
        u8              broadcast_aen_busy;
        u16             broadcast_aen_pending;
        u8              shost_recovery;
+       u8              got_task_abort_from_ioctl;
 
        struct mutex    reset_in_progress_mutex;
        spinlock_t      ioc_reset_in_progress_lock;
index 95f0f24..02fe1c4 100644 (file)
@@ -826,16 +826,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                        "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
                        ioc->name,
                    le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
-
+               ioc->got_task_abort_from_ioctl = 1;
                if (tm_request->TaskType ==
                    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
                    tm_request->TaskType ==
                    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
                        if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
                                mpt3sas_base_free_smid(ioc, smid);
+                               ioc->got_task_abort_from_ioctl = 0;
                                goto out;
                        }
                }
+               ioc->got_task_abort_from_ioctl = 0;
 
                if (test_bit(device_handle, ioc->device_remove_in_progress)) {
                        dtmprintk(ioc, pr_info(MPT3SAS_FMT
index c6d5505..46e866c 100644 (file)
@@ -1074,6 +1074,26 @@ _scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
        return ioc->scsi_lookup[smid - 1].scmd;
 }
 
+/**
+ * __scsih_scsi_lookup_get_clear - returns scmd entry without
+ *                                 holding any lock.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the scmd pointer stored for @smid and clears the
+ * stored entry, without taking the scsi_lookup_lock.
+ */
+static inline struct scsi_cmnd *
+__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
+               u16 smid)
+{
+       struct scsi_cmnd *scmd = NULL;
+
+       swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
+
+       return scmd;
+}
+
 /**
  * _scsih_scsi_lookup_get_clear - returns scmd entry
  * @ioc: per adapter object
@@ -1089,8 +1109,7 @@ _scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
        struct scsi_cmnd *scmd;
 
        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
-       scmd = ioc->scsi_lookup[smid - 1].scmd;
-       ioc->scsi_lookup[smid - 1].scmd = NULL;
+       scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
        return scmd;
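
The lock-free variant added above depends only on swap() performing a plain exchange; a minimal sketch of the read-and-clear idiom (hypothetical variables, not driver code):

	struct scsi_cmnd *scmd = NULL;

	swap(scmd, lookup_slot);	/* scmd takes the stored pointer,
					 * lookup_slot becomes NULL */

_scsih_io_done (next hunk) falls back to the locked variant only while a task abort from ioctl, a broadcast AEN, or PCI error recovery may be walking the lookup table concurrently.
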
@@ -4661,7 +4680,13 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        unsigned int sector_sz;
 
        mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
-       scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+
+       if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
+                       ioc->got_task_abort_from_ioctl)
+               scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+       else
+               scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
+
        if (scmd == NULL)
                return 1;
 
@@ -8044,15 +8069,24 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
        case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
                ActiveCableEventData =
                    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
-               if (ActiveCableEventData->ReasonCode ==
-                               MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
-                       pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
-                           ioc->name, ActiveCableEventData->ReceptacleID);
-                       pr_info("cannot be powered and devices connected to this active cable");
-                       pr_info("will not be seen. This active cable");
-                       pr_info("requires %d mW of power",
-                           ActiveCableEventData->ActiveCablePowerRequirement);
+               switch (ActiveCableEventData->ReasonCode) {
+               case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
+                       pr_notice(MPT3SAS_FMT "Receptacle ID %d: This active cable"
+                                 " requires %d mW of power\n", ioc->name,
+                            ActiveCableEventData->ReceptacleID,
+                            ActiveCableEventData->ActiveCablePowerRequirement);
+                       pr_notice(MPT3SAS_FMT "Receptacle ID %d: Devices connected"
+                                 " to this active cable will not be seen\n",
+                            ioc->name, ActiveCableEventData->ReceptacleID);
+                       break;
+
+               case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
+                       pr_notice(MPT3SAS_FMT "ReceptacleID %d: This cable",
+                               ioc->name, ActiveCableEventData->ReceptacleID);
+                       pr_notice(" is not running at an optimal speed(12 Gb/s)\n");
+                       break;
                }
+
                break;
 
        default: /* ignore the rest */
index 3928507..247df5e 100644 (file)
@@ -2225,15 +2225,12 @@ static struct scsi_host_template mvumi_template = {
        .name = "Marvell Storage Controller",
        .slave_configure = mvumi_slave_configure,
        .queuecommand = mvumi_queue_command,
+       .eh_timed_out = mvumi_timed_out,
        .eh_host_reset_handler = mvumi_host_reset,
        .bios_param = mvumi_bios_param,
        .this_id = -1,
 };
 
-static struct scsi_transport_template mvumi_transport_template = {
-       .eh_timed_out = mvumi_timed_out,
-};
-
 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
 {
        void *base = NULL;
@@ -2451,7 +2448,6 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
        host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
        host->max_id = mhba->max_target_id;
        host->max_cmd_len = MAX_COMMAND_SIZE;
-       host->transportt = &mvumi_transport_template;
 
        ret = scsi_add_host(host, &mhba->pdev->dev);
        if (ret) {
index 9fc675f..417368c 100644 (file)
@@ -888,7 +888,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
        u32 i = 0, j = 0;
        u32 number_of_intr;
        int flag = 0;
-       u32 max_entry;
        int rc;
        static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3];
 
@@ -900,18 +899,14 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
                flag &= ~IRQF_SHARED;
        }
 
-       max_entry = sizeof(pm8001_ha->msix_entries) /
-               sizeof(pm8001_ha->msix_entries[0]);
-       for (i = 0; i < max_entry ; i++)
-               pm8001_ha->msix_entries[i].entry = i;
-       rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries,
-               number_of_intr);
-       pm8001_ha->number_of_intr = number_of_intr;
-       if (rc)
+       rc = pci_alloc_irq_vectors(pm8001_ha->pdev, number_of_intr,
+                       number_of_intr, PCI_IRQ_MSIX);
+       if (rc < 0)
                return rc;
+       pm8001_ha->number_of_intr = number_of_intr;
 
        PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
-               "pci_enable_msix_exact request ret:%d no of intr %d\n",
+               "pci_alloc_irq_vectors request ret:%d no of intr %d\n",
                                rc, pm8001_ha->number_of_intr));
 
        for (i = 0; i < number_of_intr; i++) {
@@ -920,15 +915,15 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->irq_vector[i].irq_id = i;
                pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
 
-               rc = request_irq(pm8001_ha->msix_entries[i].vector,
+               rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i),
                        pm8001_interrupt_handler_msix, flag,
                        intr_drvname[i], &(pm8001_ha->irq_vector[i]));
                if (rc) {
                        for (j = 0; j < i; j++) {
-                               free_irq(pm8001_ha->msix_entries[j].vector,
+                               free_irq(pci_irq_vector(pm8001_ha->pdev, j),
                                        &(pm8001_ha->irq_vector[j]));
                        }
-                       pci_disable_msix(pm8001_ha->pdev);
+                       pci_free_irq_vectors(pm8001_ha->pdev);
                        break;
                }
        }
@@ -1102,11 +1097,10 @@ static void pm8001_pci_remove(struct pci_dev *pdev)
 
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               synchronize_irq(pm8001_ha->msix_entries[i].vector);
+               synchronize_irq(pci_irq_vector(pdev, i));
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector,
-                               &(pm8001_ha->irq_vector[i]));
-       pci_disable_msix(pdev);
+               free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
+       pci_free_irq_vectors(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
 #endif
@@ -1152,11 +1146,10 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
 #ifdef PM8001_USE_MSIX
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               synchronize_irq(pm8001_ha->msix_entries[i].vector);
+               synchronize_irq(pci_irq_vector(pdev, i));
        for (i = 0; i < pm8001_ha->number_of_intr; i++)
-               free_irq(pm8001_ha->msix_entries[i].vector,
-                               &(pm8001_ha->irq_vector[i]));
-       pci_disable_msix(pdev);
+               free_irq(pci_irq_vector(pdev, i), &pm8001_ha->irq_vector[i]);
+       pci_free_irq_vectors(pdev);
 #else
        free_irq(pm8001_ha->irq, sha);
 #endif
index 6628cc3..e81a8fa 100644 (file)
@@ -521,8 +521,6 @@ struct pm8001_hba_info {
        struct pm8001_device    *devices;
        struct pm8001_ccb_info  *ccb_info;
 #ifdef PM8001_USE_MSIX
-       struct msix_entry       msix_entries[PM8001_MAX_MSIX_VEC];
-                                       /*for msi-x interrupt*/
        int                     number_of_intr;/*will be used in remove()*/
 #endif
 #ifdef PM8001_USE_TASKLET
index 337982c..49e70a3 100644 (file)
@@ -4587,16 +4587,14 @@ static void pmcraid_tasklet_function(unsigned long instance)
 static
 void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
 {
+       struct pci_dev *pdev = pinstance->pdev;
        int i;
 
        for (i = 0; i < pinstance->num_hrrq; i++)
-               free_irq(pinstance->hrrq_vector[i].vector,
-                        &(pinstance->hrrq_vector[i]));
+               free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
 
-       if (pinstance->interrupt_mode) {
-               pci_disable_msix(pinstance->pdev);
-               pinstance->interrupt_mode = 0;
-       }
+       pinstance->interrupt_mode = 0;
+       pci_free_irq_vectors(pdev);
 }
 
 /**
@@ -4609,60 +4607,52 @@ void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
 static int
 pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
 {
-       int rc;
        struct pci_dev *pdev = pinstance->pdev;
+       unsigned int irq_flag = PCI_IRQ_LEGACY, flag;
+       int num_hrrq, rc, i;
+       irq_handler_t isr;
 
-       if ((pmcraid_enable_msix) &&
-               (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) {
-               int num_hrrq = PMCRAID_NUM_MSIX_VECTORS;
-               struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS];
-               int i;
-               for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
-                       entries[i].entry = i;
-
-               num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
-               if (num_hrrq < 0)
-                       goto pmcraid_isr_legacy;
-
-               for (i = 0; i < num_hrrq; i++) {
-                       pinstance->hrrq_vector[i].hrrq_id = i;
-                       pinstance->hrrq_vector[i].drv_inst = pinstance;
-                       pinstance->hrrq_vector[i].vector = entries[i].vector;
-                       rc = request_irq(pinstance->hrrq_vector[i].vector,
-                                       pmcraid_isr_msix, 0,
-                                       PMCRAID_DRIVER_NAME,
-                                       &(pinstance->hrrq_vector[i]));
-
-                       if (rc) {
-                               int j;
-                               for (j = 0; j < i; j++)
-                                       free_irq(entries[j].vector,
-                                                &(pinstance->hrrq_vector[j]));
-                               pci_disable_msix(pdev);
-                               goto pmcraid_isr_legacy;
-                       }
-               }
+       if (pmcraid_enable_msix)
+               irq_flag |= PCI_IRQ_MSIX;
 
-               pinstance->num_hrrq = num_hrrq;
+       num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS,
+                       irq_flag);
+       if (num_hrrq < 0)
+               return num_hrrq;
+
+       if (pdev->msix_enabled) {
+               flag = 0;
+               isr = pmcraid_isr_msix;
+       } else {
+               flag = IRQF_SHARED;
+               isr = pmcraid_isr;
+       }
+
+       for (i = 0; i < num_hrrq; i++) {
+               struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i];
+
+               vec->hrrq_id = i;
+               vec->drv_inst = pinstance;
+               rc = request_irq(pci_irq_vector(pdev, i), isr, flag,
+                               PMCRAID_DRIVER_NAME, vec);
+               if (rc)
+                       goto out_unwind;
+       }
+
+       pinstance->num_hrrq = num_hrrq;
+       if (pdev->msix_enabled) {
                pinstance->interrupt_mode = 1;
                iowrite32(DOORBELL_INTR_MODE_MSIX,
                          pinstance->int_regs.host_ioa_interrupt_reg);
                ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
-               goto pmcraid_isr_out;
        }
 
-pmcraid_isr_legacy:
-       /* If MSI-X registration failed fallback to legacy mode, where
-        * only one hrrq entry will be used
-        */
-       pinstance->hrrq_vector[0].hrrq_id = 0;
-       pinstance->hrrq_vector[0].drv_inst = pinstance;
-       pinstance->hrrq_vector[0].vector = pdev->irq;
-       pinstance->num_hrrq = 1;
-
-       rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
-                        PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
-pmcraid_isr_out:
+       return 0;
+
+out_unwind:
+       while (--i >= 0)
+               free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
+       pci_free_irq_vectors(pdev);
        return rc;
 }
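
The pci_alloc_irq_vectors() conversions in this series (snic, pm8001, pmcraid) all follow the same shape; a minimal self-contained sketch (hypothetical driver, all names illustrative):

	static int example_setup_irqs(struct pci_dev *pdev, int max_vecs,
				      irq_handler_t handler, void *data)
	{
		int i, rc, nvec;

		/* allocate between 1 and max_vecs vectors, MSI-X preferred */
		nvec = pci_alloc_irq_vectors(pdev, 1, max_vecs,
					     PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
		if (nvec < 0)
			return nvec;

		for (i = 0; i < nvec; i++) {
			rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
					 "example", data);
			if (rc)
				goto out_unwind;
		}
		return 0;

	out_unwind:
		while (--i >= 0)
			free_irq(pci_irq_vector(pdev, i), data);
		pci_free_irq_vectors(pdev);
		return rc;
	}
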
 
index e1d150f..568b18a 100644 (file)
@@ -628,7 +628,6 @@ struct pmcraid_interrupts {
 /* ISR parameters LLD allocates (one for each MSI-X if enabled) vectors */
 struct pmcraid_isr_param {
        struct pmcraid_instance *drv_inst;
-       u16 vector;                     /* allocated msi-x vector */
        u8 hrrq_id;                     /* hrrq entry index */
 };
 
index 2bdedb9..8fd28b0 100644 (file)
@@ -52,7 +52,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
        vaf.va = &va;
 
        if (!(qedi_dbg_log & QEDI_LOG_WARN))
-               return;
+               goto ret;
 
        if (likely(qedi) && likely(qedi->pdev))
                pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
@@ -60,6 +60,7 @@ qedi_dbg_warn(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
        else
                pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
 
+ret:
        va_end(va);
 }
 
@@ -80,7 +81,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
        vaf.va = &va;
 
        if (!(qedi_dbg_log & QEDI_LOG_NOTICE))
-               return;
+               goto ret;
 
        if (likely(qedi) && likely(qedi->pdev))
                pr_notice("[%s]:[%s:%d]:%d: %pV",
@@ -89,6 +90,7 @@ qedi_dbg_notice(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
        else
                pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
 
+ret:
        va_end(va);
 }
 
@@ -109,7 +111,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
        vaf.va = &va;
 
        if (!(qedi_dbg_log & level))
-               return;
+               goto ret;
 
        if (likely(qedi) && likely(qedi->pdev))
                pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&qedi->pdev->dev),
@@ -117,6 +119,7 @@ qedi_dbg_info(struct qedi_dbg_ctx *qedi, const char *func, u32 line,
        else
                pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
 
+ret:
        va_end(va);
 }
 
index d6a2054..b9f79d3 100644 (file)
@@ -48,6 +48,7 @@ struct scsi_host_template qedi_host_template = {
        .name = "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver",
        .proc_name = QEDI_MODULE_NAME,
        .queuecommand = iscsi_queuecommand,
+       .eh_timed_out = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
@@ -453,13 +454,9 @@ static int qedi_iscsi_update_conn(struct qedi_ctx *qedi,
        if (rval) {
                rval = -ENXIO;
                QEDI_ERR(&qedi->dbg_ctx, "Could not update connection\n");
-               goto update_conn_err;
        }
 
        kfree(conn_info);
-       rval = 0;
-
-update_conn_err:
        return rval;
 }
 
index 5b1287a..2f14adf 100644 (file)
@@ -2248,7 +2248,7 @@ struct ct_fdmiv2_hba_attr {
                uint32_t num_ports;
                uint8_t fabric_name[WWN_SIZE];
                uint8_t bios_name[32];
-               uint8_t vendor_indentifer[8];
+               uint8_t vendor_identifier[8];
        } a;
 };
 
@@ -2423,7 +2423,7 @@ struct ct_sns_req {
                } rsnn_nn;
 
                struct {
-                       uint8_t hba_indentifier[8];
+                       uint8_t hba_identifier[8];
                } ghat;
 
                struct {
index 94e8a85..ee3df87 100644 (file)
@@ -1939,15 +1939,15 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
        /* Vendor Identifier */
        eiter = entries + size;
        eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
-       snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer),
+       snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
            "%s", "QLGC");
-       alen = strlen(eiter->a.vendor_indentifer);
+       alen = strlen(eiter->a.vendor_identifier);
        alen += 4 - (alen & 3);
        eiter->len = cpu_to_be16(4 + alen);
        size += 4 + alen;
 
        ql_dbg(ql_dbg_disc, vha, 0x20b1,
-           "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
+           "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
 
        /* Update MS request size. */
        qla2x00_update_ms_fdmi_iocb(vha, size + 16);
index 9281bf4..edc2264 100644 (file)
@@ -2997,14 +2997,14 @@ struct qla_init_msix_entry {
        irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry msix_entries[] = {
+static const struct qla_init_msix_entry msix_entries[] = {
        { "qla2xxx (default)", qla24xx_msix_default },
        { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
        { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
        { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
 };
 
-static struct qla_init_msix_entry qla82xx_msix_entries[] = {
+static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
        { "qla2xxx (default)", qla82xx_msix_default },
        { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
 };
@@ -3078,7 +3078,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                qentry->handle = rsp;
                rsp->msix = qentry;
                scnprintf(qentry->name, sizeof(qentry->name),
-                   msix_entries[i].name);
+                   "%s", msix_entries[i].name);
                if (IS_P3P_TYPE(ha))
                        ret = request_irq(qentry->vector,
                                qla82xx_msix_entries[i].handler,
@@ -3102,7 +3102,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                rsp->msix = qentry;
                qentry->handle = rsp;
                scnprintf(qentry->name, sizeof(qentry->name),
-                   msix_entries[QLA_ATIO_VECTOR].name);
+                   "%s", msix_entries[QLA_ATIO_VECTOR].name);
                qentry->in_use = 1;
                ret = request_irq(qentry->vector,
                        msix_entries[QLA_ATIO_VECTOR].handler,
@@ -3271,7 +3271,7 @@ free_irqs:
 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
        struct qla_msix_entry *msix, int vector_type)
 {
-       struct qla_init_msix_entry *intr = &msix_entries[vector_type];
+       const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        int ret;
 
index 4066046..d01c90c 100644 (file)
@@ -262,6 +262,7 @@ struct scsi_host_template qla2xxx_driver_template = {
        .name                   = QLA2XXX_DRIVER_NAME,
        .queuecommand           = qla2xxx_queuecommand,
 
+       .eh_timed_out           = fc_eh_timed_out,
        .eh_abort_handler       = qla2xxx_eh_abort,
        .eh_device_reset_handler = qla2xxx_eh_device_reset,
        .eh_target_reset_handler = qla2xxx_eh_target_reset,
index aeebefb..fc23371 100644 (file)
@@ -408,9 +408,6 @@ struct qla4_8xxx_legacy_intr_set {
 };
 
 /* MSI-X Support */
-
-#define QLA_MSIX_DEFAULT       0
-#define QLA_MSIX_RSP_Q         1
 #define QLA_MSIX_ENTRIES       2
 
 /*
index 9fbb33f..ac52150 100644 (file)
@@ -9539,15 +9539,15 @@ exit_host_reset:
  * driver calls the following device driver's callbacks
  *
  * - Fatal Errors - link_reset
- * - Non-Fatal Errors - driver's pci_error_detected() which
+ * - Non-Fatal Errors - driver's error_detected() which
  * returns CAN_RECOVER, NEED_RESET or DISCONNECT.
  *
  * PCI AER driver calls
- * CAN_RECOVER - driver's pci_mmio_enabled(), mmio_enabled
+ * CAN_RECOVER - driver's mmio_enabled(), mmio_enabled()
  *               returns RECOVERED or NEED_RESET if fw_hung
  * NEED_RESET - driver's slot_reset()
  * DISCONNECT - device is dead & cannot recover
- * RECOVERED - driver's pci_resume()
+ * RECOVERED - driver's resume()
  */
 static pci_ers_result_t
 qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
index 03051e1..17249c3 100644 (file)
@@ -125,6 +125,7 @@ static const char *sdebug_version_date = "20160430";
 #define DEF_OPTS   0
 #define DEF_OPT_BLKS 1024
 #define DEF_PHYSBLK_EXP 0
+#define DEF_OPT_XFERLEN_EXP 0
 #define DEF_PTYPE   TYPE_DISK
 #define DEF_REMOVABLE false
 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
@@ -590,6 +591,7 @@ static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
 static int sdebug_opt_blks = DEF_OPT_BLKS;
 static int sdebug_opts = DEF_OPTS;
 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
+static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
 static int sdebug_sector_size = DEF_SECTOR_SIZE;
@@ -1205,7 +1207,11 @@ static int inquiry_vpd_b0(unsigned char *arr)
        memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
 
        /* Optimal transfer length granularity */
-       gran = 1 << sdebug_physblk_exp;
+       if (sdebug_opt_xferlen_exp != 0 &&
+           sdebug_physblk_exp < sdebug_opt_xferlen_exp)
+               gran = 1 << sdebug_opt_xferlen_exp;
+       else
+               gran = 1 << sdebug_physblk_exp;
        put_unaligned_be16(gran, arr + 2);
 
        /* Maximum Transfer Length */
@@ -4161,6 +4167,7 @@ module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
+module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
@@ -4212,6 +4219,7 @@ MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
+MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
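
For example (illustrative invocation): loading with "modprobe scsi_debug physblk_exp=2 opt_xferlen_exp=6" makes inquiry_vpd_b0() report an optimal transfer length granularity of 1 << 6 = 64 blocks, where the default would derive 1 << 2 = 4 blocks from the physical block exponent alone.
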
index 9e82fa5..f2cafae 100644 (file)
@@ -279,9 +279,7 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
        if (host->eh_deadline != -1 && !host->last_reset)
                host->last_reset = jiffies;
 
-       if (host->transportt->eh_timed_out)
-               rtn = host->transportt->eh_timed_out(scmd);
-       else if (host->hostt->eh_timed_out)
+       if (host->hostt->eh_timed_out)
                rtn = host->hostt->eh_timed_out(scmd);
 
        if (rtn == BLK_EH_NOT_HANDLED) {
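
With the transport-template hook gone, FC and SRP LLDs plug the now-exported handlers into their host templates directly, as the qla2xxx hunk above does. Sketch for a hypothetical FC driver (fc_eh_timed_out is exported by the scsi_transport_fc.c hunk below):

	static struct scsi_host_template example_fc_sht = {
		.name		= "example-fc",
		/* timeout interception now comes from the host template */
		.eh_timed_out	= fc_eh_timed_out,
	};
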
index 13dcb9b..2d753c9 100644 (file)
@@ -2055,7 +2055,7 @@ static int fc_vport_match(struct attribute_container *cont,
 
 
 /**
- * fc_timed_out - FC Transport I/O timeout intercept handler
+ * fc_eh_timed_out - FC Transport I/O timeout intercept handler
  * @scmd:      The SCSI command which timed out
  *
  * This routine protects against error handlers getting invoked while a
@@ -2076,8 +2076,8 @@ static int fc_vport_match(struct attribute_container *cont,
  * Notes:
  *     This routine assumes no locks are held on entry.
  */
-static enum blk_eh_timer_return
-fc_timed_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return
+fc_eh_timed_out(struct scsi_cmnd *scmd)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
 
@@ -2086,6 +2086,7 @@ fc_timed_out(struct scsi_cmnd *scmd)
 
        return BLK_EH_NOT_HANDLED;
 }
+EXPORT_SYMBOL(fc_eh_timed_out);
 
 /*
  * Called by fc_user_scan to locate an rport on the shost that
@@ -2159,19 +2160,6 @@ fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
        return 0;
 }
 
-static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
-                               int result)
-{
-       struct fc_internal *i = to_fc_internal(shost->transportt);
-       return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
-}
-
-static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
-{
-       struct fc_internal *i = to_fc_internal(shost->transportt);
-       return i->f->it_nexus_response(shost, nexus, result);
-}
-
 struct scsi_transport_template *
 fc_attach_transport(struct fc_function_template *ft)
 {
@@ -2211,14 +2199,8 @@ fc_attach_transport(struct fc_function_template *ft)
        /* Transport uses the shost workq for scsi scanning */
        i->t.create_work_queue = 1;
 
-       i->t.eh_timed_out = fc_timed_out;
-
        i->t.user_scan = fc_user_scan;
 
-       /* target-mode drivers' functions */
-       i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
-       i->t.it_nexus_response = fc_it_nexus_response;
-
        /*
         * Setup SCSI Target Attributes.
         */
index b87a786..3c5d898 100644 (file)
@@ -591,7 +591,7 @@ EXPORT_SYMBOL(srp_reconnect_rport);
  * Note: This function is called from soft-IRQ context and with the request
  * queue lock held.
  */
-static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
 {
        struct scsi_device *sdev = scmd->device;
        struct Scsi_Host *shost = sdev->host;
@@ -603,6 +603,7 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
                i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
                BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
 }
+EXPORT_SYMBOL(srp_timed_out);
 
 static void srp_rport_release(struct device *dev)
 {
@@ -793,19 +794,6 @@ void srp_stop_rport_timers(struct srp_rport *rport)
 }
 EXPORT_SYMBOL_GPL(srp_stop_rport_timers);
 
-static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
-                                int result)
-{
-       struct srp_internal *i = to_srp_internal(shost->transportt);
-       return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
-}
-
-static int srp_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
-{
-       struct srp_internal *i = to_srp_internal(shost->transportt);
-       return i->f->it_nexus_response(shost, nexus, result);
-}
-
 /**
  * srp_attach_transport  -  instantiate SRP transport template
  * @ft:                SRP transport class function template
@@ -820,11 +808,6 @@ srp_attach_transport(struct srp_function_template *ft)
        if (!i)
                return NULL;
 
-       i->t.eh_timed_out = srp_timed_out;
-
-       i->t.tsk_mgmt_response = srp_tsk_mgmt_response;
-       i->t.it_nexus_response = srp_it_nexus_response;
-
        i->t.host_size = sizeof(struct srp_host_attrs);
        i->t.host_attrs.ac.attrs = &i->host_attrs[0];
        i->t.host_attrs.ac.class = &srp_host_class.class;
index 40b4038..cb6e68d 100644 (file)
@@ -703,7 +703,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 
 /**
  * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
- * @sdp: scsi device to operate one
+ * @sdp: scsi device to operate on
  * @rq: Request to prepare
  *
  * Will issue either UNMAP or WRITE SAME(16) depending on preference
@@ -3226,7 +3226,7 @@ static int sd_probe(struct device *dev)
  *     sd_remove - called whenever a scsi disk (previously recognized by
  *     sd_probe) is detached from the system. It is called (potentially
  *     multiple times) during sd module unload.
- *     @sdp: pointer to mid level scsi device object
+ *     @dev: pointer to device object
  *
  *     Note: this function is invoked from the scsi mid-level.
  *     This function potentially frees up a device name (e.g. /dev/sdc)
index 8ed778d..de0ab5f 100644 (file)
@@ -299,7 +299,6 @@ struct snic {
 
        /* pci related */
        struct pci_dev *pdev;
-       struct msix_entry msix_entry[SNIC_MSIX_INTR_MAX];
        struct snic_msix_entry msix[SNIC_MSIX_INTR_MAX];
 
        /* io related info */
index f552003..d859501 100644 (file)
@@ -93,7 +93,7 @@ snic_free_intr(struct snic *snic)
        /* ONLY interrupt mode MSIX is supported */
        for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
                if (snic->msix[i].requested) {
-                       free_irq(snic->msix_entry[i].vector,
+                       free_irq(pci_irq_vector(snic->pdev, i),
                                 snic->msix[i].devid);
                }
        }
@@ -134,7 +134,7 @@ snic_request_intr(struct snic *snic)
        snic->msix[SNIC_MSIX_ERR_NOTIFY].devid = snic;
 
        for (i = 0; i < ARRAY_SIZE(snic->msix); i++) {
-               ret = request_irq(snic->msix_entry[i].vector,
+               ret = request_irq(pci_irq_vector(snic->pdev, i),
                                  snic->msix[i].isr,
                                  0,
                                  snic->msix[i].devname,
@@ -158,47 +158,37 @@ snic_set_intr_mode(struct snic *snic)
 {
        unsigned int n = ARRAY_SIZE(snic->wq);
        unsigned int m = SNIC_CQ_IO_CMPL_MAX;
-       unsigned int i;
+       unsigned int vecs = n + m + 1;
 
        /*
         * We need n WQs, m CQs, and n+m+1 INTRs
         * (last INTR is used for WQ/CQ errors and notification area)
         */
-
        BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >
                        ARRAY_SIZE(snic->intr));
-       SNIC_BUG_ON(ARRAY_SIZE(snic->msix_entry) < (n + m + 1));
-
-       for (i = 0; i < (n + m + 1); i++)
-               snic->msix_entry[i].entry = i;
-
-       if (snic->wq_count >= n && snic->cq_count >= (n + m)) {
-               if (!pci_enable_msix(snic->pdev,
-                                    snic->msix_entry,
-                                    (n + m + 1))) {
-                       snic->wq_count = n;
-                       snic->cq_count = n + m;
-                       snic->intr_count = n + m + 1;
-                       snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
-
-                       SNIC_ISR_DBG(snic->shost,
-                                    "Using MSI-X Interrupts\n");
-                       svnic_dev_set_intr_mode(snic->vdev,
-                                               VNIC_DEV_INTR_MODE_MSIX);
-
-                       return 0;
-               }
-       }
 
-       svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
+       if (snic->wq_count < n || snic->cq_count < n + m)
+               goto fail;
 
+       if (pci_alloc_irq_vectors(snic->pdev, vecs, vecs, PCI_IRQ_MSIX) < 0)
+               goto fail;
+
+       snic->wq_count = n;
+       snic->cq_count = n + m;
+       snic->intr_count = vecs;
+       snic->err_intr_offset = SNIC_MSIX_ERR_NOTIFY;
+
+       SNIC_ISR_DBG(snic->shost, "Using MSI-X Interrupts\n");
+       svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_MSIX);
+       return 0;
+fail:
+       svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
        return -EINVAL;
 } /* end of snic_set_intr_mode */
 
 void
 snic_clear_intr_mode(struct snic *snic)
 {
-       pci_disable_msix(snic->pdev);
-
+       pci_free_irq_vectors(snic->pdev);
        svnic_dev_set_intr_mode(snic->vdev, VNIC_DEV_INTR_MODE_INTX);
 }
index 05526b7..585e54f 100644 (file)
@@ -136,6 +136,8 @@ struct hv_fc_wwn_packet {
 #define SRB_FLAGS_PORT_DRIVER_RESERVED         0x0F000000
 #define SRB_FLAGS_CLASS_DRIVER_RESERVED                0xF0000000
 
+#define SP_UNTAGGED                    ((unsigned char) ~0)
+#define SRB_SIMPLE_TAG_REQUEST         0x20
 
 /*
  * Platform neutral description of a scsi request -
@@ -375,6 +377,7 @@ enum storvsc_request_type {
 #define SRB_STATUS_SUCCESS     0x01
 #define SRB_STATUS_ABORTED     0x02
 #define SRB_STATUS_ERROR       0x04
+#define SRB_STATUS_DATA_OVERRUN        0x12
 
 #define SRB_STATUS(status) \
        (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
@@ -458,6 +461,15 @@ struct storvsc_device {
         * Max I/O, the device can support.
         */
        u32   max_transfer_bytes;
+       /*
+        * Number of sub-channels we will open.
+        */
+       u16 num_sc;
+       struct vmbus_channel **stor_chns;
+       /*
+        * Mask of CPUs bound to subchannels.
+        */
+       struct cpumask alloced_cpus;
        /* Used for vsc/vsp channel reset process */
        struct storvsc_cmd_request init_request;
        struct storvsc_cmd_request reset_request;
@@ -635,6 +647,11 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
                   (void *)&props,
                   sizeof(struct vmstorage_channel_properties),
                   storvsc_on_channel_callback, new_sc);
+
+       if (new_sc->state == CHANNEL_OPENED_STATE) {
+               stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+               cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+       }
 }
 
 static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
@@ -651,6 +668,7 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
        if (!stor_device)
                return;
 
+       stor_device->num_sc = num_sc;
        request = &stor_device->init_request;
        vstor_packet = &request->vstor_packet;
 
@@ -838,6 +856,25 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
         * support multi-channel.
         */
        max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
+
+       /*
+        * Allocate state to manage the sub-channels.
+        * We allocate an array based on the number of possible CPUs
+        * (Hyper-V does not support cpu online/offline).
+        * This array will be sparsely populated with unique
+        * channels - primary + sub-channels.
+        * We will, however, populate all the slots to evenly distribute
+        * the load.
+        */
+       stor_device->stor_chns = kzalloc(sizeof(void *) * num_possible_cpus(),
+                                        GFP_KERNEL);
+       if (stor_device->stor_chns == NULL)
+               return -ENOMEM;
+
+       stor_device->stor_chns[device->channel->target_cpu] = device->channel;
+       cpumask_set_cpu(device->channel->target_cpu,
+                       &stor_device->alloced_cpus);
+
        if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
                if (vstor_packet->storage_channel_properties.flags &
                    STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
@@ -888,6 +925,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
 
        switch (SRB_STATUS(vm_srb->srb_status)) {
        case SRB_STATUS_ERROR:
+               /*
+                * Let upper layer deal with error when
+                * sense message is present.
+                */
+
+               if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
+                       break;
                /*
                 * If there is an error; offline the device since all
                 * error recovery strategies would have already been
@@ -953,6 +997,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
        struct scsi_cmnd *scmnd = cmd_request->cmd;
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
+       u32 data_transfer_length;
        struct Scsi_Host *host;
        u32 payload_sz = cmd_request->payload_sz;
        void *payload = cmd_request->payload;
@@ -960,6 +1005,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
        host = stor_dev->host;
 
        vm_srb = &cmd_request->vstor_packet.vm_srb;
+       data_transfer_length = vm_srb->data_transfer_length;
 
        scmnd->result = vm_srb->scsi_status;
 
@@ -973,13 +1019,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
                                             &sense_hdr);
        }
 
-       if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+       if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
                storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
                                         sense_hdr.ascq);
+               /*
+                * The Windows driver sets data_transfer_length on
+                * SRB_STATUS_DATA_OVERRUN. On other errors this value
+                * is left untouched, so we set it to 0 in those cases.
+                */
+               if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
+                       data_transfer_length = 0;
+       }
 
        scsi_set_resid(scmnd,
-               cmd_request->payload->range.len -
-               vm_srb->data_transfer_length);
+               cmd_request->payload->range.len - data_transfer_length);
 
        scmnd->scsi_done(scmnd);
 
@@ -1198,17 +1251,64 @@ static int storvsc_dev_remove(struct hv_device *device)
        /* Close the channel */
        vmbus_close(device->channel);
 
+       kfree(stor_device->stor_chns);
        kfree(stor_device);
        return 0;
 }
 
+static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
+                                       u16 q_num)
+{
+       u16 slot = 0;
+       u16 hash_qnum;
+       struct cpumask alloced_mask;
+       int num_channels, tgt_cpu;
+
+       if (stor_device->num_sc == 0)
+               return stor_device->device->channel;
+
+       /*
+        * Our channel array is sparsely populated and we
+        * initiated I/O on a processor/hw-q that does not
+        * currently have a designated channel. Fix this.
+        * The strategy is simple:
+        * I. Ensure NUMA locality
+        * II. Distribute evenly (best effort)
+        * III. Mapping is persistent.
+        */
+
+       cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
+                   cpumask_of_node(cpu_to_node(q_num)));
+
+       num_channels = cpumask_weight(&alloced_mask);
+       if (num_channels == 0)
+               return stor_device->device->channel;
+
+       hash_qnum = q_num;
+       while (hash_qnum >= num_channels)
+               hash_qnum -= num_channels;
+
+       for_each_cpu(tgt_cpu, &alloced_mask) {
+               if (slot == hash_qnum)
+                       break;
+               slot++;
+       }
+
+       stor_device->stor_chns[q_num] = stor_device->stor_chns[tgt_cpu];
+
+       return stor_device->stor_chns[q_num];
+}
+
+
 static int storvsc_do_io(struct hv_device *device,
-                        struct storvsc_cmd_request *request)
+                        struct storvsc_cmd_request *request, u16 q_num)
 {
        struct storvsc_device *stor_device;
        struct vstor_packet *vstor_packet;
        struct vmbus_channel *outgoing_channel;
        int ret = 0;
+       struct cpumask alloced_mask;
+       int tgt_cpu;
 
        vstor_packet = &request->vstor_packet;
        stor_device = get_out_stor_device(device);
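
get_og_chn() above reduces q_num modulo the number of channels allocated on the local NUMA node and picks that slot from the allocated-CPU mask. For example (illustrative numbers), with three channels on the node, hardware queues 0..5 hash to slots 0, 1, 2, 0, 1, 2; each result is cached in stor_chns[q_num], keeping the mapping persistent (point III above).
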
@@ -1222,7 +1322,26 @@ static int storvsc_do_io(struct hv_device *device,
         * Select an appropriate channel to send the request out.
         */
 
-       outgoing_channel = vmbus_get_outgoing_channel(device->channel);
+       if (stor_device->stor_chns[q_num] != NULL) {
+               outgoing_channel = stor_device->stor_chns[q_num];
+               if (outgoing_channel->target_cpu == smp_processor_id()) {
+                       /*
+                        * Ideally, we want to pick a different channel if
+                        * available on the same NUMA node.
+                        */
+                       cpumask_and(&alloced_mask, &stor_device->alloced_cpus,
+                                   cpumask_of_node(cpu_to_node(q_num)));
+                       for_each_cpu(tgt_cpu, &alloced_mask) {
+                               if (tgt_cpu != outgoing_channel->target_cpu) {
+                                       outgoing_channel =
+                                       stor_device->stor_chns[tgt_cpu];
+                                       break;
+                               }
+                       }
+               }
+       } else {
+               outgoing_channel = get_og_chn(stor_device, q_num);
+       }
 
 
        vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
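
The selection above first tries the channel cached for this hardware queue; if that channel's interrupt target is the CPU currently submitting, it scans for another NUMA-local channel so that submission and completion work do not pile onto one CPU. Condensed, with a hypothetical helper for the rescan:

	chan = stor_device->stor_chns[q_num];
	if (!chan)
		chan = get_og_chn(stor_device, q_num);	/* build the mapping */
	else if (chan->target_cpu == smp_processor_id())
		/* hypothetical: any other channel on this NUMA node */
		chan = pick_other_numa_chan(stor_device, q_num, chan);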
@@ -1267,8 +1386,6 @@ static int storvsc_do_io(struct hv_device *device,
 static int storvsc_device_configure(struct scsi_device *sdevice)
 {
 
-       blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
-
        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
 
        blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
@@ -1451,6 +1568,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        vm_srb->win8_extension.srb_flags |=
                SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
 
+       if (scmnd->device->tagged_supported) {
+               vm_srb->win8_extension.srb_flags |=
+               (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
+               vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
+               vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
+       }
+
        /* Build the SRB */
        switch (scmnd->sc_data_direction) {
        case DMA_TO_DEVICE:
@@ -1511,20 +1635,14 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                                page_to_pfn(sg_page((cur_sgl)));
                        cur_sgl = sg_next(cur_sgl);
                }
-
-       } else if (scsi_sglist(scmnd)) {
-               payload->range.len = length;
-               payload->range.offset =
-                       virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
-               payload->range.pfn_array[0] =
-                       virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
        }
 
        cmd_request->payload = payload;
        cmd_request->payload_sz = payload_sz;
 
        /* Invokes the vsc to start an IO */
-       ret = storvsc_do_io(dev, cmd_request);
+       ret = storvsc_do_io(dev, cmd_request, get_cpu());
+       put_cpu();
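
get_cpu() is used instead of a bare smp_processor_id() because it disables preemption: the task cannot migrate while storvsc_do_io() compares the submitting CPU against channel target CPUs, and put_cpu() re-enables preemption as soon as the call returns. The pairing in isolation:

	int cpu = get_cpu();	/* disables preemption, returns CPU id */

	ret = storvsc_do_io(dev, cmd_request, cpu);
	put_cpu();		/* re-enables preemption */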
 
        if (ret == -EAGAIN) {
                /* no more space */
@@ -1550,6 +1668,7 @@ static struct scsi_host_template scsi_driver = {
        /* Make sure we don't get an sg segment that crosses a page boundary */
        .dma_boundary =         PAGE_SIZE-1,
        .no_write_same =        1,
+       .track_queue_depth =    1,
 };
 
 enum {
@@ -1680,6 +1799,11 @@ static int storvsc_probe(struct hv_device *device,
         * from the host.
         */
        host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+       /*
+        * Set the number of HW queues we are supporting.
+        */
+       if (stor_device->num_sc != 0)
+               host->nr_hw_queues = stor_device->num_sc + 1;
 
        /* Register the HBA and start the scsi bus scan */
        ret = scsi_add_host(host, &device->device);
@@ -1716,6 +1840,7 @@ err_out2:
        goto err_out0;
 
 err_out1:
+       kfree(stor_device->stor_chns);
        kfree(stor_device);
 
 err_out0:
@@ -1774,11 +1899,6 @@ static int __init storvsc_drv_init(void)
        fc_transport_template = fc_attach_transport(&fc_transport_functions);
        if (!fc_transport_template)
                return -ENODEV;
-
-       /*
-        * Install Hyper-V specific timeout handler.
-        */
-       fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
 #endif
 
        ret = vmbus_driver_register(&storvsc_drv);
index bcf7d05..e64b0c5 100644 (file)
@@ -34,7 +34,6 @@
 #include <asm/dvma.h>
 
 #include <scsi/scsi_host.h>
-#include "sun3_scsi.h"
 
 /* minimum number of bytes to do dma on */
 #define DMA_MIN_SIZE                    129
 #define NCR5380_dma_send_setup          sun3scsi_dma_count
 #define NCR5380_dma_residual            sun3scsi_dma_residual
 
-#define NCR5380_acquire_dma_irq(instance)    (1)
-#define NCR5380_release_dma_irq(instance)
-
 #include "NCR5380.h"
 
+/* dma regs start at regbase + 8, directly after the NCR regs */
+struct sun3_dma_regs {
+       unsigned short dma_addr_hi; /* vme only */
+       unsigned short dma_addr_lo; /* vme only */
+       unsigned short dma_count_hi; /* vme only */
+       unsigned short dma_count_lo; /* vme only */
+       unsigned short udc_data; /* udc dma data reg (obio only) */
+       unsigned short udc_addr; /* udc dma addr reg (obio only) */
+       unsigned short fifo_data; /* fifo data reg,
+                                  * holds extra byte on odd dma reads
+                                  */
+       unsigned short fifo_count;
+       unsigned short csr; /* control/status reg */
+       unsigned short bpack_hi; /* vme only */
+       unsigned short bpack_lo; /* vme only */
+       unsigned short ivect; /* vme only */
+       unsigned short fifo_count_hi; /* vme only */
+};
+
+/* udc chip specific regs - live in dvma space */
+struct sun3_udc_regs {
+       unsigned short rsel; /* select regs to load */
+       unsigned short addr_hi; /* high word of addr */
+       unsigned short addr_lo; /* low word */
+       unsigned short count; /* words to be xfer'd */
+       unsigned short mode_hi; /* high word of channel mode */
+       unsigned short mode_lo; /* low word of channel mode */
+};
+
+/* addresses of the udc registers */
+#define UDC_MODE 0x38
+#define UDC_CSR 0x2e /* command/status */
+#define UDC_CHN_HI 0x26 /* chain high word */
+#define UDC_CHN_LO 0x22 /* chain lo word */
+#define UDC_CURA_HI 0x1a /* cur reg A high */
+#define UDC_CURA_LO 0x0a /* cur reg A low */
+#define UDC_CURB_HI 0x12 /* cur reg B high */
+#define UDC_CURB_LO 0x02 /* cur reg B low */
+#define UDC_MODE_HI 0x56 /* mode reg high */
+#define UDC_MODE_LO 0x52 /* mode reg low */
+#define UDC_COUNT 0x32 /* words to xfer */
+
+/* some udc commands */
+#define UDC_RESET 0
+#define UDC_CHN_START 0xa0 /* start chain */
+#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
+
+/* udc mode words */
+#define UDC_MODE_HIWORD 0x40
+#define UDC_MODE_LSEND 0xc2
+#define UDC_MODE_LRECV 0xd2
+
+/* udc reg selections */
+#define UDC_RSEL_SEND 0x282
+#define UDC_RSEL_RECV 0x182
+
+/* bits in csr reg */
+#define CSR_DMA_ACTIVE 0x8000
+#define CSR_DMA_CONFLICT 0x4000
+#define CSR_DMA_BUSERR 0x2000
+
+#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
+#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
+#define CSR_DMA_INT 0x100 /* dma interrupt pending */
+
+#define CSR_LEFT 0xc0
+#define CSR_LEFT_3 0xc0
+#define CSR_LEFT_2 0x80
+#define CSR_LEFT_1 0x40
+#define CSR_PACK_ENABLE 0x20
+
+#define CSR_DMA_ENABLE 0x10
+
+#define CSR_SEND 0x8 /* 1 = send  0 = recv */
+#define CSR_FIFO 0x2 /* reset fifo */
+#define CSR_INTR 0x4 /* interrupt enable */
+#define CSR_SCSI 0x1
+
+#define VME_DATA24 0x3d00
 
 extern int sun3_map_test(unsigned long, char *);
 
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
deleted file mode 100644 (file)
index d22745f..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
- *
- * Sun3 DMA additions by Sam Creasey (sammy@sammy.net)
- *
- * Adapted from mac_scsinew.h:
- */
-/*
- * Cumana Generic NCR5380 driver defines
- *
- * Copyright 1993, Drew Eckhardt
- *     Visionary Computing
- *     (Unix and Linux consulting and custom programming)
- *     drew@colorado.edu
- *      +1 (303) 440-4894
- */
-
-#ifndef SUN3_SCSI_H
-#define SUN3_SCSI_H
-
-/* additional registers - mainly DMA control regs */
-/* these start at regbase + 8 -- directly after the NCR regs */
-struct sun3_dma_regs {
-       unsigned short dma_addr_hi; /* vme only */
-       unsigned short dma_addr_lo; /* vme only */
-       unsigned short dma_count_hi; /* vme only */
-       unsigned short dma_count_lo; /* vme only */
-       unsigned short udc_data; /* udc dma data reg (obio only) */
-       unsigned short udc_addr; /* uda dma addr reg (obio only) */
-       unsigned short fifo_data; /* fifo data reg, holds extra byte on
-                                    odd dma reads */
-       unsigned short fifo_count; 
-       unsigned short csr; /* control/status reg */
-       unsigned short bpack_hi; /* vme only */
-       unsigned short bpack_lo; /* vme only */
-       unsigned short ivect; /* vme only */
-       unsigned short fifo_count_hi; /* vme only */
-};
-
-/* ucd chip specific regs - live in dvma space */
-struct sun3_udc_regs {
-     unsigned short rsel; /* select regs to load */
-     unsigned short addr_hi; /* high word of addr */
-     unsigned short addr_lo; /* low word */
-     unsigned short count; /* words to be xfer'd */
-     unsigned short mode_hi; /* high word of channel mode */
-     unsigned short mode_lo; /* low word of channel mode */
-};
-
-/* addresses of the udc registers */
-#define UDC_MODE 0x38 
-#define UDC_CSR 0x2e /* command/status */
-#define UDC_CHN_HI 0x26 /* chain high word */
-#define UDC_CHN_LO 0x22 /* chain lo word */
-#define UDC_CURA_HI 0x1a /* cur reg A high */
-#define UDC_CURA_LO 0x0a /* cur reg A low */
-#define UDC_CURB_HI 0x12 /* cur reg B high */
-#define UDC_CURB_LO 0x02 /* cur reg B low */
-#define UDC_MODE_HI 0x56 /* mode reg high */
-#define UDC_MODE_LO 0x52 /* mode reg low */
-#define UDC_COUNT 0x32 /* words to xfer */
-
-/* some udc commands */
-#define UDC_RESET 0
-#define UDC_CHN_START 0xa0 /* start chain */
-#define UDC_INT_ENABLE 0x32 /* channel 1 int on */
-
-/* udc mode words */
-#define UDC_MODE_HIWORD 0x40
-#define UDC_MODE_LSEND 0xc2
-#define UDC_MODE_LRECV 0xd2
-
-/* udc reg selections */
-#define UDC_RSEL_SEND 0x282
-#define UDC_RSEL_RECV 0x182
-
-/* bits in csr reg */
-#define CSR_DMA_ACTIVE 0x8000
-#define CSR_DMA_CONFLICT 0x4000
-#define CSR_DMA_BUSERR 0x2000
-
-#define CSR_FIFO_EMPTY 0x400 /* fifo flushed? */
-#define CSR_SDB_INT 0x200 /* sbc interrupt pending */
-#define CSR_DMA_INT 0x100 /* dma interrupt pending */
-
-#define CSR_LEFT 0xc0
-#define CSR_LEFT_3 0xc0
-#define CSR_LEFT_2 0x80
-#define CSR_LEFT_1 0x40
-#define CSR_PACK_ENABLE 0x20
-
-#define CSR_DMA_ENABLE 0x10
-
-#define CSR_SEND 0x8 /* 1 = send  0 = recv */
-#define CSR_FIFO 0x2 /* reset fifo */
-#define CSR_INTR 0x4 /* interrupt enable */
-#define CSR_SCSI 0x1 
-
-#define VME_DATA24 0x3d00
-
-#endif /* SUN3_SCSI_H */
-
index abe6173..ce5d023 100644 (file)
@@ -1497,17 +1497,21 @@ static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
 
 static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
 {
-       if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+       if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+               ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+                               UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
-       else
+       } else {
+               ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
                ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+       }
 }
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
 {
        /* provide a legal default configuration */
-       host->testbus.select_major = TSTBUS_UAWM;
-       host->testbus.select_minor = 1;
+       host->testbus.select_major = TSTBUS_UNIPRO;
+       host->testbus.select_minor = 37;
 }
 
 static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
@@ -1524,7 +1528,7 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
         * mappings of select_minor, since there is no harm in
         * configuring a non-existent select_minor
         */
-       if (host->testbus.select_minor > 0x1F) {
+       if (host->testbus.select_minor > 0xFF) {
                dev_err(host->hba->dev,
                        "%s: 0x%05X is not a legal testbus option\n",
                        __func__, host->testbus.select_minor);
@@ -1593,7 +1597,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
                break;
        case TSTBUS_UNIPRO:
                reg = UFS_UNIPRO_CFG;
-               offset = 1;
+               offset = 20;
+               mask = 0xFFF;
                break;
        /*
         * No need for a default case, since
@@ -1612,6 +1617,11 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
                    (u32)host->testbus.select_minor << offset,
                    reg);
        ufs_qcom_enable_test_bus(host);
+       /*
+        * Make sure the test bus configuration is
+        * committed before returning.
+        */
+       mb();
        ufshcd_release(host->hba);
        pm_runtime_put_sync(host->hba->dev);
 
@@ -1623,13 +1633,39 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba)
        ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
 }
 
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
+{
+       struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+       u32 *testbus = NULL;
+       int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+       testbus = kmalloc(testbus_len, GFP_KERNEL);
+       if (!testbus)
+               return;
+
+       host->testbus.select_major = TSTBUS_UNIPRO;
+       for (i = 0; i < nminor; i++) {
+               host->testbus.select_minor = i;
+               ufs_qcom_testbus_config(host);
+               testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+       }
+       print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+                       16, 4, testbus, testbus_len, false);
+       kfree(testbus);
+}
+
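ufs_qcom_print_unipro_testbus() above sweeps all 256 UniPro testbus minor selections into one buffer before dumping it; print_hex_dump() is the stock kernel helper it relies on. A standalone example of the same call:

	u32 sample[4] = { 0x11, 0x22, 0x33, 0x44 };

	/* KERN_ERR level, offset-style line prefixes, 16 bytes per row
	 * grouped as 4-byte words, no trailing ASCII column.
	 */
	print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
		       16, 4, sample, sizeof(sample), false);
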
 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 {
        ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
                        "HCI Vendor Specific Registers ");
 
+       /* sleep briefly between dumps as we are printing a lot of data */
        ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+       usleep_range(1000, 1100);
        ufs_qcom_testbus_read(hba);
+       usleep_range(1000, 1100);
+       ufs_qcom_print_unipro_testbus(hba);
+       usleep_range(1000, 1100);
 }
 
 /**
@@ -1692,6 +1728,7 @@ static const struct of_device_id ufs_qcom_of_match[] = {
        { .compatible = "qcom,ufshc"},
        {},
 };
+MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
 
 static const struct dev_pm_ops ufs_qcom_pm_ops = {
        .suspend        = ufshcd_pltfrm_suspend,
index fe517cd..076f528 100644 (file)
@@ -95,6 +95,7 @@ enum {
 #define QUNIPRO_SEL    UFS_BIT(0)
 #define TEST_BUS_EN            BIT(18)
 #define TEST_BUS_SEL           GENMASK(22, 19)
+#define UFS_REG_TEST_BUS_EN    BIT(30)
 
 /* bit definitions for REG_UFS_CFG2 register */
 #define UAWM_HW_CGC_EN         (1 << 0)
index 8e6709a..318e4a1 100644 (file)
@@ -523,4 +523,16 @@ struct ufs_dev_info {
        bool is_lu_power_on_wp;
 };
 
+#define MAX_MODEL_LEN 16
+/**
+ * ufs_dev_desc - ufs device details from the device descriptor
+ *
+ * @wmanufacturerid: card details
+ * @model: card model
+ */
+struct ufs_dev_desc {
+       u16 wmanufacturerid;
+       char model[MAX_MODEL_LEN + 1];
+};
+
 #endif /* End of Header */
index 08b799d..71f73d1 100644 (file)
 #define UFS_ANY_VENDOR 0xFFFF
 #define UFS_ANY_MODEL  "ANY_MODEL"
 
-#define MAX_MODEL_LEN 16
-
 #define UFS_VENDOR_TOSHIBA     0x198
 #define UFS_VENDOR_SAMSUNG     0x1CE
 #define UFS_VENDOR_SKHYNIX     0x1AD
 
-/**
- * ufs_device_info - ufs device details
- * @wmanufacturerid: card details
- * @model: card model
- */
-struct ufs_device_info {
-       u16 wmanufacturerid;
-       char model[MAX_MODEL_LEN + 1];
-};
-
 /**
  * ufs_dev_fix - ufs device quirk info
  * @card: ufs card details
  * @quirk: device quirk
  */
 struct ufs_dev_fix {
-       struct ufs_device_info card;
+       struct ufs_dev_desc card;
        unsigned int quirk;
 };
 
 #define END_FIX { { 0 }, 0 }
 
 /* add specific device quirk */
-#define UFS_FIX(_vendor, _model, _quirk) \
-               {                                         \
-                       .card.wmanufacturerid = (_vendor),\
-                       .card.model = (_model),           \
-                       .quirk = (_quirk),                \
-               }
+#define UFS_FIX(_vendor, _model, _quirk) {         \
+       .card.wmanufacturerid = (_vendor),          \
+       .card.model = (_model),                     \
+       .quirk = (_quirk),                          \
+}
 
 /*
  * If UFS device is having issue in processing LCC (Line Control
@@ -144,7 +131,4 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME        (1 << 8)
 
-struct ufs_hba;
-void ufs_advertise_fixup_device(struct ufs_hba *hba);
-
 #endif /* UFS_QUIRKS_H_ */
index 20e5e5f..8b721f4 100644 (file)
@@ -45,6 +45,9 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
 #define UFSHCD_REQ_SENSE_SIZE  18
 
 #define UFSHCD_ENABLE_INTRS    (UTP_TRANSFER_REQ_COMPL |\
@@ -94,6 +97,9 @@
                _ret;                                                   \
        })
 
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
 static u32 ufs_query_desc_max_size[] = {
        QUERY_DESC_DEVICE_MAX_SIZE,
        QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -185,6 +191,22 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
        return ufs_pm_lvl_states[lvl].link_state;
 }
 
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+                                       enum uic_link_state link_state)
+{
+       enum ufs_pm_level lvl;
+
+       for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+               if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+                       (ufs_pm_lvl_states[lvl].link_state == link_state))
+                       return lvl;
+       }
+
+       /* if no match found, return level 0 */
+       return UFS_PM_LVL_0;
+}
+
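ufs_get_desired_pm_lvl_for_dev_link_state() is the reverse lookup over the ufs_pm_lvl_states table used by the forward helpers earlier in this file. A hedged usage sketch:

	/* Find the PM level that keeps both the device and the link
	 * fully active; UFS_PM_LVL_0 is the fallback when no table
	 * entry matches.
	 */
	enum ufs_pm_level lvl =
		ufs_get_desired_pm_lvl_for_dev_link_state(UFS_ACTIVE_PWR_MODE,
							  UIC_LINK_ACTIVE_STATE);
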
 static struct ufs_dev_fix ufs_fixups[] = {
        /* UFS cards deviations table */
        UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
@@ -212,6 +234,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba);
@@ -223,6 +246,10 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
                struct ufs_pa_layer_attr *desired_pwr_mode);
@@ -267,6 +294,214 @@ static inline void ufshcd_remove_non_printable(char *val)
                *val = ' ';
 }
 
+static void ufshcd_add_command_trace(struct ufs_hba *hba,
+               unsigned int tag, const char *str)
+{
+       sector_t lba = -1;
+       u8 opcode = 0;
+       u32 intr, doorbell;
+       struct ufshcd_lrb *lrbp;
+       int transfer_len = -1;
+
+       if (!trace_ufshcd_command_enabled())
+               return;
+
+       lrbp = &hba->lrb[tag];
+
+       if (lrbp->cmd) { /* data phase exists */
+               opcode = (u8)(*lrbp->cmd->cmnd);
+               if ((opcode == READ_10) || (opcode == WRITE_10)) {
+                       /*
+                        * Currently we only fully trace read(10) and write(10)
+                        * commands
+                        */
+                       if (lrbp->cmd->request && lrbp->cmd->request->bio)
+                               lba =
+                                 lrbp->cmd->request->bio->bi_iter.bi_sector;
+                       transfer_len = be32_to_cpu(
+                               lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+               }
+       }
+
+       intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+       doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+       trace_ufshcd_command(dev_name(hba->dev), str, tag,
+                               doorbell, transfer_len, intr, lba, opcode);
+}
+
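Note the ordering in ufshcd_add_command_trace() above: the trace_ufshcd_command_enabled() test (the static-key check generated with the tracepoint) runs first, so the doorbell and interrupt registers are read only when a consumer is attached. The pattern in miniature:

	if (!trace_ufshcd_command_enabled())
		return;		/* fast path: no register reads at all */

	/* slow path: gather the expensive arguments only when enabled */
	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
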
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+
+       if (!head || list_empty(head))
+               return;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+                               clki->max_freq)
+                       dev_err(hba->dev, "clk: %s, rate: %u\n",
+                                       clki->name, clki->curr_freq);
+       }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+               struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+       int i;
+
+       for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+               int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+               if (err_hist->reg[p] == 0)
+                       continue;
+               dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
+                       err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+       }
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+       /*
+        * hex_dump reads its data without the readl macro. This might
+        * cause inconsistency issues on some platforms, as the printed
+        * values may be from cache and not the most recent value.
+        * To know whether you are looking at an un-cached version, verify
+        * that the IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
+        * during platform/pci probe function.
+        */
+       ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+       dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
+               hba->ufs_version, hba->capabilities);
+       dev_err(hba->dev,
+               "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
+               (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+       dev_err(hba->dev,
+               "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
+               ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+               hba->ufs_stats.hibern8_exit_cnt);
+
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+       ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+       ufshcd_print_clk_freqs(hba);
+
+       if (hba->vops && hba->vops->dbg_register_dump)
+               hba->vops->dbg_register_dump(hba);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+       struct ufshcd_lrb *lrbp;
+       int prdt_length;
+       int tag;
+
+       for_each_set_bit(tag, &bitmap, hba->nutrs) {
+               lrbp = &hba->lrb[tag];
+
+               dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
+                               tag, ktime_to_us(lrbp->issue_time_stamp));
+               dev_err(hba->dev,
+                       "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
+                       tag, (u64)lrbp->utrd_dma_addr);
+
+               ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+                               sizeof(struct utp_transfer_req_desc));
+               dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
+                       (u64)lrbp->ucd_req_dma_addr);
+               ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+                               sizeof(struct utp_upiu_req));
+               dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
+                       (u64)lrbp->ucd_rsp_dma_addr);
+               ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+                               sizeof(struct utp_upiu_rsp));
+
+               prdt_length = le16_to_cpu(
+                       lrbp->utr_descriptor_ptr->prd_table_length);
+               dev_err(hba->dev,
+                       "UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
+                       tag, prdt_length,
+                       (u64)lrbp->ucd_prdt_dma_addr);
+
+               if (pr_prdt)
+                       ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+                               sizeof(struct ufshcd_sg_entry) * prdt_length);
+       }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+       struct utp_task_req_desc *tmrdp;
+       int tag;
+
+       for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+               tmrdp = &hba->utmrdl_base_addr[tag];
+               dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
+               ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+                               sizeof(struct request_desc_header));
+               dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
+                               tag);
+               ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+                               sizeof(struct utp_upiu_req));
+               dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
+                               tag);
+               ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+                               sizeof(struct utp_task_req_desc));
+       }
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+       dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+       dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+               hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+       dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
+               hba->saved_err, hba->saved_uic_err);
+       dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
+       dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+               hba->pm_op_in_progress, hba->is_sys_suspended);
+       dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+               hba->auto_bkops_enabled, hba->host->host_self_blocked);
+       dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+       dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+               hba->eh_flags, hba->req_abort_count);
+       dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+               hba->capabilities, hba->caps);
+       dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+               hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+       static const char * const names[] = {
+               "INVALID MODE",
+               "FAST MODE",
+               "SLOW_MODE",
+               "INVALID MODE",
+               "FASTAUTO_MODE",
+               "SLOWAUTO_MODE",
+               "INVALID MODE",
+       };
+
+       dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+                __func__,
+                hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+                hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+                names[hba->pwr_info.pwr_rx],
+                names[hba->pwr_info.pwr_tx],
+                hba->pwr_info.hs_rate);
+}
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -605,6 +840,28 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
        return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static const char *ufschd_uic_link_state_to_string(
+                       enum uic_link_state state)
+{
+       switch (state) {
+       case UIC_LINK_OFF_STATE:        return "OFF";
+       case UIC_LINK_ACTIVE_STATE:     return "ACTIVE";
+       case UIC_LINK_HIBERN8_STATE:    return "HIBERN8";
+       default:                        return "UNKNOWN";
+       }
+}
+
+static const char *ufschd_ufs_dev_pwr_mode_to_string(
+                       enum ufs_dev_pwr_mode state)
+{
+       switch (state) {
+       case UFS_ACTIVE_PWR_MODE:       return "ACTIVE";
+       case UFS_SLEEP_PWR_MODE:        return "SLEEP";
+       case UFS_POWERDOWN_PWR_MODE:    return "POWERDOWN";
+       default:                        return "UNKNOWN";
+       }
+}
+
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 {
        /* HCI version 1.0 and 1.1 supports UniPro 1.41 */
@@ -633,20 +890,523 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
                return false;
 }
 
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+       ktime_t start = ktime_get();
+       bool clk_state_changed = false;
+
+       if (!head || list_empty(head))
+               goto out;
+
+       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+       if (ret)
+               return ret;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk)) {
+                       if (scale_up && clki->max_freq) {
+                               if (clki->curr_freq == clki->max_freq)
+                                       continue;
+
+                               clk_state_changed = true;
+                               ret = clk_set_rate(clki->clk, clki->max_freq);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                               __func__, clki->name,
+                                               clki->max_freq, ret);
+                                       break;
+                               }
+                               trace_ufshcd_clk_scaling(dev_name(hba->dev),
+                                               "scaled up", clki->name,
+                                               clki->curr_freq,
+                                               clki->max_freq);
+
+                               clki->curr_freq = clki->max_freq;
+
+                       } else if (!scale_up && clki->min_freq) {
+                               if (clki->curr_freq == clki->min_freq)
+                                       continue;
+
+                               clk_state_changed = true;
+                               ret = clk_set_rate(clki->clk, clki->min_freq);
+                               if (ret) {
+                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+                                               __func__, clki->name,
+                                               clki->min_freq, ret);
+                                       break;
+                               }
+                               trace_ufshcd_clk_scaling(dev_name(hba->dev),
+                                               "scaled down", clki->name,
+                                               clki->curr_freq,
+                                               clki->min_freq);
+                               clki->curr_freq = clki->min_freq;
+                       }
+               }
+               dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+                               clki->name, clk_get_rate(clki->clk));
+       }
+
+       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+       if (clk_state_changed)
+               trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+                       (scale_up ? "up" : "down"),
+                       ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+       return ret;
+}
+
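Each pass of the loop in ufshcd_scale_clks() reduces to one clk_set_rate() call plus bookkeeping; the scale-up half, condensed with tracing and error reporting elided:

	if (!IS_ERR_OR_NULL(clki->clk) && scale_up && clki->max_freq &&
	    clki->curr_freq != clki->max_freq) {
		ret = clk_set_rate(clki->clk, clki->max_freq);
		if (!ret)
			clki->curr_freq = clki->max_freq;
	}
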
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+                                              bool scale_up)
+{
+       struct ufs_clk_info *clki;
+       struct list_head *head = &hba->clk_list_head;
+
+       if (!head || list_empty(head))
+               return false;
+
+       list_for_each_entry(clki, head, list) {
+               if (!IS_ERR_OR_NULL(clki->clk)) {
+                       if (scale_up && clki->max_freq) {
+                               if (clki->curr_freq == clki->max_freq)
+                                       continue;
+                               return true;
+                       } else if (!scale_up && clki->min_freq) {
+                               if (clki->curr_freq == clki->min_freq)
+                                       continue;
+                               return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
+static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
+                                       u64 wait_timeout_us)
+{
+       unsigned long flags;
+       int ret = 0;
+       u32 tm_doorbell;
+       u32 tr_doorbell;
+       bool timeout = false, do_last_check = false;
+       ktime_t start;
+
+       ufshcd_hold(hba, false);
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       /*
+        * Wait for all the outstanding tasks/transfer requests.
+        * Verify by checking the doorbell registers are clear.
+        */
+       start = ktime_get();
+       do {
+               if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+
+               tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+               tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+               if (!tm_doorbell && !tr_doorbell) {
+                       timeout = false;
+                       break;
+               } else if (do_last_check) {
+                       break;
+               }
+
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               schedule();
+               if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+                   wait_timeout_us) {
+                       timeout = true;
+                       /*
+                        * We might have been scheduled out for a long time,
+                        * so make sure to check whether the doorbells have
+                        * cleared by now.
+                        */
+                       do_last_check = true;
+               }
+               spin_lock_irqsave(hba->host->host_lock, flags);
+       } while (tm_doorbell || tr_doorbell);
+
+       if (timeout) {
+               dev_err(hba->dev,
+                       "%s: timed out waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+                       __func__, tm_doorbell, tr_doorbell);
+               ret = -EBUSY;
+       }
+out:
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       ufshcd_release(hba);
+       return ret;
+}
+
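ufshcd_wait_for_doorbell_clr() samples both doorbell registers under the host lock, drops the lock to schedule() between samples, and makes one final check after the deadline in case the thread slept past the timeout. The generic shape of that loop, with condition_met() as a hypothetical stand-in for the doorbell test:

	ktime_t start = ktime_get();
	bool timed_out = false;

	while (!condition_met()) {
		if (ktime_to_us(ktime_sub(ktime_get(), start)) > timeout_us) {
			timed_out = !condition_met();	/* one last check */
			break;
		}
		schedule();	/* let other work run between samples */
	}
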
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+       #define UFS_MIN_GEAR_TO_SCALE_DOWN      UFS_HS_G1
+       int ret = 0;
+       struct ufs_pa_layer_attr new_pwr_info;
+
+       if (scale_up) {
+               memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+                      sizeof(struct ufs_pa_layer_attr));
+       } else {
+               memcpy(&new_pwr_info, &hba->pwr_info,
+                      sizeof(struct ufs_pa_layer_attr));
+
+               if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
+                   || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
+                       /* save the current power mode */
+                       memcpy(&hba->clk_scaling.saved_pwr_info.info,
+                               &hba->pwr_info,
+                               sizeof(struct ufs_pa_layer_attr));
+
+                       /* scale down gear */
+                       new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+                       new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
+               }
+       }
+
+       /* check if the power mode needs to be changed */
+       ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+
+       if (ret)
+               dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
+                       __func__, ret,
+                       hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+                       new_pwr_info.gear_tx, new_pwr_info.gear_rx);
+
+       return ret;
+}
+
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+       #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
+       int ret = 0;
+       /*
+        * make sure that there are no outstanding requests when
+        * clock scaling is in progress
+        */
+       scsi_block_requests(hba->host);
+       down_write(&hba->clk_scaling_lock);
+       if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+               ret = -EBUSY;
+               up_write(&hba->clk_scaling_lock);
+               scsi_unblock_requests(hba->host);
+       }
+
+       return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+       up_write(&hba->clk_scaling_lock);
+       scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+       int ret = 0;
+
+       /* let's not get into low power until clock scaling is completed */
+       ufshcd_hold(hba, false);
+
+       ret = ufshcd_clock_scaling_prepare(hba);
+       if (ret)
+               return ret;
+
+       /* scale down the gear before scaling down clocks */
+       if (!scale_up) {
+               ret = ufshcd_scale_gear(hba, false);
+               if (ret)
+                       goto out;
+       }
+
+       ret = ufshcd_scale_clks(hba, scale_up);
+       if (ret) {
+               if (!scale_up)
+                       ufshcd_scale_gear(hba, true);
+               goto out;
+       }
+
+       /* scale up the gear after scaling up clocks */
+       if (scale_up) {
+               ret = ufshcd_scale_gear(hba, true);
+               if (ret) {
+                       ufshcd_scale_clks(hba, false);
+                       goto out;
+               }
+       }
+
+       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
+out:
+       ufshcd_clock_scaling_unprepare(hba);
+       ufshcd_release(hba);
+       return ret;
+}
+
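ufshcd_devfreq_scale() keeps one invariant: the link gear must never outrun the controller clocks. Going down, the gear drops before the clocks; going up, the clocks rise before the gear, and each half rolls the other back on failure. Schematically, with hypothetical helper names:

	if (!scale_up)
		scale_gear(hba, false);		/* gear down first */

	ret = scale_clks(hba, scale_up);
	if (ret && !scale_up)
		scale_gear(hba, true);		/* undo the gear drop */

	if (!ret && scale_up && scale_gear(hba, true))
		scale_clks(hba, false);		/* undo the clock raise */
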
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                                          clk_scaling.suspend_work);
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
+       if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               return;
+       }
+       hba->clk_scaling.is_suspended = true;
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+       __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+       struct ufs_hba *hba = container_of(work, struct ufs_hba,
+                                          clk_scaling.resume_work);
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
+       if (!hba->clk_scaling.is_suspended) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               return;
+       }
+       hba->clk_scaling.is_suspended = false;
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+       devfreq_resume_device(hba->devfreq);
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+                               unsigned long *freq, u32 flags)
+{
+       int ret = 0;
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       ktime_t start;
+       bool scale_up, sched_clk_scaling_suspend_work = false;
+       unsigned long irq_flags;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return -EINVAL;
+
+       if ((*freq > 0) && (*freq < UINT_MAX)) {
+               dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(hba->host->host_lock, irq_flags);
+       if (ufshcd_eh_in_progress(hba)) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               return 0;
+       }
+
+       if (!hba->clk_scaling.active_reqs)
+               sched_clk_scaling_suspend_work = true;
+
+       scale_up = (*freq == UINT_MAX) ? true : false;
+       if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+               ret = 0;
+               goto out; /* no state change required */
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+       start = ktime_get();
+       ret = ufshcd_devfreq_scale(hba, scale_up);
+
+       trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+               (scale_up ? "up" : "down"),
+               ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+       if (sched_clk_scaling_suspend_work)
+               queue_work(hba->clk_scaling.workq,
+                          &hba->clk_scaling.suspend_work);
+
+       return ret;
+}
+
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+               struct devfreq_dev_status *stat)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+       unsigned long flags;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return -EINVAL;
+
+       memset(stat, 0, sizeof(*stat));
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (!scaling->window_start_t)
+               goto start_window;
+
+       if (scaling->is_busy_started)
+               scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+                                       scaling->busy_start_t));
+
+       stat->total_time = jiffies_to_usecs((long)jiffies -
+                               (long)scaling->window_start_t);
+       stat->busy_time = scaling->tot_busy_t;
+start_window:
+       scaling->window_start_t = jiffies;
+       scaling->tot_busy_t = 0;
+
+       if (hba->outstanding_reqs) {
+               scaling->busy_start_t = ktime_get();
+               scaling->is_busy_started = true;
+       } else {
+               scaling->busy_start_t = 0;
+               scaling->is_busy_started = false;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+       .polling_ms     = 100,
+       .target         = ufshcd_devfreq_target,
+       .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
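ufs_devfreq_profile wires the two callbacks into devfreq: target() performs the transition and get_dev_status() reports busy versus total time for the governor, polled every 100 ms. A hedged sketch of how such a profile is registered (the actual registration lives elsewhere in this file):

	hba->devfreq = devfreq_add_device(hba->dev, &ufs_devfreq_profile,
					  "simple_ondemand", NULL);
	if (IS_ERR(hba->devfreq)) {
		dev_err(hba->dev, "devfreq add failed: %ld\n",
			PTR_ERR(hba->devfreq));
		hba->devfreq = NULL;
	}
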
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+       unsigned long flags;
+
+       devfreq_suspend_device(hba->devfreq);
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       hba->clk_scaling.window_start_t = 0;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
 static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
 {
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               devfreq_suspend_device(hba->devfreq);
-               hba->clk_scaling.window_start_t = 0;
+       unsigned long flags;
+       bool suspend = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (!hba->clk_scaling.is_suspended) {
+               suspend = true;
+               hba->clk_scaling.is_suspended = true;
        }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (suspend)
+               __ufshcd_suspend_clkscaling(hba);
 }
 
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
 {
-       if (ufshcd_is_clkscaling_enabled(hba))
+       unsigned long flags;
+       bool resume = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
+               return;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->clk_scaling.is_suspended) {
+               resume = true;
+               hba->clk_scaling.is_suspended = false;
+       }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (resume)
                devfreq_resume_device(hba->devfreq);
 }
 
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       u32 value;
+       int err;
+
+       if (kstrtou32(buf, 0, &value))
+               return -EINVAL;
+
+       value = !!value;
+       if (value == hba->clk_scaling.is_allowed)
+               goto out;
+
+       pm_runtime_get_sync(hba->dev);
+       ufshcd_hold(hba, false);
+
+       cancel_work_sync(&hba->clk_scaling.suspend_work);
+       cancel_work_sync(&hba->clk_scaling.resume_work);
+
+       hba->clk_scaling.is_allowed = value;
+
+       if (value) {
+               ufshcd_resume_clkscaling(hba);
+       } else {
+               ufshcd_suspend_clkscaling(hba);
+               err = ufshcd_devfreq_scale(hba, true);
+               if (err)
+                       dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+                                       __func__, err);
+       }
+
+       ufshcd_release(hba);
+       pm_runtime_put_sync(hba->dev);
+out:
+       return count;
+}
+
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+       hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+       hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+       sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+       hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+       hba->clk_scaling.enable_attr.attr.mode = 0644;
+       if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
+
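ufshcd_clkscaling_init_sysfs() publishes the knob as an ordinary device attribute, so it shows up as clkscale_enable in the host device's sysfs directory; per the store handler above, writing 0 suspends scaling and forces the clocks back to maximum. For contrast, the stock-macro form of the same attribute, which the driver avoids presumably because its attribute lives per-instance inside struct ufs_hba:

	/* Requires handlers named clkscale_enable_show()/_store() and
	 * yields a single static attribute with 0644 mode.
	 */
	static DEVICE_ATTR_RW(clkscale_enable);
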
 static void ufshcd_ungate_work(struct work_struct *work)
 {
        int ret;
@@ -680,7 +1440,6 @@ static void ufshcd_ungate_work(struct work_struct *work)
                hba->clk_gating.is_suspended = false;
        }
 unblock_reqs:
-       ufshcd_resume_clkscaling(hba);
        scsi_unblock_requests(hba->host);
 }
 
@@ -727,6 +1486,8 @@ start:
        case REQ_CLKS_OFF:
                if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
                        hba->clk_gating.state = CLKS_ON;
+                       trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                               hba->clk_gating.state);
                        break;
                }
                /*
@@ -737,6 +1498,8 @@ start:
        case CLKS_OFF:
                scsi_block_requests(hba->host);
                hba->clk_gating.state = REQ_CLKS_ON;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                       hba->clk_gating.state);
                schedule_work(&hba->clk_gating.ungate_work);
                /*
                 * fall through to check if we should wait for this
@@ -781,6 +1544,8 @@ static void ufshcd_gate_work(struct work_struct *work)
        if (hba->clk_gating.is_suspended ||
                (hba->clk_gating.state == REQ_CLKS_ON)) {
                hba->clk_gating.state = CLKS_ON;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                       hba->clk_gating.state);
                goto rel_lock;
        }
 
@@ -796,13 +1561,13 @@ static void ufshcd_gate_work(struct work_struct *work)
        if (ufshcd_can_hibern8_during_gating(hba)) {
                if (ufshcd_uic_hibern8_enter(hba)) {
                        hba->clk_gating.state = CLKS_ON;
+                       trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                               hba->clk_gating.state);
                        goto out;
                }
                ufshcd_set_link_hibern8(hba);
        }
 
-       ufshcd_suspend_clkscaling(hba);
-
        if (!ufshcd_is_link_active(hba))
                ufshcd_setup_clocks(hba, false);
        else
@@ -819,9 +1584,11 @@ static void ufshcd_gate_work(struct work_struct *work)
         * new requests arriving before the current cancel work is done.
         */
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->clk_gating.state == REQ_CLKS_OFF)
+       if (hba->clk_gating.state == REQ_CLKS_OFF) {
                hba->clk_gating.state = CLKS_OFF;
-
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                       hba->clk_gating.state);
+       }
 rel_lock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
@@ -844,6 +1611,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
                return;
 
        hba->clk_gating.state = REQ_CLKS_OFF;
+       trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
        schedule_delayed_work(&hba->clk_gating.gate_work,
                        msecs_to_jiffies(hba->clk_gating.delay_ms));
 }
@@ -881,6 +1649,41 @@ static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
        return count;
 }
 
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags;
+       u32 value;
+
+       if (kstrtou32(buf, 0, &value))
+               return -EINVAL;
+
+       value = !!value;
+       if (value == hba->clk_gating.is_enabled)
+               goto out;
+
+       if (value) {
+               ufshcd_release(hba);
+       } else {
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               hba->clk_gating.active_reqs++;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+       }
+
+       hba->clk_gating.is_enabled = value;
+out:
+       return count;
+}
+
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
        if (!ufshcd_is_clkgating_allowed(hba))
@@ -890,13 +1693,23 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
        INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
        INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+       hba->clk_gating.is_enabled = true;
+
        hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
        hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
        sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
        hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
-       hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+       hba->clk_gating.delay_attr.attr.mode = 0644;
        if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
                dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+       hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+       hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+       sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+       hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+       hba->clk_gating.enable_attr.attr.mode = 0644;
+       if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+               dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 }
 
 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
@@ -904,6 +1717,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
        if (!ufshcd_is_clkgating_allowed(hba))
                return;
        device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+       device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
        cancel_work_sync(&hba->clk_gating.ungate_work);
        cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 }
@@ -911,9 +1725,27 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 /* Must be called with host lock acquired */
 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
-       if (!ufshcd_is_clkscaling_enabled(hba))
+       bool queue_resume_work = false;
+
+       if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
+       if (!hba->clk_scaling.active_reqs++)
+               queue_resume_work = true;
+
+       if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+               return;
+
+       if (queue_resume_work)
+               queue_work(hba->clk_scaling.workq,
+                          &hba->clk_scaling.resume_work);
+
+       if (!hba->clk_scaling.window_start_t) {
+               hba->clk_scaling.window_start_t = jiffies;
+               hba->clk_scaling.tot_busy_t = 0;
+               hba->clk_scaling.is_busy_started = false;
+       }
+
        if (!hba->clk_scaling.is_busy_started) {
                hba->clk_scaling.busy_start_t = ktime_get();
                hba->clk_scaling.is_busy_started = true;
@@ -924,7 +1756,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
        struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 
-       if (!ufshcd_is_clkscaling_enabled(hba))
+       if (!ufshcd_is_clkscaling_supported(hba))
                return;
 
        if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -942,11 +1774,13 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+       hba->lrb[task_tag].issue_time_stamp = ktime_get();
        ufshcd_clk_scaling_start_busy(hba);
        __set_bit(task_tag, &hba->outstanding_reqs);
        ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* Make sure that doorbell is committed immediately */
        wmb();
+       ufshcd_add_command_trace(hba, task_tag, "send");
 }
 
 /**
@@ -1484,6 +2318,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                BUG();
        }
 
+       if (!down_read_trylock(&hba->clk_scaling_lock))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
        spin_lock_irqsave(hba->host->host_lock, flags);
        switch (hba->ufshcd_state) {
        case UFSHCD_STATE_OPERATIONAL:
@@ -1512,6 +2349,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       hba->req_abort_count = 0;
+
        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
                /*
@@ -1541,6 +2380,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        lrbp->task_tag = tag;
        lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
        lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+       lrbp->req_abort_skip = false;
 
        ufshcd_comp_scsi_upiu(hba, lrbp);
 
@@ -1560,6 +2400,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+       up_read(&hba->clk_scaling_lock);
        return err;
 }
 
@@ -1622,6 +2463,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        int resp;
        int err = 0;
 
+       hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
        resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 
        switch (resp) {
@@ -1748,6 +2590,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
        struct completion wait;
        unsigned long flags;
 
+       down_read(&hba->clk_scaling_lock);
+
        /*
         * Get free slot, sleep if slots are unavailable.
         * Even though we use wait_event() which sleeps indefinitely,
@@ -1776,6 +2620,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
+       up_read(&hba->clk_scaling_lock);
        return err;
 }
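
Both issue paths now take clk_scaling_lock for read: ufshcd_queuecommand() via a trylock (returning SCSI_MLQUEUE_HOST_BUSY instead of sleeping on the fast path) and ufshcd_exec_dev_cmd() via a blocking down_read(). The writer side is outside these hunks; a minimal sketch of what the scaling path is assumed to do (the function name is a placeholder):

	/* Assumed writer side of the clock-scaling lock, sketched for context: */
	static void ufshcd_clk_scaling_writer_sketch(struct ufs_hba *hba)
	{
		down_write(&hba->clk_scaling_lock); /* new commands now block */
		/* ... drain outstanding doorbells, change clocks/gear ... */
		up_write(&hba->clk_scaling_lock);   /* re-admit I/O */
	}
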
 
@@ -2073,9 +2918,11 @@ out:
  * The buf_len parameter will contain, on return, the length parameter
  * received on the response.
  */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
-                       enum query_opcode opcode, enum desc_idn idn, u8 index,
-                       u8 selector, u8 *desc_buf, int *buf_len)
+static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+                                        enum query_opcode opcode,
+                                        enum desc_idn idn, u8 index,
+                                        u8 selector,
+                                        u8 *desc_buf, int *buf_len)
 {
        int err;
        int retries;
@@ -2089,7 +2936,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 
        return err;
 }
-EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
 
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
@@ -2207,11 +3053,10 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
        return err;
 }
 
-int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
 {
        return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
 }
-EXPORT_SYMBOL(ufshcd_read_device_desc);
 
 /**
  * ufshcd_read_string_desc - read string descriptor
@@ -2223,8 +3068,9 @@ EXPORT_SYMBOL(ufshcd_read_device_desc);
  *
  * Return 0 in case of success, non-zero otherwise
  */
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
-                               u32 size, bool ascii)
+#define ASCII_STD true
+static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
+                                  u8 *buf, u32 size, bool ascii)
 {
        int err = 0;
 
@@ -2280,7 +3126,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 out:
        return err;
 }
-EXPORT_SYMBOL(ufshcd_read_string_desc);
 
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
@@ -2453,12 +3298,19 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
                }
 
                hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+               hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+                               (i * sizeof(struct utp_transfer_req_desc));
                hba->lrb[i].ucd_req_ptr =
                        (struct utp_upiu_req *)(cmd_descp + i);
+               hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
                hba->lrb[i].ucd_rsp_ptr =
                        (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+               hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+                               response_offset;
                hba->lrb[i].ucd_prdt_ptr =
                        (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+               hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+                               prdt_offset;
        }
 }
 
@@ -2482,7 +3334,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
 
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
-               dev_err(hba->dev,
+               dev_dbg(hba->dev,
                        "dme-link-startup: error code %d\n", ret);
        return ret;
 }
@@ -2702,6 +3554,12 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
                ret = (status != PWR_OK) ? status : -1;
        }
 out:
+       if (ret) {
+               ufshcd_print_host_state(hba);
+               ufshcd_print_pwr_info(hba);
+               ufshcd_print_host_regs(hba);
+       }
+
        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->active_uic_cmd = NULL;
        hba->uic_async_done = NULL;
@@ -2776,11 +3634,14 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
        int ret;
        struct uic_command uic_cmd = {0};
+       ktime_t start = ktime_get();
 
        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
 
        uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+                            ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
        if (ret) {
                dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
@@ -2816,18 +3677,25 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
        struct uic_command uic_cmd = {0};
        int ret;
+       ktime_t start = ktime_get();
 
        ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
 
        uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
        ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+       trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+                            ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
        if (ret) {
                dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
                        __func__, ret);
                ret = ufshcd_link_recovery(hba);
-       } else
+       } else {
                ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
                                                                POST_CHANGE);
+               hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+               hba->ufs_stats.hibern8_exit_cnt++;
+       }
 
        return ret;
 }
@@ -2994,6 +3862,8 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
                memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
 
        ret = ufshcd_change_power_mode(hba, &final_params);
+       if (!ret)
+               ufshcd_print_pwr_info(hba);
 
        return ret;
 }
@@ -3265,6 +4135,10 @@ link_startup:
                goto link_startup;
        }
 
+       /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+       ufshcd_init_pwr_info(hba);
+       ufshcd_print_pwr_info(hba);
+
        if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
                ret = ufshcd_disable_device_tx_lcc(hba);
                if (ret)
@@ -3278,8 +4152,12 @@ link_startup:
 
        ret = ufshcd_make_hba_operational(hba);
 out:
-       if (ret)
+       if (ret) {
                dev_err(hba->dev, "link startup failed %d\n", ret);
+               ufshcd_print_host_state(hba);
+               ufshcd_print_pwr_info(hba);
+               ufshcd_print_host_regs(hba);
+       }
        return ret;
 }
 
@@ -3591,7 +4469,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        switch (ocs) {
        case OCS_SUCCESS:
                result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
+               hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
                switch (result) {
                case UPIU_TRANSACTION_RESPONSE:
                        /*
@@ -3652,10 +4530,15 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        default:
                result |= DID_ERROR << 16;
                dev_err(hba->dev,
-               "OCS error from controller = %x\n", ocs);
+                               "OCS error from controller = %x for tag %d\n",
+                               ocs, lrbp->task_tag);
+               ufshcd_print_host_regs(hba);
+               ufshcd_print_host_state(hba);
                break;
        } /* end of switch */
 
+       if (host_byte(result) != DID_OK)
+               ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
        return result;
 }
 
@@ -3695,6 +4578,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                lrbp = &hba->lrb[index];
                cmd = lrbp->cmd;
                if (cmd) {
+                       ufshcd_add_command_trace(hba, index, "complete");
                        result = ufshcd_transfer_rsp_status(hba, lrbp);
                        scsi_dma_unmap(cmd);
                        cmd->result = result;
@@ -3706,9 +4590,16 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                        __ufshcd_release(hba);
                } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
                        lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
-                       if (hba->dev_cmd.complete)
+                       if (hba->dev_cmd.complete) {
+                               ufshcd_add_command_trace(hba, index,
+                                               "dev_complete");
                                complete(hba->dev_cmd.complete);
+                       }
                }
+               if (ufshcd_is_clkscaling_supported(hba))
+                       hba->clk_scaling.active_reqs--;
        }
 
        /* clear corresponding bits of completed commands */
@@ -3828,6 +4719,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
        }
 
        hba->auto_bkops_enabled = true;
+       trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
 
        /* No need of URGENT_BKOPS exception from the device */
        err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3878,23 +4770,31 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
        }
 
        hba->auto_bkops_enabled = false;
+       trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
 out:
        return err;
 }
 
 /**
- * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
  * @hba: per adapter instance
  *
  * After a device reset the device may toggle the BKOPS_EN flag
  * to default value. The s/w tracking variables should be updated
- * as well. Do this by forcing enable of auto bkops.
+ * as well. This function changes the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
  */
-static void  ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
 {
-       hba->auto_bkops_enabled = false;
-       hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
-       ufshcd_enable_auto_bkops(hba);
+       if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+               hba->auto_bkops_enabled = false;
+               hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+               ufshcd_enable_auto_bkops(hba);
+       } else {
+               hba->auto_bkops_enabled = true;
+               hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+               ufshcd_disable_auto_bkops(hba);
+       }
 }
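
The new behavior is opt-in through a capability bit defined in the header hunks below. A hypothetical vendor host driver (my_hba_init is a placeholder, not part of this patch) would request it during initialization:

	/* Hypothetical vendor opt-in to keeping auto-bkops enabled: */
	static int my_hba_init(struct ufs_hba *hba)
	{
		hba->caps |= UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
		return 0;
	}
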
 
 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -4246,6 +5146,14 @@ out:
        pm_runtime_put_sync(hba->dev);
 }
 
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+               u32 reg)
+{
+       reg_hist->reg[reg_hist->pos] = reg;
+       reg_hist->tstamp[reg_hist->pos] = ktime_get();
+       reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
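
Because pos always points at the next write slot, it is also the oldest entry once the ring has wrapped. A hypothetical dumper (ufshcd_dump_uic_reg_hist is not part of this patch) that walks the history oldest-first, assuming unwritten slots keep a zero timestamp:

	/* Hypothetical helper: print the UIC error history oldest-first. */
	static void ufshcd_dump_uic_reg_hist(struct ufs_hba *hba,
					     struct ufs_uic_err_reg_hist *h,
					     const char *name)
	{
		int i;

		for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
			int p = (h->pos + i) % UIC_ERR_REG_HIST_LENGTH;

			if (!ktime_to_us(h->tstamp[p]))
				continue; /* slot never written */
			dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n",
				name, p, h->reg[p],
				ktime_to_us(h->tstamp[p]));
		}
	}
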
+
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
@@ -4258,15 +5166,20 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
        /* Ignore LINERESET indication, as this is not an error */
        if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
-                       (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+                       (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
                /*
                 * To know whether this error is fatal or not, DB timeout
                 * must be checked but this error is handled separately.
                 */
                dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+       }
 
        /* PA_INIT_ERROR is fatal and needs UIC reset */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+       if (reg)
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
        if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
                hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
        else if (hba->dev_quirks &
@@ -4280,16 +5193,22 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
 
        /* UIC NL/TL/DME errors needs software retry */
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-       if (reg)
+       if (reg) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
                hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+       }
 
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-       if (reg)
+       if (reg) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
                hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+       }
 
        reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-       if (reg)
+       if (reg) {
+               ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
                hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+       }
 
        dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
                        __func__, hba->uic_error);
@@ -4327,6 +5246,22 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
                        scsi_block_requests(hba->host);
 
                        hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
+
+                       /* dump controller state before resetting */
+                       if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+                               bool pr_prdt = !!(hba->saved_err &
+                                               SYSTEM_BUS_FATAL_ERROR);
+
+                               dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+                                       __func__, hba->saved_err,
+                                       hba->saved_uic_err);
+
+                               ufshcd_print_host_regs(hba);
+                               ufshcd_print_pwr_info(hba);
+                               ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+                               ufshcd_print_trs(hba, hba->outstanding_reqs,
+                                                       pr_prdt);
+                       }
                        schedule_work(&hba->eh_work);
                }
        }
@@ -4557,7 +5492,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
        spin_lock_irqsave(host->host_lock, flags);
        ufshcd_transfer_req_compl(hba);
        spin_unlock_irqrestore(host->host_lock, flags);
+
 out:
+       hba->req_abort_count = 0;
        if (!err) {
                err = SUCCESS;
        } else {
@@ -4567,6 +5504,17 @@ out:
        return err;
 }
 
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+       struct ufshcd_lrb *lrbp;
+       int tag;
+
+       for_each_set_bit(tag, &bitmap, hba->nutrs) {
+               lrbp = &hba->lrb[tag];
+               lrbp->req_abort_skip = true;
+       }
+}
+
 /**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
@@ -4594,6 +5542,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
+       lrbp = &hba->lrb[tag];
        if (!ufshcd_valid_tag(hba, tag)) {
                dev_err(hba->dev,
                        "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
@@ -4601,6 +5550,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                BUG();
        }
 
+       /*
+        * Task aborts to the device W-LUN are illegal. Since this command
+        * will fail due to the spec violation, the next SCSI error-handling
+        * step would be a LU reset, which is again a spec violation. To
+        * avoid these unnecessary/illegal steps we skip straight to the
+        * last error handling stage: reset and restore.
+        */
+       if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+               return ufshcd_eh_host_reset_handler(cmd);
+
        ufshcd_hold(hba, false);
        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        /* If command is already aborted/completed, return SUCCESS */
@@ -4617,18 +5576,48 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                __func__, tag);
        }
 
-       lrbp = &hba->lrb[tag];
+       /* Print Transfer Request of aborted task */
+       dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
+
+       /*
+        * Print detailed info about the aborted request.
+        * As more than one request might get aborted at the same time,
+        * print full information only for the first aborted request in order
+        * to reduce repeated printouts. For other aborted requests only print
+        * basic details.
+        */
+       scsi_print_command(hba->lrb[tag].cmd);
+       if (!hba->req_abort_count) {
+               ufshcd_print_host_regs(hba);
+               ufshcd_print_host_state(hba);
+               ufshcd_print_pwr_info(hba);
+               ufshcd_print_trs(hba, 1 << tag, true);
+       } else {
+               ufshcd_print_trs(hba, 1 << tag, false);
+       }
+       hba->req_abort_count++;
+
+       /* Skip task abort in case previous aborts failed and report failure */
+       if (lrbp->req_abort_skip) {
+               err = -EIO;
+               goto out;
+       }
+
        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
                err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                                UFS_QUERY_TASK, &resp);
                if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
                        /* cmd pending in the device */
+                       dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+                               __func__, tag);
                        break;
                } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                        /*
                         * cmd not pending in the device, check if it is
                         * in transition.
                         */
+                       dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+                               __func__, tag);
                        reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
                        if (reg & (1 << tag)) {
                                /* sleep for max. 200us to stabilize */
@@ -4636,8 +5625,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
                                continue;
                        }
                        /* command completed already */
+                       dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+                               __func__, tag);
                        goto out;
                } else {
+                       dev_err(hba->dev,
+                               "%s: no response from device. tag = %d, err %d\n",
+                               __func__, tag, err);
                        if (!err)
                                err = resp; /* service response error */
                        goto out;
@@ -4652,14 +5646,20 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
                        UFS_ABORT_TASK, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
-               if (!err)
+               if (!err) {
                        err = resp; /* service response error */
+                       dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
+                               __func__, tag, err);
+               }
                goto out;
        }
 
        err = ufshcd_clear_cmd(hba, tag);
-       if (err)
+       if (err) {
+               dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+                       __func__, tag, err);
                goto out;
+       }
 
        scsi_dma_unmap(cmd);
 
@@ -4676,6 +5676,7 @@ out:
                err = SUCCESS;
        } else {
                dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+               ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
                err = FAILED;
        }
 
@@ -4707,6 +5708,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        ufshcd_hba_stop(hba, false);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       /* scale up clocks to max frequency before full reinitialization */
+       ufshcd_scale_clks(hba, true);
+
        err = ufshcd_hba_enable(hba);
        if (err)
                goto out;
@@ -4822,7 +5826,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
        u16 unit;
 
        for (i = start_scan; i >= 0; i--) {
-               data = be16_to_cpu(*((u16 *)(buff + 2*i)));
+               data = be16_to_cpup((__be16 *)&buff[2 * i]);
                unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
                                                ATTR_ICC_LVL_UNIT_OFFSET;
                curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
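
be16_to_cpup() is behaviorally identical to the old open-coded cast; the gain is that the pointer now carries its proper __be16 type, so sparse can verify the endianness conversion. A minimal illustration (assuming the buffer is 16-bit aligned, as the power descriptor buffer here is):

	/* Reading a big-endian 16-bit descriptor field (illustrative only): */
	u8 buff[2] = { 0x12, 0x34 };             /* 0x1234 on the wire */
	u16 host = be16_to_cpup((__be16 *)buff); /* 0x1234 on any host */
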
@@ -5008,8 +6012,8 @@ out:
        return ret;
 }
 
-static int ufs_get_device_info(struct ufs_hba *hba,
-                               struct ufs_device_info *card_data)
+static int ufs_get_device_desc(struct ufs_hba *hba,
+                              struct ufs_dev_desc *dev_desc)
 {
        int err;
        u8 model_index;
@@ -5028,7 +6032,7 @@ static int ufs_get_device_info(struct ufs_hba *hba,
         * getting vendor (manufacturerID) and Bank Index in big endian
         * format
         */
-       card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+       dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
                                     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
 
        model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -5042,36 +6046,26 @@ static int ufs_get_device_info(struct ufs_hba *hba,
        }
 
        str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
-       strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+       strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
                min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
                      MAX_MODEL_LEN));
 
        /* Null terminate the model string */
-       card_data->model[MAX_MODEL_LEN] = '\0';
+       dev_desc->model[MAX_MODEL_LEN] = '\0';
 
 out:
        return err;
 }
 
-void ufs_advertise_fixup_device(struct ufs_hba *hba)
+static void ufs_fixup_device_setup(struct ufs_hba *hba,
+                                  struct ufs_dev_desc *dev_desc)
 {
-       int err;
        struct ufs_dev_fix *f;
-       struct ufs_device_info card_data;
-
-       card_data.wmanufacturerid = 0;
-
-       err = ufs_get_device_info(hba, &card_data);
-       if (err) {
-               dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
-                       __func__, err);
-               return;
-       }
 
        for (f = ufs_fixups; f->quirk; f++) {
-               if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
-                   (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
-                   (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+               if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
+                    f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
+                   (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
                     !strcmp(f->card.model, UFS_ANY_MODEL)))
                        hba->dev_quirks |= f->quirk;
        }
@@ -5241,6 +6235,22 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
        ufshcd_vops_apply_dev_quirks(hba);
 }
 
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+       int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+       hba->ufs_stats.hibern8_exit_cnt = 0;
+       hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+
+       memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+       memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+       hba->req_abort_count = 0;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -5249,18 +6259,21 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
  */
 static int ufshcd_probe_hba(struct ufs_hba *hba)
 {
+       struct ufs_dev_desc card = {0};
        int ret;
+       ktime_t start = ktime_get();
 
        ret = ufshcd_link_startup(hba);
        if (ret)
                goto out;
 
-       ufshcd_init_pwr_info(hba);
-
        /* set the default level for urgent bkops */
        hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
        hba->is_urgent_bkops_lvl_checked = false;
 
+       /* Debug counters initialization */
+       ufshcd_clear_dbg_ufs_stats(hba);
+
        /* UniPro link is active now */
        ufshcd_set_link_active(hba);
 
@@ -5272,7 +6285,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (ret)
                goto out;
 
-       ufs_advertise_fixup_device(hba);
+       ret = ufs_get_device_desc(hba, &card);
+       if (ret) {
+               dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
+                       __func__, ret);
+               goto out;
+       }
+
+       ufs_fixup_device_setup(hba, &card);
        ufshcd_tune_unipro_params(hba);
 
        ret = ufshcd_set_vccq_rail_unused(hba,
@@ -5320,6 +6340,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
                if (ufshcd_scsi_add_wlus(hba))
                        goto out;
 
+               /* Initialize devfreq after UFS device is detected */
+               if (ufshcd_is_clkscaling_supported(hba)) {
+                       memcpy(&hba->clk_scaling.saved_pwr_info.info,
+                               &hba->pwr_info,
+                               sizeof(struct ufs_pa_layer_attr));
+                       hba->clk_scaling.saved_pwr_info.is_valid = true;
+                       if (!hba->devfreq) {
+                               hba->devfreq = devm_devfreq_add_device(hba->dev,
+                                                       &ufs_devfreq_profile,
+                                                       "simple_ondemand",
+                                                       NULL);
+                               if (IS_ERR(hba->devfreq)) {
+                                       ret = PTR_ERR(hba->devfreq);
+                                       dev_err(hba->dev, "Unable to register with devfreq %d\n",
+                                                       ret);
+                                       goto out;
+                               }
+                       }
+                       hba->clk_scaling.is_allowed = true;
+               }
+
                scsi_scan_host(hba->host);
                pm_runtime_put_sync(hba->dev);
        }
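
The ufs_devfreq_profile registered here is assumed to match the static definition this patch deletes from its old location further down, i.e. roughly:

	/* Assumed shape of the moved profile, per the removed copy below: */
	static struct devfreq_dev_profile ufs_devfreq_profile = {
		.polling_ms	= 100,	/* devfreq sampling window */
		.target		= ufshcd_devfreq_target,
		.get_dev_status	= ufshcd_devfreq_get_dev_status,
	};
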
@@ -5327,9 +6368,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
        if (!hba->is_init_prefetch)
                hba->is_init_prefetch = true;
 
-       /* Resume devfreq after UFS device is detected */
-       ufshcd_resume_clkscaling(hba);
-
 out:
        /*
         * If we failed to initialize the device or the device is not
@@ -5340,6 +6378,9 @@ out:
                ufshcd_hba_exit(hba);
        }
 
+       trace_ufshcd_init(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
        return ret;
 }
 
@@ -5650,6 +6691,8 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
        struct ufs_clk_info *clki;
        struct list_head *head = &hba->clk_list_head;
        unsigned long flags;
+       ktime_t start = ktime_get();
+       bool clk_state_changed = false;
 
        if (!head || list_empty(head))
                goto out;
@@ -5663,6 +6706,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
                        if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
                                continue;
 
+                       clk_state_changed = on ^ clki->enabled;
                        if (on && !clki->enabled) {
                                ret = clk_prepare_enable(clki->clk);
                                if (ret) {
@@ -5689,11 +6733,18 @@ out:
                        if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
                                clk_disable_unprepare(clki->clk);
                }
-       } else if (on) {
+       } else if (!ret && on) {
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.state = CLKS_ON;
+               trace_ufshcd_clk_gating(dev_name(hba->dev),
+                                       hba->clk_gating.state);
                spin_unlock_irqrestore(hba->host->host_lock, flags);
        }
+
+       if (clk_state_changed)
+               trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+                       (on ? "on" : "off"),
+                       ktime_to_us(ktime_sub(ktime_get(), start)), ret);
        return ret;
 }
 
@@ -5835,6 +6886,11 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
                ufshcd_variant_hba_exit(hba);
                ufshcd_setup_vreg(hba, false);
                ufshcd_suspend_clkscaling(hba);
+               if (ufshcd_is_clkscaling_supported(hba)) {
+                       if (hba->devfreq)
+                               ufshcd_suspend_clkscaling(hba);
+                       destroy_workqueue(hba->clk_scaling.workq);
+               }
                ufshcd_setup_clocks(hba, false);
                ufshcd_setup_hba_vreg(hba, false);
                hba->is_powered = false;
@@ -6110,7 +7166,11 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
        ufshcd_hold(hba, false);
        hba->clk_gating.is_suspended = true;
 
-       ufshcd_suspend_clkscaling(hba);
+       if (hba->clk_scaling.is_allowed) {
+               cancel_work_sync(&hba->clk_scaling.suspend_work);
+               cancel_work_sync(&hba->clk_scaling.resume_work);
+               ufshcd_suspend_clkscaling(hba);
+       }
 
        if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
                        req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -6176,6 +7236,7 @@ disable_clks:
                __ufshcd_setup_clocks(hba, false, true);
 
        hba->clk_gating.state = CLKS_OFF;
+       trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
        /*
         * Disable the host irq as there won't be any host controller
         * transaction expected till resume.
@@ -6186,7 +7247,8 @@ disable_clks:
        goto out;
 
 set_link_active:
-       ufshcd_resume_clkscaling(hba);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
        ufshcd_vreg_set_hpm(hba);
        if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
                ufshcd_set_link_active(hba);
@@ -6196,7 +7258,8 @@ set_dev_active:
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
                ufshcd_disable_auto_bkops(hba);
 enable_gating:
-       ufshcd_resume_clkscaling(hba);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
        hba->clk_gating.is_suspended = false;
        ufshcd_release(hba);
 out:
@@ -6268,14 +7331,19 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                        goto set_old_link_state;
        }
 
-       /*
-        * If BKOPs operations are urgently needed at this moment then
-        * keep auto-bkops enabled or else disable it.
-        */
-       ufshcd_urgent_bkops(hba);
+       if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
+               ufshcd_enable_auto_bkops(hba);
+       else
+               /*
+                * If BKOPs operations are urgently needed at this moment then
+                * keep auto-bkops enabled or else disable it.
+                */
+               ufshcd_urgent_bkops(hba);
+
        hba->clk_gating.is_suspended = false;
 
-       ufshcd_resume_clkscaling(hba);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_resume_clkscaling(hba);
 
        /* Schedule clock gating in case of no access to UFS device yet */
        ufshcd_release(hba);
@@ -6289,7 +7357,8 @@ disable_vreg:
        ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
        ufshcd_disable_irq(hba);
-       ufshcd_suspend_clkscaling(hba);
+       if (hba->clk_scaling.is_allowed)
+               ufshcd_suspend_clkscaling(hba);
        ufshcd_setup_clocks(hba, false);
 out:
        hba->pm_op_in_progress = 0;
@@ -6308,6 +7377,7 @@ out:
 int ufshcd_system_suspend(struct ufs_hba *hba)
 {
        int ret = 0;
+       ktime_t start = ktime_get();
 
        if (!hba || !hba->is_powered)
                return 0;
@@ -6334,6 +7404,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 
        ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
 out:
+       trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
        if (!ret)
                hba->is_sys_suspended = true;
        return ret;
@@ -6349,6 +7422,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
+       int ret = 0;
+       ktime_t start = ktime_get();
+
        if (!hba)
                return -EINVAL;
 
@@ -6357,9 +7433,14 @@ int ufshcd_system_resume(struct ufs_hba *hba)
                 * Let the runtime resume take care of resuming
                 * if runtime suspended.
                 */
-               return 0;
-
-       return ufshcd_resume(hba, UFS_SYSTEM_PM);
+               goto out;
+       else
+               ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+       trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
+       return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
 
@@ -6373,13 +7454,21 @@ EXPORT_SYMBOL(ufshcd_system_resume);
  */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
+       int ret = 0;
+       ktime_t start = ktime_get();
+
        if (!hba)
                return -EINVAL;
 
        if (!hba->is_powered)
-               return 0;
-
-       return ufshcd_suspend(hba, UFS_RUNTIME_PM);
+               goto out;
+       else
+               ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+       trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
+       return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
@@ -6406,13 +7495,21 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
  */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
+       int ret = 0;
+       ktime_t start = ktime_get();
+
        if (!hba)
                return -EINVAL;
 
        if (!hba->is_powered)
-               return 0;
-
-       return ufshcd_resume(hba, UFS_RUNTIME_PM);
+               goto out;
+       else
+               ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+       trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+               ktime_to_us(ktime_sub(ktime_get(), start)),
+               hba->curr_dev_pwr_mode, hba->uic_link_state);
+       return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -6422,6 +7519,127 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
+static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count,
+                                          bool rpm)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       unsigned long flags, value;
+
+       if (kstrtoul(buf, 0, &value))
+               return -EINVAL;
+
+       if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX))
+               return -EINVAL;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (rpm)
+               hba->rpm_lvl = value;
+       else
+               hba->spm_lvl = value;
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+       return count;
+}
+
+static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       int curr_len;
+       u8 lvl;
+
+       curr_len = snprintf(buf, PAGE_SIZE,
+                           "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
+                           hba->rpm_lvl,
+                           ufschd_ufs_dev_pwr_mode_to_string(
+                               ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
+                           ufschd_uic_link_state_to_string(
+                               ufs_pm_lvl_states[hba->rpm_lvl].link_state));
+
+       curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                            "\nAll available Runtime PM levels info:\n");
+       for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+               curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                                    "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
+                                   lvl,
+                                   ufschd_ufs_dev_pwr_mode_to_string(
+                                       ufs_pm_lvl_states[lvl].dev_state),
+                                   ufschd_uic_link_state_to_string(
+                                       ufs_pm_lvl_states[lvl].link_state));
+
+       return curr_len;
+}
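
A note on the accumulation idiom above: snprintf() returns the length that would have been written, so chaining it with (PAGE_SIZE - curr_len) assumes the output always fits. scnprintf() returns the length actually written and is the safer variant for sysfs show callbacks; a sketch of the same loop with it, shown for contrast rather than as what the patch does:

	curr_len = scnprintf(buf, PAGE_SIZE, "level [%d]\n", hba->rpm_lvl);
	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
		curr_len += scnprintf(buf + curr_len, PAGE_SIZE - curr_len,
				      "\tlevel [%d]\n", lvl);
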
+
+static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
+}
+
+static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+       hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
+       hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
+       sysfs_attr_init(&hba->rpm_lvl_attr.attr);
+       hba->rpm_lvl_attr.attr.name = "rpm_lvl";
+       hba->rpm_lvl_attr.attr.mode = 0644;
+       if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
+               dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
+}
+
+static ssize_t ufshcd_spm_lvl_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       int curr_len;
+       u8 lvl;
+
+       curr_len = snprintf(buf, PAGE_SIZE,
+                           "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
+                           hba->spm_lvl,
+                           ufschd_ufs_dev_pwr_mode_to_string(
+                               ufs_pm_lvl_states[hba->spm_lvl].dev_state),
+                           ufschd_uic_link_state_to_string(
+                               ufs_pm_lvl_states[hba->spm_lvl].link_state));
+
+       curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                            "\nAll available System PM levels info:\n");
+       for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+               curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+                                    "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
+                                   lvl,
+                                   ufschd_ufs_dev_pwr_mode_to_string(
+                                       ufs_pm_lvl_states[lvl].dev_state),
+                                   ufschd_uic_link_state_to_string(
+                                       ufs_pm_lvl_states[lvl].link_state));
+
+       return curr_len;
+}
+
+static ssize_t ufshcd_spm_lvl_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
+}
+
+static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+       hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
+       hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
+       sysfs_attr_init(&hba->spm_lvl_attr.attr);
+       hba->spm_lvl_attr.attr.name = "spm_lvl";
+       hba->spm_lvl_attr.attr.mode = 0644;
+       if (device_create_file(hba->dev, &hba->spm_lvl_attr))
+               dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
+}
+
+static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
+{
+       ufshcd_add_rpm_lvl_sysfs_nodes(hba);
+       ufshcd_add_spm_lvl_sysfs_nodes(hba);
+}
+
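
These attributes live inside struct ufs_hba rather than in static storage, which is why the patch fills in the device_attribute members by hand instead of using the DEVICE_ATTR_RW() convenience macro. For contrast, the static equivalent would be:

	/* Static-attribute contrast (not usable for per-hba members);
	 * binds the rpm_lvl_show()/rpm_lvl_store() pair declared above. */
	static DEVICE_ATTR_RW(rpm_lvl);
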
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -6465,6 +7683,8 @@ void ufshcd_remove(struct ufs_hba *hba)
        ufshcd_hba_stop(hba, true);
 
        ufshcd_exit_clk_gating(hba);
+       if (ufshcd_is_clkscaling_supported(hba))
+               device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
        ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6531,149 +7751,6 @@ out_error:
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
-{
-       int ret = 0;
-       struct ufs_clk_info *clki;
-       struct list_head *head = &hba->clk_list_head;
-
-       if (!head || list_empty(head))
-               goto out;
-
-       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
-       if (ret)
-               return ret;
-
-       list_for_each_entry(clki, head, list) {
-               if (!IS_ERR_OR_NULL(clki->clk)) {
-                       if (scale_up && clki->max_freq) {
-                               if (clki->curr_freq == clki->max_freq)
-                                       continue;
-                               ret = clk_set_rate(clki->clk, clki->max_freq);
-                               if (ret) {
-                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-                                               __func__, clki->name,
-                                               clki->max_freq, ret);
-                                       break;
-                               }
-                               clki->curr_freq = clki->max_freq;
-
-                       } else if (!scale_up && clki->min_freq) {
-                               if (clki->curr_freq == clki->min_freq)
-                                       continue;
-                               ret = clk_set_rate(clki->clk, clki->min_freq);
-                               if (ret) {
-                                       dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-                                               __func__, clki->name,
-                                               clki->min_freq, ret);
-                                       break;
-                               }
-                               clki->curr_freq = clki->min_freq;
-                       }
-               }
-               dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
-                               clki->name, clk_get_rate(clki->clk));
-       }
-
-       ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
-
-out:
-       return ret;
-}
-
-static int ufshcd_devfreq_target(struct device *dev,
-                               unsigned long *freq, u32 flags)
-{
-       int err = 0;
-       struct ufs_hba *hba = dev_get_drvdata(dev);
-       bool release_clk_hold = false;
-       unsigned long irq_flags;
-
-       if (!ufshcd_is_clkscaling_enabled(hba))
-               return -EINVAL;
-
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (ufshcd_eh_in_progress(hba)) {
-               spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-               return 0;
-       }
-
-       if (ufshcd_is_clkgating_allowed(hba) &&
-           (hba->clk_gating.state != CLKS_ON)) {
-               if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
-                       /* hold the vote until the scaling work is completed */
-                       hba->clk_gating.active_reqs++;
-                       release_clk_hold = true;
-                       hba->clk_gating.state = CLKS_ON;
-               } else {
-                       /*
-                        * Clock gating work seems to be running in parallel
-                        * hence skip scaling work to avoid deadlock between
-                        * current scaling work and gating work.
-                        */
-                       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-                       return 0;
-               }
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
-       if (*freq == UINT_MAX)
-               err = ufshcd_scale_clks(hba, true);
-       else if (*freq == 0)
-               err = ufshcd_scale_clks(hba, false);
-
-       spin_lock_irqsave(hba->host->host_lock, irq_flags);
-       if (release_clk_hold)
-               __ufshcd_release(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
-
-       return err;
-}
-
-static int ufshcd_devfreq_get_dev_status(struct device *dev,
-               struct devfreq_dev_status *stat)
-{
-       struct ufs_hba *hba = dev_get_drvdata(dev);
-       struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-       unsigned long flags;
-
-       if (!ufshcd_is_clkscaling_enabled(hba))
-               return -EINVAL;
-
-       memset(stat, 0, sizeof(*stat));
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       if (!scaling->window_start_t)
-               goto start_window;
-
-       if (scaling->is_busy_started)
-               scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
-                                       scaling->busy_start_t));
-
-       stat->total_time = jiffies_to_usecs((long)jiffies -
-                               (long)scaling->window_start_t);
-       stat->busy_time = scaling->tot_busy_t;
-start_window:
-       scaling->window_start_t = jiffies;
-       scaling->tot_busy_t = 0;
-
-       if (hba->outstanding_reqs) {
-               scaling->busy_start_t = ktime_get();
-               scaling->is_busy_started = true;
-       } else {
-               scaling->busy_start_t = 0;
-               scaling->is_busy_started = false;
-       }
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-       return 0;
-}
-
-static struct devfreq_dev_profile ufs_devfreq_profile = {
-       .polling_ms     = 100,
-       .target         = ufshcd_devfreq_target,
-       .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
-
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -6757,6 +7834,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        /* Initialize mutex for device management commands */
        mutex_init(&hba->dev_cmd.lock);
 
+       init_rwsem(&hba->clk_scaling_lock);
+
        /* Initialize device management tag acquire wait queue */
        init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
@@ -6795,22 +7874,38 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        err = ufshcd_hba_enable(hba);
        if (err) {
                dev_err(hba->dev, "Host controller enable failed\n");
+               ufshcd_print_host_regs(hba);
+               ufshcd_print_host_state(hba);
                goto out_remove_scsi_host;
        }
 
-       if (ufshcd_is_clkscaling_enabled(hba)) {
-               hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
-                                                  "simple_ondemand", NULL);
-               if (IS_ERR(hba->devfreq)) {
-                       dev_err(hba->dev, "Unable to register with devfreq %ld\n",
-                                       PTR_ERR(hba->devfreq));
-                       err = PTR_ERR(hba->devfreq);
-                       goto out_remove_scsi_host;
-               }
-               /* Suspend devfreq until the UFS device is detected */
-               ufshcd_suspend_clkscaling(hba);
+       if (ufshcd_is_clkscaling_supported(hba)) {
+               char wq_name[sizeof("ufs_clkscaling_00")];
+
+               INIT_WORK(&hba->clk_scaling.suspend_work,
+                         ufshcd_clk_scaling_suspend_work);
+               INIT_WORK(&hba->clk_scaling.resume_work,
+                         ufshcd_clk_scaling_resume_work);
+
+               snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+                        host->host_no);
+               hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+               ufshcd_clkscaling_init_sysfs(hba);
        }
 
+       /*
+        * Set the default power management level for runtime and system PM.
+        * Default power saving mode is to keep UFS link in Hibern8 state
+        * and UFS device in sleep state.
+        */
+       hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+                                               UFS_SLEEP_PWR_MODE,
+                                               UIC_LINK_HIBERN8_STATE);
+       hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+                                               UFS_SLEEP_PWR_MODE,
+                                               UIC_LINK_HIBERN8_STATE);
+
        /* Hold auto suspend until async scan completes */
        pm_runtime_get_sync(dev);
 
@@ -6823,6 +7918,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        ufshcd_set_ufs_dev_active(hba);
 
        async_schedule(ufshcd_async_scan, hba);
+       ufshcd_add_sysfs_nodes(hba);
 
        return 0;
 
index 08cd26e..7630600 100644
@@ -45,6 +45,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -152,6 +153,10 @@ struct ufs_pm_lvl_states {
  * @ucd_req_ptr: UCD address of the command
  * @ucd_rsp_ptr: Response UPIU address for this command
  * @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
  * @cmd: pointer to SCSI command
  * @sense_buffer: pointer to sense buffer address of the SCSI command
  * @sense_bufflen: Length of the sense buffer
@@ -160,6 +165,8 @@ struct ufs_pm_lvl_states {
  * @task_tag: Task tag of the command
  * @lun: LUN of the command
  * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes
+ * @req_abort_skip: skip request abort task flag
  */
 struct ufshcd_lrb {
        struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -167,6 +174,11 @@ struct ufshcd_lrb {
        struct utp_upiu_rsp *ucd_rsp_ptr;
        struct ufshcd_sg_entry *ucd_prdt_ptr;
 
+       dma_addr_t utrd_dma_addr;
+       dma_addr_t ucd_req_dma_addr;
+       dma_addr_t ucd_rsp_dma_addr;
+       dma_addr_t ucd_prdt_dma_addr;
+
        struct scsi_cmnd *cmd;
        u8 *sense_buffer;
        unsigned int sense_bufflen;
@@ -176,6 +188,9 @@ struct ufshcd_lrb {
        int task_tag;
        u8 lun; /* UPIU LUN id field is only 8-bit wide */
        bool intr_cmd;
+       ktime_t issue_time_stamp;
+
+       bool req_abort_skip;
 };
 
 /**
@@ -320,6 +335,8 @@ enum clk_gating_state {
  * @is_suspended: clk gating is suspended when set to 1 which can be used
  * during suspend/resume
  * @delay_attr: sysfs attribute to control delay_attr
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
  */
@@ -330,14 +347,47 @@ struct ufs_clk_gating {
        unsigned long delay_ms;
        bool is_suspended;
        struct device_attribute delay_attr;
+       struct device_attribute enable_attr;
+       bool is_enabled;
        int active_reqs;
 };
 
+struct ufs_saved_pwr_info {
+       struct ufs_pa_layer_attr info;
+       bool is_valid;
+};
+
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending; if this is zero when
+ * the devfreq ->target() callback is invoked, "suspend_work" is scheduled
+ * to suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: tracks the power mode in effect before scaling, since
+ * scaling may also change the UFS power mode.
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ */
 struct ufs_clk_scaling {
-       ktime_t  busy_start_t;
-       bool is_busy_started;
-       unsigned long  tot_busy_t;
+       int active_reqs;
+       unsigned long tot_busy_t;
        unsigned long window_start_t;
+       ktime_t busy_start_t;
+       struct device_attribute enable_attr;
+       struct ufs_saved_pwr_info saved_pwr_info;
+       struct workqueue_struct *workq;
+       struct work_struct suspend_work;
+       struct work_struct resume_work;
+       bool is_allowed;
+       bool is_busy_started;
+       bool is_suspended;
 };
 
 /**
@@ -349,6 +399,41 @@ struct ufs_init_prefetch {
        u32 icc_level;
 };
 
+#define UIC_ERR_REG_HIST_LENGTH 8
+/**
+ * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * @pos: index to indicate cyclic buffer position
+ * @reg: cyclic buffer for registers value
+ * @tstamp: cyclic buffer for time stamp
+ */
+struct ufs_uic_err_reg_hist {
+       int pos;
+       u32 reg[UIC_ERR_REG_HIST_LENGTH];
+       ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+};
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @hibern8_exit_cnt: number of hibern8 exits; reset after link startup.
+ * @last_hibern8_exit_tstamp: time stamp of the latest hibern8 exit;
+ *             cleared after the first successful command completion.
+ * @pa_err: tracks pa-uic errors
+ * @dl_err: tracks dl-uic errors
+ * @nl_err: tracks nl-uic errors
+ * @tl_err: tracks tl-uic errors
+ * @dme_err: tracks dme errors
+ */
+struct ufs_stats {
+       u32 hibern8_exit_cnt;
+       ktime_t last_hibern8_exit_tstamp;
+       struct ufs_uic_err_reg_hist pa_err;
+       struct ufs_uic_err_reg_hist dl_err;
+       struct ufs_uic_err_reg_hist nl_err;
+       struct ufs_uic_err_reg_hist tl_err;
+       struct ufs_uic_err_reg_hist dme_err;
+};
+
 /**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
@@ -429,6 +514,8 @@ struct ufs_hba {
        enum ufs_pm_level rpm_lvl;
        /* Desired UFS power management level during system PM */
        enum ufs_pm_level spm_lvl;
+       struct device_attribute rpm_lvl_attr;
+       struct device_attribute spm_lvl_attr;
        int pm_op_in_progress;
 
        struct ufshcd_lrb *lrb;
@@ -523,6 +610,7 @@ struct ufs_hba {
        u32 uic_error;
        u32 saved_err;
        u32 saved_uic_err;
+       struct ufs_stats ufs_stats;
 
        /* Device management request data */
        struct ufs_dev_cmd dev_cmd;
@@ -536,6 +624,9 @@ struct ufs_hba {
 
        bool wlun_dev_clr_ua;
 
+       /* Number of request aborts */
+       int req_abort_count;
+
        /* Number of lanes available (1 or 2) for Rx/Tx */
        u32 lanes_per_direction;
        struct ufs_pa_layer_attr pwr_info;
@@ -558,6 +649,14 @@ struct ufs_hba {
         * CAUTION: Enabling this might reduce overall UFS throughput.
         */
 #define UFSHCD_CAP_INTR_AGGR (1 << 4)
+       /*
+        * This capability allows device auto-bkops to be always enabled
+        * except during suspend (both runtime and system suspend).
+        * Enabling this capability means that the device will always be
+        * allowed to do background operations when it is active, but this
+        * might degrade the performance of ongoing read/write operations.
+        */
+#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
 
        struct devfreq *devfreq;
        struct ufs_clk_scaling clk_scaling;
@@ -565,6 +664,8 @@ struct ufs_hba {
 
        enum bkops_status urgent_bkops_lvl;
        bool is_urgent_bkops_lvl_checked;
+
+       struct rw_semaphore clk_scaling_lock;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -576,7 +677,7 @@ static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
 {
        return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 }
-static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
 {
        return hba->caps & UFSHCD_CAP_CLK_SCALING;
 }
@@ -655,6 +756,11 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba)
        BUG_ON(!hba);
        return hba->priv;
 }
+static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
+                                                       struct ufs_hba *hba)
+{
+       return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
+}
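
Note: a vendor host driver opts in by setting the capability bit before
host initialization; core code can then consult the helper above before
disabling auto-bkops. Sketch only, with an invented init hook:

static int example_hba_init(struct ufs_hba *hba)
{
	/*
	 * Keep auto-bkops enabled whenever the device is active. Core
	 * code would check ufshcd_keep_autobkops_enabled_except_suspend()
	 * before turning auto-bkops off outside of suspend.
	 */
	hba->caps |= UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
	return 0;
}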
 
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
@@ -713,8 +819,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
        return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
-int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
-
 static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
 {
        return (pwr_info->pwr_rx == FAST_MODE ||
@@ -723,11 +827,6 @@ static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
                pwr_info->pwr_tx == FASTAUTO_MODE);
 }
 
-#define ASCII_STD true
-
-int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
-                               u32 size, bool ascii);
-
 /* Expose Query-Request API */
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
        enum flag_idn idn, bool *flag_res);
index 8c5190e..d14e9b9 100644
@@ -72,6 +72,9 @@ enum {
        REG_UIC_COMMAND_ARG_1                   = 0x94,
        REG_UIC_COMMAND_ARG_2                   = 0x98,
        REG_UIC_COMMAND_ARG_3                   = 0x9C,
+
+       UFSHCI_REG_SPACE_SIZE                   = 0xA0,
+
        REG_UFS_CCAP                            = 0x100,
        REG_UFS_CRYPTOCAP                       = 0x104,
 
index 15ca09c..ef474a7 100644
@@ -68,10 +68,7 @@ struct pvscsi_ctx {
 
 struct pvscsi_adapter {
        char                            *mmioBase;
-       unsigned int                    irq;
        u8                              rev;
-       bool                            use_msi;
-       bool                            use_msix;
        bool                            use_msg;
        bool                            use_req_threshold;
 
@@ -1161,30 +1158,26 @@ static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
 static irqreturn_t pvscsi_isr(int irq, void *devp)
 {
        struct pvscsi_adapter *adapter = devp;
-       int handled;
-
-       if (adapter->use_msi || adapter->use_msix)
-               handled = true;
-       else {
-               u32 val = pvscsi_read_intr_status(adapter);
-               handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
-               if (handled)
-                       pvscsi_write_intr_status(devp, val);
-       }
-
-       if (handled) {
-               unsigned long flags;
+       unsigned long flags;
 
-               spin_lock_irqsave(&adapter->hw_lock, flags);
+       spin_lock_irqsave(&adapter->hw_lock, flags);
+       pvscsi_process_completion_ring(adapter);
+       if (adapter->use_msg && pvscsi_msg_pending(adapter))
+               queue_work(adapter->workqueue, &adapter->work);
+       spin_unlock_irqrestore(&adapter->hw_lock, flags);
 
-               pvscsi_process_completion_ring(adapter);
-               if (adapter->use_msg && pvscsi_msg_pending(adapter))
-                       queue_work(adapter->workqueue, &adapter->work);
+       return IRQ_HANDLED;
+}
 
-               spin_unlock_irqrestore(&adapter->hw_lock, flags);
-       }
+static irqreturn_t pvscsi_shared_isr(int irq, void *devp)
+{
+       struct pvscsi_adapter *adapter = devp;
+       u32 val = pvscsi_read_intr_status(adapter);
 
-       return IRQ_RETVAL(handled);
+       if (!(val & PVSCSI_INTR_ALL_SUPPORTED))
+               return IRQ_NONE;
+       pvscsi_write_intr_status(devp, val);
+       return pvscsi_isr(irq, devp);
 }
 
 static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
@@ -1196,34 +1189,10 @@ static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
                free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
 }
 
-static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
-                            unsigned int *irq)
-{
-       struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
-       int ret;
-
-       ret = pci_enable_msix_exact(adapter->dev, &entry, 1);
-       if (ret)
-               return ret;
-
-       *irq = entry.vector;
-
-       return 0;
-}
-
 static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
 {
-       if (adapter->irq) {
-               free_irq(adapter->irq, adapter);
-               adapter->irq = 0;
-       }
-       if (adapter->use_msi) {
-               pci_disable_msi(adapter->dev);
-               adapter->use_msi = 0;
-       } else if (adapter->use_msix) {
-               pci_disable_msix(adapter->dev);
-               adapter->use_msix = 0;
-       }
+       free_irq(pci_irq_vector(adapter->dev, 0), adapter);
+       pci_free_irq_vectors(adapter->dev);
 }
 
 static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
@@ -1359,11 +1328,11 @@ exit:
 
 static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
        struct pvscsi_adapter *adapter;
        struct pvscsi_adapter adapter_temp;
        struct Scsi_Host *host = NULL;
        unsigned int i;
-       unsigned long flags = 0;
        int error;
        u32 max_id;
 
@@ -1512,30 +1481,33 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_reset_adapter;
        }
 
-       if (!pvscsi_disable_msix &&
-           pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
-               printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
-               adapter->use_msix = 1;
-       } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
-               printk(KERN_INFO "vmw_pvscsi: using MSI\n");
-               adapter->use_msi = 1;
-               adapter->irq = pdev->irq;
-       } else {
-               printk(KERN_INFO "vmw_pvscsi: using INTx\n");
-               adapter->irq = pdev->irq;
-               flags = IRQF_SHARED;
-       }
+       if (pvscsi_disable_msix)
+               irq_flag &= ~PCI_IRQ_MSIX;
+       if (pvscsi_disable_msi)
+               irq_flag &= ~PCI_IRQ_MSI;
+
+       error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
+       if (error < 0)
+               goto out_reset_adapter;
 
        adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
        printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
               adapter->use_req_threshold ? "en" : "dis");
 
-       error = request_irq(adapter->irq, pvscsi_isr, flags,
-                           "vmw_pvscsi", adapter);
+       if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
+               printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
+                       adapter->dev->msix_enabled ? "-X" : "");
+               error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr,
+                               0, "vmw_pvscsi", adapter);
+       } else {
+               printk(KERN_INFO "vmw_pvscsi: using INTx\n");
+               error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr,
+                               IRQF_SHARED, "vmw_pvscsi", adapter);
+       }
+
        if (error) {
                printk(KERN_ERR
                       "vmw_pvscsi: unable to request IRQ: %d\n", error);
-               adapter->irq = 0;
                goto out_reset_adapter;
        }
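
Note: this hunk is part of the tree-wide pci_alloc_irq_vectors conversion
called out in the pull request. Reduced to a self-contained sketch (the
helper and device names are invented for illustration), the resulting
idiom is:

#include <linux/interrupt.h>
#include <linux/pci.h>

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t fast_isr,
			     irq_handler_t shared_isr, void *data)
{
	int ret;

	/* Try MSI-X first, then MSI, then legacy INTx; one vector only. */
	ret = pci_alloc_irq_vectors(pdev, 1, 1,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (ret < 0)
		return ret;

	if (pdev->msix_enabled || pdev->msi_enabled)
		/* Message-signalled vectors are exclusive to this device. */
		ret = request_irq(pci_irq_vector(pdev, 0), fast_isr, 0,
				  "example", data);
	else
		/* A legacy line may be shared, so the ISR must check. */
		ret = request_irq(pci_irq_vector(pdev, 0), shared_isr,
				  IRQF_SHARED, "example", data);
	if (ret)
		pci_free_irq_vectors(pdev);
	return ret;
}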
 
index d41292e..75966d3 100644
@@ -422,11 +422,6 @@ struct PVSCSIConfigPageController {
  */
 #define PVSCSI_MAX_INTRS        24
 
-/*
- * Enumeration of supported MSI-X vectors
- */
-#define PVSCSI_VECTOR_COMPLETION   0
-
 /*
  * Misc constants for the rings.
  */
index c170be5..46e18c0 100644
@@ -1130,6 +1130,7 @@ extern int ata_sas_port_start(struct ata_port *ap);
 extern void ata_sas_port_stop(struct ata_port *ap);
 extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
 extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern int sata_scr_valid(struct ata_link *link);
 extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
 extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
@@ -1355,6 +1356,7 @@ extern struct device_attribute *ata_common_sdev_attrs[];
        .proc_name              = drv_name,                     \
        .slave_configure        = ata_scsi_slave_config,        \
        .slave_destroy          = ata_scsi_slave_destroy,       \
+       .eh_timed_out           = ata_scsi_timed_out,           \
        .bios_param             = ata_std_bios_param,           \
        .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
        .sdev_attrs             = ata_common_sdev_attrs
index 4d1c46a..b0e275d 100644
@@ -383,6 +383,7 @@ extern int iscsi_eh_recover_target(struct scsi_cmnd *sc);
 extern int iscsi_eh_session_reset(struct scsi_cmnd *sc);
 extern int iscsi_eh_device_reset(struct scsi_cmnd *sc);
 extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc);
+extern enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc);
 
 /*
  * iSCSI host helpers.
index 8ec7c30..a1e1930 100644
@@ -29,16 +29,6 @@ enum scsi_timeouts {
  */
 #define SCAN_WILD_CARD ~0
 
-#ifdef CONFIG_ACPI
-struct acpi_bus_type;
-
-extern int
-scsi_register_acpi_bus_type(struct acpi_bus_type *bus);
-
-extern void
-scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus);
-#endif
-
 /** scsi_status_is_good - check the status return.
  *
  * @status: the status passed up from the driver (including host and
index b6e07b5..a3dcb1b 100644
@@ -56,29 +56,6 @@ struct scsi_transport_template {
         * Allows a transport to override the default error handler.
         */
        void (* eh_strategy_handler)(struct Scsi_Host *);
-
-       /*
-        * This is an optional routine that allows the transport to become
-        * involved when a scsi io timer fires. The return value tells the
-        * timer routine how to finish the io timeout handling:
-        * EH_HANDLED:          I fixed the error, please complete the command
-        * EH_RESET_TIMER:      I need more time, reset the timer and
-        *                      begin counting again
-        * EH_NOT_HANDLED       Begin normal error recovery
-        */
-       enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
-
-       /*
-        * Used as callback for the completion of i_t_nexus request
-        * for target drivers.
-        */
-       int (* it_nexus_response)(struct Scsi_Host *, u64, int);
-
-       /*
-        * Used as callback for the completion of task management
-        * request for target drivers.
-        */
-       int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
 };
 
 #define transport_class_to_shost(tc) \
index 924c8e6..b21b8aa 100644
@@ -808,6 +808,7 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
                struct fc_vport_identifiers *);
 int fc_vport_terminate(struct fc_vport *vport);
 int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
+enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd);
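
Note: with the eh_timed_out hook removed from struct scsi_transport_template
(see the hunk above), each transport now exports its timeout handler and
LLDs wire it into their SCSI host template directly. A sketch of the
consuming side, with an invented template:

#include <linux/module.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static struct scsi_host_template example_fc_sht = {
	.module		= THIS_MODULE,
	.name		= "example_fc",
	.eh_timed_out	= fc_eh_timed_out,	/* formerly set via the
						   transport template */
	/* ... remaining methods elided ... */
};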
 
 static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
 {
index d40d3ef..dd09633 100644
@@ -88,10 +88,6 @@ struct srp_rport {
  * @terminate_rport_io: Callback function for terminating all outstanding I/O
  *     requests for an rport.
  * @rport_delete: Callback function that deletes an rport.
- *
- * Fields that are only relevant for SRP target drivers:
- * @tsk_mgmt_response: Callback function for sending a task management response.
- * @it_nexus_response: Callback function for processing an IT nexus response.
  */
 struct srp_function_template {
        /* for initiator drivers */
@@ -103,9 +99,6 @@ struct srp_function_template {
        int (*reconnect)(struct srp_rport *rport);
        void (*terminate_rport_io)(struct srp_rport *rport);
        void (*rport_delete)(struct srp_rport *rport);
-       /* for target drivers */
-       int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
-       int (* it_nexus_response)(struct Scsi_Host *, u64, int);
 };
 
 extern struct scsi_transport_template *
@@ -124,6 +117,7 @@ extern int srp_reconnect_rport(struct srp_rport *rport);
 extern void srp_start_tl_fail_timers(struct srp_rport *rport);
 extern void srp_remove_host(struct Scsi_Host *);
 extern void srp_stop_rport_timers(struct srp_rport *rport);
+enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd);
 
 /**
  * srp_chkready() - evaluate the transport layer state before I/O
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
new file mode 100644
index 0000000..bf6f826
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs
+
+#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_UFS_H
+
+#include <linux/tracepoint.h>
+
+#define UFS_LINK_STATES                        \
+       EM(UIC_LINK_OFF_STATE)          \
+       EM(UIC_LINK_ACTIVE_STATE)       \
+       EMe(UIC_LINK_HIBERN8_STATE)
+
+#define UFS_PWR_MODES                  \
+       EM(UFS_ACTIVE_PWR_MODE)         \
+       EM(UFS_SLEEP_PWR_MODE)          \
+       EMe(UFS_POWERDOWN_PWR_MODE)
+
+#define UFSCHD_CLK_GATING_STATES       \
+       EM(CLKS_OFF)                    \
+       EM(CLKS_ON)                     \
+       EM(REQ_CLKS_OFF)                \
+       EMe(REQ_CLKS_ON)
+
+/* Enums need to be exported to userspace so that user tools can parse them */
+#undef EM
+#undef EMe
+#define EM(a)  TRACE_DEFINE_ENUM(a);
+#define EMe(a) TRACE_DEFINE_ENUM(a);
+
+UFS_LINK_STATES;
+UFS_PWR_MODES;
+UFSCHD_CLK_GATING_STATES;
+
+/*
+ * Now redefine the EM() and EMe() macros to map the enums to the strings
+ * that will be printed in the output.
+ */
+#undef EM
+#undef EMe
+#define EM(a)  { a, #a },
+#define EMe(a) { a, #a }
+
+TRACE_EVENT(ufshcd_clk_gating,
+
+       TP_PROTO(const char *dev_name, int state),
+
+       TP_ARGS(dev_name, state),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __field(int, state)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __entry->state = state;
+       ),
+
+       TP_printk("%s: gating state changed to %s",
+               __get_str(dev_name),
+               __print_symbolic(__entry->state, UFSCHD_CLK_GATING_STATES))
+);
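
Note: once a single compilation unit defines CREATE_TRACE_POINTS before
including this header, emitting the event is one call. Sketch of a call
site (the clk_gating.state field is assumed from the ufshcd core and is
not visible in this excerpt):

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

	/* e.g. when the gating state machine changes state: */
	hba->clk_gating.state = CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);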
+
+TRACE_EVENT(ufshcd_clk_scaling,
+
+       TP_PROTO(const char *dev_name, const char *state, const char *clk,
+               u32 prev_state, u32 curr_state),
+
+       TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __string(state, state)
+               __string(clk, clk)
+               __field(u32, prev_state)
+               __field(u32, curr_state)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __assign_str(state, state);
+               __assign_str(clk, clk);
+               __entry->prev_state = prev_state;
+               __entry->curr_state = curr_state;
+       ),
+
+       TP_printk("%s: %s %s from %u to %u Hz",
+               __get_str(dev_name), __get_str(state), __get_str(clk),
+               __entry->prev_state, __entry->curr_state)
+);
+
+TRACE_EVENT(ufshcd_auto_bkops_state,
+
+       TP_PROTO(const char *dev_name, const char *state),
+
+       TP_ARGS(dev_name, state),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __string(state, state)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __assign_str(state, state);
+       ),
+
+       TP_printk("%s: auto bkops - %s",
+               __get_str(dev_name), __get_str(state))
+);
+
+DECLARE_EVENT_CLASS(ufshcd_profiling_template,
+       TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+                int err),
+
+       TP_ARGS(dev_name, profile_info, time_us, err),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __string(profile_info, profile_info)
+               __field(s64, time_us)
+               __field(int, err)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __assign_str(profile_info, profile_info);
+               __entry->time_us = time_us;
+               __entry->err = err;
+       ),
+
+       TP_printk("%s: %s: took %lld usecs, err %d",
+               __get_str(dev_name), __get_str(profile_info),
+               __entry->time_us, __entry->err)
+);
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
+       TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+                int err),
+       TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
+       TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+                int err),
+       TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
+       TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+                int err),
+       TP_ARGS(dev_name, profile_info, time_us, err));
+
+DECLARE_EVENT_CLASS(ufshcd_template,
+       TP_PROTO(const char *dev_name, int err, s64 usecs,
+                int dev_state, int link_state),
+
+       TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+
+       TP_STRUCT__entry(
+               __field(s64, usecs)
+               __field(int, err)
+               __string(dev_name, dev_name)
+               __field(int, dev_state)
+               __field(int, link_state)
+       ),
+
+       TP_fast_assign(
+               __entry->usecs = usecs;
+               __entry->err = err;
+               __assign_str(dev_name, dev_name);
+               __entry->dev_state = dev_state;
+               __entry->link_state = link_state;
+       ),
+
+       TP_printk(
+               "%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
+               __get_str(dev_name),
+               __entry->usecs,
+               __print_symbolic(__entry->dev_state, UFS_PWR_MODES),
+               __print_symbolic(__entry->link_state, UFS_LINK_STATES),
+               __entry->err
+       )
+);
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
+            TP_PROTO(const char *dev_name, int err, s64 usecs,
+                     int dev_state, int link_state),
+            TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
+            TP_PROTO(const char *dev_name, int err, s64 usecs,
+                     int dev_state, int link_state),
+            TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
+            TP_PROTO(const char *dev_name, int err, s64 usecs,
+                     int dev_state, int link_state),
+            TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
+            TP_PROTO(const char *dev_name, int err, s64 usecs,
+                     int dev_state, int link_state),
+            TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_init,
+            TP_PROTO(const char *dev_name, int err, s64 usecs,
+                     int dev_state, int link_state),
+            TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+TRACE_EVENT(ufshcd_command,
+       TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
+                       u32 doorbell, int transfer_len, u32 intr, u64 lba,
+                       u8 opcode),
+
+       TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __string(str, str)
+               __field(unsigned int, tag)
+               __field(u32, doorbell)
+               __field(int, transfer_len)
+               __field(u32, intr)
+               __field(u64, lba)
+               __field(u8, opcode)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __assign_str(str, str);
+               __entry->tag = tag;
+               __entry->doorbell = doorbell;
+               __entry->transfer_len = transfer_len;
+               __entry->intr = intr;
+               __entry->lba = lba;
+               __entry->opcode = opcode;
+       ),
+
+       TP_printk(
+               "%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
+               __get_str(str), __get_str(dev_name), __entry->tag,
+               __entry->doorbell, __entry->transfer_len,
+               __entry->intr, __entry->lba, (u32)__entry->opcode
+       )
+);
+
+#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
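
Note: a driver-side wrapper is the usual way to feed the ufshcd_command
event; the sketch below invents one and leaves the SCSI CDB decoding as a
comment, since only the tracepoint signature is defined by this file:

static void example_cmd_trace(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			      const char *str)
{
	u32 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	/* transfer_len, lba and opcode would be decoded from lrbp->cmd. */
	trace_ufshcd_command(dev_name(hba->dev), str, lrbp->task_tag,
			     doorbell, 0, intr, 0, 0);
}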
index 6bf1f8a..e9fdc12 100644
@@ -40,6 +40,7 @@ struct dk_cxlflash_hdr {
  */
 #define DK_CXLFLASH_ALL_PORTS_ACTIVE   0x0000000000000001ULL
 #define DK_CXLFLASH_APP_CLOSE_ADAP_FD  0x0000000000000002ULL
+#define DK_CXLFLASH_CONTEXT_SQ_CMD_MODE        0x0000000000000004ULL
 
 /*
  * General Notes: