struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
+ struct ib_device *ib_dev;
u32 max_fr_sectors;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
}
ib_conn = &iser_conn->ib_conn;
+ ib_dev = ib_conn->device->ib_device;
if (ib_conn->pi_support) {
- u32 sig_caps = ib_conn->device->ib_device->attrs.sig_prot_cap;
+ u32 sig_caps = ib_dev->attrs.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
- if (iscsi_host_add(shost,
- ib_conn->device->ib_device->dev.parent)) {
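+ /*
+  * Without SG_GAPS_REG the HCA cannot fast-register scatterlists that
+  * contain gaps, so advertise a 4K virt boundary and let the block
+  * layer split bios; this replaces the blk_queue_virt_boundary() call
+  * in the removed slave_alloc callback below.
+  */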
+ if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ shost->virt_boundary_mask = ~MASK_4K;
+
+ if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
goto free_host;
}
return 0;
}
-static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
-{
- struct iscsi_session *session;
- struct iser_conn *iser_conn;
- struct ib_device *ib_dev;
-
- mutex_lock(&unbind_iser_conn_mutex);
-
- session = starget_to_session(scsi_target(sdev))->dd_data;
- iser_conn = session->leadconn->dd_data;
- if (!iser_conn) {
- mutex_unlock(&unbind_iser_conn_mutex);
- return -ENOTCONN;
- }
- ib_dev = iser_conn->ib_conn.device->ib_device;
-
- if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
-
- mutex_unlock(&unbind_iser_conn_mutex);
-
- return 0;
-}
-
static struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over iSER",
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
- .slave_alloc = iscsi_iser_slave_alloc,
.proc_name = "iscsi_iser",
.this_id = -1,
.track_queue_depth = 1,
return 0;
}
-static int srp_slave_alloc(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost = sdev->host;
- struct srp_target_port *target = host_to_target(shost);
- struct srp_device *srp_dev = target->srp_host->srp_dev;
- struct ib_device *ibdev = srp_dev->dev;
-
- if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
- blk_queue_virt_boundary(sdev->request_queue,
- ~srp_dev->mr_page_mask);
-
- return 0;
-}
-
static int srp_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
.target_alloc = srp_target_alloc,
- .slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
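+ /*
+  * Same constraint the removed srp_slave_alloc() enforced per device:
+  * without SG_GAPS_REG, every segment must respect the MR page mask.
+  */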
+ if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
+
target = host_to_target(target_host);
target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
+#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev;
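+ /*
+  * Guard against action types the switch below does not handle;
+  * bailing out beats operating on an uninitialized zfcp_sdev.
+  */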
+ if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+ need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+ return NULL;
+
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/blktrace_api.h>
+#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
+ const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
struct zfcp_adapter *adapter = req->adapter;
struct zfcp_qdio *qdio = adapter->qdio;
int req_id = req->req_id;
return -EIO;
}
+ /*
+ * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
+ * ONLY TOUCH SYNC req AGAIN ON req->completion.
+ *
+ * The request might complete and be freed concurrently at any point
+ * now. This is not protected by the QDIO-lock (req_q_lock). So any
+ * uncontrolled access after this might result in a use-after-free bug.
+ * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
+ * when it is completed via req->completion, is it safe to use req
+ * again.
+ */
+
/* Don't increase for unsolicited status */
- if (!zfcp_fsf_req_is_status_read_buffer(req))
+ if (!is_srb)
adapter->fsf_req_seq_no++;
adapter->req_no++;
retval = zfcp_fsf_req_send(req);
if (retval)
goto failed_req_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
req->qtcb->bottom.support.req_handle = (u64) old_req_id;
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
- if (!zfcp_fsf_req_send(req))
+ if (!zfcp_fsf_req_send(req)) {
+ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
+ }
out_error_free:
zfcp_fsf_req_free(req);
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
ret = zfcp_fsf_req_send(req);
if (ret)
goto failed_send;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
- if (!retval)
+ if (!retval) {
+ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+ }
zfcp_fsf_req_free(req);
return retval;
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
retval = zfcp_fsf_req_send(req);
spin_unlock_irq(&qdio->req_q_lock);
- if (!retval)
+ if (!retval) {
+ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
wait_for_completion(&req->completion);
+ }
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
put_device(&port->dev);
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
+ unsigned long req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
req->data = wka_port;
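+ /* Snapshot the ID for tracing: req must not be touched once sent. */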
+ req_id = req->req_id;
+
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
+ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
return retval;
}
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req;
+ unsigned long req_id = 0;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
req->data = wka_port;
req->qtcb->header.port_handle = wka_port->handle;
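+ /* Snapshot the ID for tracing: req must not be touched once sent. */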
+ req_id = req->req_id;
+
zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
retval = zfcp_fsf_req_send(req);
if (retval)
zfcp_fsf_req_free(req);
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
if (!retval)
- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
+ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
return retval;
}
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
zfcp_fsf_req_free(req);
erp_action->fsf_req_id = 0;
}
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
spin_unlock_irq(&qdio->req_q_lock);
return retval;
retval = zfcp_fsf_req_send(req);
if (unlikely(retval))
goto failed_scsi_cmnd;
+ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
goto out;
zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
- if (!zfcp_fsf_req_send(req))
+ if (!zfcp_fsf_req_send(req)) {
+ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
goto out;
+ }
zfcp_fsf_req_free(req);
req = NULL;
else
shost->dma_boundary = 0xffffffff;
+ if (sht->virt_boundary_mask)
+ shost->virt_boundary_mask = sht->virt_boundary_mask;
+
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
- FC_LPORT_DBG(lport, "Receiving frames for an lport that "
+ FC_LIBFC_DBG("Receiving frames for an lport that "
"has not been initialized correctly\n");
fc_frame_free(fp);
return;
goto retry;
}
}
-EXPORT_SYMBOL(sas_wait_eh);
static int sas_queue_reset(struct domain_device *dev, int reset_type,
u64 lun, int wait)
* This function dumps an entry indexed by @idx from a queue specified by the
* queue descriptor @q.
**/
-static inline void
+static void
lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx)
{
char line_buf[LPFC_LBUF_SZ];
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.710.06.00-rc1"
-#define MEGASAS_RELDATE "June 18, 2019"
+#define MEGASAS_VERSION "07.710.50.00-rc1"
+#define MEGASAS_RELDATE "June 28, 2019"
/*
* Device IDs
"default mode is 'balanced'"
);
+int event_log_level = MFI_EVT_CLASS_CRITICAL;
+module_param(event_log_level, int, 0644);
+MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level - range is: -2 (CLASS_DEBUG) to 4 (CLASS_DEAD), default: 2 (CLASS_CRITICAL)");
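+/*
+ * Usage sketch (illustrative value): log only CLASS_DEAD events with
+ *   modprobe megaraid_sas event_log_level=4
+ * Out-of-range values fall back to MFI_EVT_CLASS_CRITICAL both at
+ * module init and at event-decode time (see the checks below).
+ */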
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
}
}
-void
+static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
instance->instancet->fire_cmd(instance,
union megasas_evt_class_locale class_locale;
class_locale.word = le32_to_cpu(evt_detail->cl.word);
- if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
+ if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+ (event_log_level > MFI_EVT_CLASS_DEAD)) {
+ printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ event_log_level = MFI_EVT_CLASS_CRITICAL;
+ }
+
+ if (class_locale.members.class >= event_log_level)
dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
le32_to_cpu(evt_detail->seq_num),
format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
static void
process_fw_state_change_wq(struct work_struct *work);
-void megasas_do_ocr(struct megasas_instance *instance)
+static void megasas_do_ocr(struct megasas_instance *instance)
{
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
static DEVICE_ATTR_RO(dump_system_regs);
static DEVICE_ATTR_RO(raid_map_id);
-struct device_attribute *megaraid_host_attrs[] = {
+static struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
&dev_attr_fw_crash_buffer,
&dev_attr_fw_crash_state,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.change_queue_depth = scsi_change_queue_depth,
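+ /* No segment-size limit in hardware; don't inherit the DMA layer's
+  * 64k default via dma_set_max_seg_size(). */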
+ .max_segment_size = 0xffffffff,
.no_write_same = 1,
};
instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
1 : 0;
- if (!instance->msix_combined) {
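+ /* Restrict MSI-x load balancing to Invader-series and newer
+  * controllers; older adapters keep affinity-based completions. */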
+ if (instance->adapter_type >= INVADER_SERIES &&
+ !instance->msix_combined) {
instance->msix_load_balance = true;
instance->smp_affinity_enable = false;
}
int ret;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
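+ /*
+  * Each channel addresses MEGASAS_MAX_DEV_PER_CHANNEL devices
+  * (commonly 128), so the channel index scales the base: e.g.
+  * channel 1, id 3 maps to 1 * 128 + 3 = 131 (illustrative values).
+  */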
- u16 targetId = (sdev->channel % 2) + sdev->id;
+ u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
+ sdev->id;
cmd = megasas_get_cmd(instance);
goto err_pcidrv;
}
+ if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
+ (event_log_level > MFI_EVT_CLASS_DEAD)) {
+ printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ event_log_level = MFI_EVT_CLASS_CRITICAL;
+ }
+
rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_version);
if (rval)
.this_id = -1,
.sg_tablesize = MPT3SAS_SG_DEPTH,
.max_sectors = 32767,
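+ /* No segment-size limit in hardware; don't inherit the DMA layer's
+  * 64k default via dma_set_max_seg_size(). */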
+ .max_segment_size = 0xffffffff,
.cmd_per_lun = 7,
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
dev, 1, 0);
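+ /* Let the aborted commands drain before re-taking the HA lock and
+  * deregistering the device below. */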
+ while (pm8001_dev->running_req)
+ msleep(20);
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("Waiting for Port reset\n"));
wait_for_completion(&completion_reset);
- if (phy->port_reset_status)
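+ /* A failed port reset leaves the device unusable: tear down its
+  * state instead of silently bailing out. */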
+ if (phy->port_reset_status) {
+ pm8001_dev_gone_notify(dev);
goto out;
+ }
/*
* 4. SATA Abort ALL
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &=
0x0000ffff;
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
- 0x140000;
+ CHIP_8006_PORT_RECOVERY_TIMEOUT;
}
pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
#define SAS_MAX_AIP 0x200000
#define IT_NEXUS_TIMEOUT 0x7D0
#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30)
+/* Port recovery timeout, 10000 ms for PM8006 controller */
+#define CHIP_8006_PORT_RECOVERY_TIMEOUT 0x640000
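+/* 0x64 in bits 16..31 = 100 timer ticks; at the apparent 100 ms tick
+ * granularity this gives the 10000 ms stated above. */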
#ifdef __LITTLE_ENDIAN_BITFIELD
struct sas_identify_frame_local {
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
+ BLIST_INQUIRY_36},
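+ /* USB stick: only 36 INQUIRY bytes are valid, but VPD pages work. */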
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
struct kmem_cache *cache;
int ret = 0;
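+ /* Take the mutex before the lookup so a concurrent caller cannot
+  * race the existence check against the cache creation below. */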
+ mutex_lock(&scsi_sense_cache_mutex);
cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
- return 0;
+ goto exit;
- mutex_lock(&scsi_sense_cache_mutex);
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
if (!scsi_sense_cache)
ret = -ENOMEM;
}
-
+ exit:
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
disposition = scsi_decide_disposition(cmd);
if (disposition != SUCCESS &&
time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- sdev_printk(KERN_ERR, cmd->device,
+ scmd_printk(KERN_ERR, cmd,
"timing out command, waited %lus\n",
wait_for/HZ);
disposition = SUCCESS;
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
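+ /*
+  * Cap transfers at what the DMA mapping layer (e.g. swiotlb) can map
+  * in one go; dma_max_mapping_size() returns bytes, max_sectors is in
+  * 512-byte sectors, hence the shift down by SECTOR_SHIFT.
+  */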
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT);
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
dma_set_seg_boundary(dev, shost->dma_boundary);
blk_queue_max_segment_size(q, shost->max_segment_size);
- dma_set_max_seg_size(dev, shost->max_segment_size);
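+ /*
+  * The block layer lifts its segment-size limit on queues with a virt
+  * boundary, so read the effective value back from the queue before
+  * programming the DMA layer.
+  */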
+ blk_queue_virt_boundary(q, shost->virt_boundary_mask);
+ dma_set_max_seg_size(dev, queue_max_segment_size(q));
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
{
struct gendisk *disk = sdkp->disk;
unsigned int nr_zones;
- u32 zone_blocks;
+ u32 zone_blocks = 0;
int ret;
if (!sd_is_zoned(sdkp))
{
blk_queue_rq_timeout(sdevice->request_queue, (storvsc_timeout * HZ));
- /* Ensure there are no gaps in presented sgls */
- blk_queue_virt_boundary(sdevice->request_queue, PAGE_SIZE - 1);
-
sdevice->no_write_same = 1;
/*
.this_id = -1,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
+ /* Ensure there are no gaps in presented sgls */
+ .virt_boundary_mask = PAGE_SIZE-1,
.no_write_same = 1,
.track_queue_depth = 1,
.change_queue_depth = storvsc_change_queue_depth,
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
-
return 0;
}
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
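+ /* Replaces the blk_queue_max_segment_size() call removed from the
+  * slave_configure path above. */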
+ .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
*/
unsigned long dma_boundary;
+ unsigned long virt_boundary_mask;
+
/*
* This specifies "machine infinity" for host templates which don't
* limit the transfer size. Note this limit represents an absolute
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned long dma_boundary;
+ unsigned long virt_boundary_mask;
/*
* In scsi-mq mode, the number of hardware queues supported by the LLD.
*