+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for linux/drivers/scsi
#
$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
+ $(obj)/scsi_sysfs.o: $(obj)/scsi_devinfo_tbl.c
+
+ quiet_cmd_bflags = GEN $@
+ cmd_bflags = sed -n 's/.*BLIST_\([A-Z0-9_]*\) *.*/BLIST_FLAG_NAME(\1),/p' $< > $@
+
+ $(obj)/scsi_devinfo_tbl.c: include/scsi/scsi_devinfo.h
+ $(call if_changed,bflags)
+
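For illustration: the rule above generates scsi_devinfo_tbl.c by pattern-matching the BLIST_* definitions in scsi_devinfo.h. Assuming the header defines flags such as BLIST_NOLUN and BLIST_SPARSELUN (real flag names; the full list is not shown here), the generated file would contain one initializer per flag, roughly:

	BLIST_FLAG_NAME(NOLUN),
	BLIST_FLAG_NAME(SPARSELUN),
	/* ... one BLIST_FLAG_NAME() line per BLIST_* flag ... */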
# If you want to play with the firmware, uncomment
# GENERATE_FIRMWARE := 1
+// SPDX-License-Identifier: GPL-2.0
/*
* NCR 5380 generic driver routines. These should make it *trivial*
* to implement 5380 SCSI drivers under Linux with a non-trantor
switch (extended_msg[2]) {
case EXTENDED_SDTR:
case EXTENDED_WDTR:
- case EXTENDED_MODIFY_DATA_POINTER:
- case EXTENDED_EXTENDED_IDENTIFY:
tmp = 0;
}
} else if (len) {
* reject it.
*/
default:
- if (!tmp) {
- shost_printk(KERN_ERR, instance, "rejecting message ");
- spi_print_msg(extended_msg);
- printk("\n");
- } else if (tmp != EXTENDED_MESSAGE)
- scmd_printk(KERN_INFO, cmd,
- "rejecting unknown message %02x\n",
- tmp);
- else
+ if (tmp == EXTENDED_MESSAGE)
scmd_printk(KERN_INFO, cmd,
"rejecting unknown extended message code %02x, length %d\n",
- extended_msg[1], extended_msg[0]);
+ extended_msg[2], extended_msg[1]);
+ else if (tmp)
+ scmd_printk(KERN_INFO, cmd,
+ "rejecting unknown message code %02x\n",
+ tmp);
msgout = MESSAGE_REJECT;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
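The swapped indices above match the SCSI extended-message layout that the switch on extended_msg[2] earlier in this function already assumes; a sketch of the buffer being parsed:

	/*
	 * extended_msg[0] = EXTENDED_MESSAGE (0x01)
	 * extended_msg[1] = additional length
	 * extended_msg[2] = extended message code (e.g. EXTENDED_SDTR)
	 *
	 * The old printk reported the length byte as the code and the
	 * 0x01 marker as the length.
	 */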
*/
static void asd_chip_reset(struct asd_ha_struct *asd_ha)
{
- struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
-
ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev));
asd_chip_hardrst(asd_ha);
- sas_ha->notify_ha_event(sas_ha, HAE_RESET);
}
/* ---------- Done List Routines ---------- */
struct asd_ascb *ascb;
list_for_each_entry(ascb, list, list) {
if (!ascb->uldd_timer) {
- ascb->timer.data = (unsigned long) ascb;
- ascb->timer.function = asd_ascb_timedout;
+ ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
}
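This hunk is the 4.14-era timer API conversion that recurs throughout this series: callbacks take a struct timer_list * and recover their container with from_timer() (a container_of() wrapper), and the TIMER_FUNC_TYPE cast bridges drivers converted ahead of the core timer_list change. A minimal before/after sketch, with hypothetical struct and field names:

	/* Before: context smuggled through the .data cookie. */
	static void foo_timedout(unsigned long data)
	{
		struct foo *f = (struct foo *)data;	/* unchecked cast */
	}
	setup_timer(&f->timer, foo_timedout, (unsigned long)f);

	/* After: context recovered from the embedded timer itself. */
	static void foo_timedout(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);
	}
	timer_setup(&f->timer, foo_timedout, 0);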
/*
- * Copyright 2017 Broadcom. All Rights Reserved.
+ * Copyright 2017 Broadcom. All Rights Reserved.
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
*
* This program is free software; you can redistribute it and/or
return -ENOMEM;
phba->ctrl.csr = addr;
phba->csr_va = addr;
- phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
if (addr == NULL)
goto pci_map_err;
phba->ctrl.db = addr;
phba->db_va = addr;
- phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);
if (phba->generation == BE_GEN2)
pcicfg_reg = 1;
goto pci_map_err;
phba->ctrl.pcicfg = addr;
phba->pci_va = addr;
- phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
return 0;
pci_map_err:
return IRQ_HANDLED;
}
+ static void beiscsi_free_irqs(struct beiscsi_hba *phba)
+ {
+ struct hwi_context_memory *phwi_context;
+ int i;
+
+ if (!phba->pcidev->msix_enabled) {
+ if (phba->pcidev->irq)
+ free_irq(phba->pcidev->irq, phba);
+ return;
+ }
+
+ phwi_context = phba->phwi_ctrlr->phwi_ctxt;
+ for (i = 0; i <= phba->num_cpus; i++) {
+ free_irq(pci_irq_vector(phba->pcidev, i),
+ &phwi_context->be_eq[i]);
+ kfree(phba->msi_name[i]);
+ }
+ }
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
if (pcidev->msix_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
- phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
- GFP_KERNEL);
+ phba->msi_name[i] = kasprintf(GFP_KERNEL,
+ "beiscsi_%02x_%02x",
+ phba->shost->host_no, i);
if (!phba->msi_name[i]) {
ret = -ENOMEM;
goto free_msix_irqs;
}
- sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
- phba->shost->host_no, i);
ret = request_irq(pci_irq_vector(pcidev, i),
be_isr_msix, 0, phba->msi_name[i],
&phwi_context->be_eq[i]);
goto free_msix_irqs;
}
}
- phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
+ phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
+ phba->shost->host_no);
if (!phba->msi_name[i]) {
ret = -ENOMEM;
goto free_msix_irqs;
}
- sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
- phba->shost->host_no);
ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
phba->msi_name[i], &phwi_context->be_eq[i]);
if (ret) {
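kasprintf() measures, allocates, and formats in one call, so the code no longer has to assume BEISCSI_MSI_NAME is large enough for the formatted name. The pattern, reduced to a sketch (variable names hypothetical):

	char *name = kasprintf(GFP_KERNEL, "beiscsi_%02x_%02x", host_no, i);
	if (!name)
		return -ENOMEM;
	/* used as the request_irq() name; kfree()d on teardown,
	   as beiscsi_free_irqs() above does */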
* this can happen if clean_task is called on a task that
* failed in xmit_task or alloc_pdu.
*/
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
- "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
- "value there=%p\n", phba->io_sgl_free_index,
- phba->io_sgl_hndl_base
- [phba->io_sgl_free_index]);
- spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
+ "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
+ phba->io_sgl_free_index,
+ phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
- 32] & CQE_CODE_MASK);
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
+ CQE_CODE_MASK);
/* Get the CID */
if (is_chip_be2_be3r(phba)) {
mem->dma = paddr;
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
- phwi_context->cur_eqd);
+ BEISCSI_EQ_DELAY_DEF);
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_cmd_eq_create"
goto err;
/* Ask BE to create MCC compl queue; */
if (phba->pcidev->msix_enabled) {
- if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
- [phba->num_cpus].q, false, true, 0))
- goto mcc_cq_free;
+ if (beiscsi_cmd_cq_create(ctrl, cq,
+ &phwi_context->be_eq[phba->num_cpus].q,
+ false, true, 0))
+ goto mcc_cq_free;
} else {
if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
false, true, 0))
- goto mcc_cq_free;
+ goto mcc_cq_free;
}
/* Alloc MCC queue */
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- phwi_context->max_eqd = 128;
- phwi_context->min_eqd = 0;
- phwi_context->cur_eqd = 32;
/* set port optic state to unknown */
phba->optic_state = 0xff;
sg = scsi_sglist(sc);
if (sc->sc_data_direction == DMA_TO_DEVICE)
writedir = 1;
- else
+ else
writedir = 0;
- return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
schedule_work(&phba->boot_work);
}
+ /**
+ * Boot flag info for iscsi-utilities
+ * Bit 0 Block valid flag
+ * Bit 1 Firmware booting selected
+ */
+ #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3
+
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
struct beiscsi_hba *phba = data;
auth_data.chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "2\n");
+ rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
if (eqd < 8)
eqd = 0;
- eqd = min_t(u32, eqd, phwi_context->max_eqd);
- eqd = max_t(u32, eqd, phwi_context->min_eqd);
+ eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
+ eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);
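With both bounds now compile-time constants, the min_t()/max_t() pair could equally be written as a single clamp (a possible further simplification, not part of this patch):

	eqd = clamp_t(u32, eqd, BEISCSI_EQ_DELAY_MIN, BEISCSI_EQ_DELAY_MAX);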
aic->jiffies = now;
aic->eq_prev = pbe_eq->cq_count;
msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
}
-static void beiscsi_hw_tpe_check(unsigned long ptr)
+static void beiscsi_hw_tpe_check(struct timer_list *t)
{
- struct beiscsi_hba *phba;
+ struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
u32 wait;
- phba = (struct beiscsi_hba *)ptr;
/* if not TPE, do nothing */
if (!beiscsi_detect_tpe(phba))
return;
msecs_to_jiffies(wait));
}
-static void beiscsi_hw_health_check(unsigned long ptr)
+static void beiscsi_hw_health_check(struct timer_list *t)
{
- struct beiscsi_hba *phba;
- phba = (struct beiscsi_hba *)ptr;
+ struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
if (beiscsi_detect_ue(phba)) {
__beiscsi_log(phba, KERN_ERR,
if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
return;
/* modify this timer to check TPE */
- phba->hw_check.function = beiscsi_hw_tpe_check;
+ phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check;
}
mod_timer(&phba->hw_check,
be2iscsi_enable_msix(phba);
beiscsi_get_params(phba);
+ beiscsi_set_host_data(phba);
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);
* Timer function gets modified for TPE detection.
* Always reinit to do health check first.
*/
- phba->hw_check.function = beiscsi_hw_health_check;
+ phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check;
mod_timer(&phba->hw_check,
jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
return 0;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
hwi_disable_intr(phba);
- if (phba->pcidev->msix_enabled) {
- for (i = 0; i <= phba->num_cpus; i++) {
- free_irq(pci_irq_vector(phba->pcidev, i),
- &phwi_context->be_eq[i]);
- kfree(phba->msi_name[i]);
- }
- } else
- if (phba->pcidev->irq)
- free_irq(phba->pcidev->irq, phba);
+ beiscsi_free_irqs(phba);
pci_free_irq_vectors(phba->pcidev);
for (i = 0; i < phba->num_cpus; i++) {
if (ret) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : be_ctrl_init failed\n");
- goto hba_free;
+ goto free_hba;
}
ret = beiscsi_init_sliport(phba);
if (ret)
- goto hba_free;
+ goto free_hba;
spin_lock_init(&phba->io_sgl_lock);
spin_lock_init(&phba->mgmt_sgl_lock);
}
beiscsi_get_port_name(&phba->ctrl, phba);
beiscsi_get_params(phba);
+ beiscsi_set_host_data(phba);
beiscsi_set_uer_feature(phba);
be2iscsi_enable_msix(phba);
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
"BM_%d : beiscsi_dev_probe-"
"Failed to beiscsi_init_irqs\n");
- goto free_blkenbld;
+ goto disable_iopoll;
}
hwi_enable_intr(phba);
ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
if (ret)
- goto free_blkenbld;
+ goto free_irqs;
/* set online bit after port is operational */
set_bit(BEISCSI_HBA_ONLINE, &phba->state);
* Start UE detection here. UE before this will cause stall in probe
* and eventually fail the probe.
*/
- init_timer(&phba->hw_check);
- phba->hw_check.function = beiscsi_hw_health_check;
- phba->hw_check.data = (unsigned long)phba;
+ timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
mod_timer(&phba->hw_check,
jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
- free_blkenbld:
- destroy_workqueue(phba->wq);
+ free_irqs:
+ hwi_disable_intr(phba);
+ beiscsi_free_irqs(phba);
+ disable_iopoll:
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
irq_poll_disable(&pbe_eq->iopoll);
}
+ destroy_workqueue(phba->wq);
free_twq:
hwi_cleanup_port(phba);
beiscsi_cleanup_port(phba);
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
- phba->ctrl.mbox_mem_alloced.dma);
+ phba->ctrl.mbox_mem_alloced.dma);
beiscsi_unmap_pci_function(phba);
- hba_free:
+ free_hba:
pci_disable_msix(phba->pcidev);
pci_dev_put(phba->pcidev);
iscsi_host_free(phba->shost);
int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *task)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_login_request *login_wqe;
struct iscsi_login_req *login_hdr;
u32 dword;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
login_hdr = (struct iscsi_login_req *)task->hdr;
login_wqe = (struct bnx2i_login_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
struct iscsi_tm *tmfabort_hdr;
struct scsi_cmnd *ref_sc;
struct iscsi_task *ctask;
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_tmf_request *tmfabort_wqe;
u32 dword;
u32 scsi_lun[2];
- bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
tmfabort_wqe = (struct bnx2i_tmf_request *)
bnx2i_conn->ep->qp.sq_prod_qe;
int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *mtask)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_text_request *text_wqe;
struct iscsi_text *text_hdr;
u32 dword;
- bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
text_hdr = (struct iscsi_text *)mtask->hdr;
text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
char *datap, int data_len, int unsol)
{
struct bnx2i_endpoint *ep = bnx2i_conn->ep;
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_nop_out_request *nopout_wqe;
struct iscsi_nopout *nopout_hdr;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
struct iscsi_task *task)
{
- struct bnx2i_cmd *bnx2i_cmd;
struct bnx2i_logout_request *logout_wqe;
struct iscsi_logout *logout_hdr;
- bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
logout_hdr = (struct iscsi_logout *)task->hdr;
logout_wqe = (struct bnx2i_logout_request *)
*
* routine to handle connection offload/destroy request timeout
*/
-void bnx2i_ep_ofld_timer(unsigned long data)
+void bnx2i_ep_ofld_timer(struct timer_list *t)
{
- struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+ struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer);
if (ep->state == EP_STATE_OFLD_START) {
printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
uint32_t iq_start_stop = (iq_params->iq_start) ?
FW_IQ_CMD_IQSTART_F :
FW_IQ_CMD_IQSTOP_F;
+ int relaxed = !(hw->flags & CSIO_HWF_ROOT_NO_RELAXED_ORDERING);
/*
* If this IQ write is cascaded with IQ alloc request, do not
cmdp->iqns_to_fl0congen |= htonl(
FW_IQ_CMD_FL0HOSTFCMODE_V(iq_params->fl0hostfcmode)|
FW_IQ_CMD_FL0CPRIO_V(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
+ FW_IQ_CMD_FL0DATARO_V(relaxed) |
FW_IQ_CMD_FL0PADEN_V(iq_params->fl0paden) |
FW_IQ_CMD_FL0PACKEN_V(iq_params->fl0packen));
cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
*/
int
csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
- void (*timer_fn)(uintptr_t))
+ void (*timer_fn)(struct timer_list *))
{
- struct timer_list *timer = &mbm->timer;
-
- init_timer(timer);
- timer->function = timer_fn;
- timer->data = (unsigned long)hw;
+ mbm->hw = hw;
+ timer_setup(&mbm->timer, timer_fn, 0);
INIT_LIST_HEAD(&mbm->req_q);
INIT_LIST_HEAD(&mbm->cbfn_q);
}
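With the hw pointer now stored in the mbm, a converted timer_fn recovers its context from the timer itself instead of the removed .data cookie; roughly (function name hypothetical):

	static void csio_mb_timer_fn(struct timer_list *t)
	{
		struct csio_mbm *mbm = from_timer(mbm, t, timer);
		struct csio_hw *hw = mbm->hw;
		/* ... process pending mailbox requests for hw ... */
	}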
}
-static void csk_act_open_retry_timer(unsigned long data)
+static void csk_act_open_retry_timer(struct timer_list *t)
{
struct sk_buff *skb = NULL;
- struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
+ struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
struct l2t_entry *);
spin_lock_bh(&csk->lock);
if (status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != csk_act_open_retry_timer) {
- csk->retry_timer.function = csk_act_open_retry_timer;
+ csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) {
+ csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
csk, csk->state, csk->flags, csk->tid);
cxgbi_sock_free_cpl_skbs(csk);
+ cxgbi_sock_purge_write_queue(csk);
if (csk->wr_cred != csk->wr_max_cred) {
cxgbi_sock_purge_wr_queue(csk);
cxgbi_sock_reset_wr_list(csk);
kref_init(&csk->refcnt);
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
- setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
+ timer_setup(&csk->retry_timer, NULL, 0);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
rel_rt:
ip_rt_put(rt);
- if (csk)
- cxgbi_sock_closed(csk);
err_out:
return ERR_PTR(err);
}
struct iscsi_tcp_task *tcp_task = task->dd_data;
struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
struct scsi_cmnd *sc = task->sc;
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;
tcp_task->dd_data = tdata;
task->hdr = NULL;
- if (tdata->skb) {
- kfree_skb(tdata->skb);
- tdata->skb = NULL;
- }
-
if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
(opcode == ISCSI_OP_SCSI_DATA_OUT ||
(opcode == ISCSI_OP_SCSI_CMD &&
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- struct cxgbi_sock *csk = cconn->cep->csk;
- struct net_device *ndev = cdev->ports[csk->port_id];
ndev->stats.tx_dropped++;
return -ENOMEM;
}
- skb_get(tdata->skb);
skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
- task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+
+ if (task->sc) {
+ task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+ } else {
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ if (!task->hdr) {
+ __kfree_skb(tdata->skb);
+ tdata->skb = NULL;
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ }
task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */
/* data_out uses scsi_cmd's itt */
unsigned int datalen;
int err;
- if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) {
+ if (!skb) {
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
- "task 0x%p, skb 0x%p\n", task, skb);
+ "task 0x%p\n", task);
return 0;
}
return -EPIPE;
}
+ tdata->skb = NULL;
datalen = skb->data_len;
/* write ppod first if using ofldq to write ppod */
/* continue. Let fl get the data */
}
+ if (!task->sc)
+ memcpy(skb->data, task->hdr, SKB_TX_ISCSI_PDU_HEADER_MAX);
+
err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
if (err > 0) {
int pdulen = err;
pdulen += ISCSI_DIGEST_SIZE;
task->conn->txdata_octets += pdulen;
- cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE);
return 0;
}
"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
task, skb, skb->len, skb->data_len, err);
/* reset skb to send when we are called again */
+ tdata->skb = skb;
return err;
}
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
- __kfree_skb(tdata->skb);
- tdata->skb = NULL;
+ __kfree_skb(skb);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
task, tdata->skb, task->hdr_itt);
tcp_task->dd_data = NULL;
+
+ if (!task->sc)
+ kfree(task->hdr);
+ task->hdr = NULL;
+
/* never reached the xmit task callout */
if (tdata->skb) {
- kfree_skb(tdata->skb);
+ __kfree_skb(tdata->skb);
tdata->skb = NULL;
}
goto err_out;
}
- ifindex = hba->ndev->ifindex;
+ rtnl_lock();
+ if (!vlan_uses_dev(hba->ndev))
+ ifindex = hba->ndev->ifindex;
+ rtnl_unlock();
}
if (dst_addr->sa_family == AF_INET) {
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
-
+ static void fcoe_vport_remove(struct fc_lport *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
.set_fcoe_ctlr_mode = fcoe_ctlr_mode,
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
- rtnl_lock();
- if (!fcoe->removed)
- fcoe_interface_remove(fcoe);
- rtnl_unlock();
-
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
skb_queue_head_init(&port->fcoe_pending_queue);
port->fcoe_pending_queue_active = 0;
- setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);
+ timer_setup(&port->timer, fcoe_queue_timer, 0);
fcoe_link_speed_update(lport);
* fcoe_if_destroy() - Tear down a SW FCoE instance
* @lport: The local port to be destroyed
*
+ * Locking: Must be called with the RTNL mutex held.
+ *
*/
static void fcoe_if_destroy(struct fc_lport *lport)
{
/* Free existing transmit skbs */
fcoe_clean_pending_queue(lport);
- rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
dev_uc_del(netdev, port->data_src_addr);
if (lport->vport)
synchronize_net();
else
fcoe_interface_remove(fcoe);
- rtnl_unlock();
/* Free queued packets for the per-CPU receive threads */
fcoe_percpu_clean(lport);
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
port = lport_priv(ctlr->lp);
- queue_work(fcoe_wq, &port->destroy_work);
+ fcoe_vport_remove(lport);
+ mutex_lock(&fcoe_config_mutex);
+ fcoe_if_destroy(lport);
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ fcoe_interface_cleanup(fcoe);
+ mutex_unlock(&fcoe_config_mutex);
+ fcoe_ctlr_device_delete(fcoe_ctlr_to_ctlr_dev(ctlr));
goto out;
break;
case NETDEV_FEAT_CHANGE:
struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
struct fcoe_interface *fcoe;
- struct Scsi_Host *shost;
- struct fc_host_attrs *fc_host;
- unsigned long flags;
- struct fc_vport *vport;
- struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
- shost = port->lport->host;
- fc_host = shost_to_fc_host(shost);
-
- /* Loop through all the vports and mark them for deletion */
- spin_lock_irqsave(shost->host_lock, flags);
- list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
- if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
- continue;
- } else {
- vport->flags |= FC_VPORT_DELETING;
- queue_work(fc_host_work_q(shost),
- &vport->vport_delete_work);
- }
- }
- spin_unlock_irqrestore(shost->host_lock, flags);
- flush_workqueue(fc_host_work_q(shost));
+ fcoe_vport_remove(port->lport);
mutex_lock(&fcoe_config_mutex);
ctlr = fcoe_to_ctlr(fcoe);
cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ rtnl_lock();
fcoe_if_destroy(port->lport);
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
+ rtnl_unlock();
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
rc = -EIO;
+ if (!fcoe->removed)
+ fcoe_interface_remove(fcoe);
rtnl_unlock();
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
mutex_unlock(&n_port->lp_mutex);
mutex_lock(&fcoe_config_mutex);
+ rtnl_lock();
fcoe_if_destroy(vn_port);
+ rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
}
+ /**
+ * fcoe_vport_remove() - remove attached vports
+ * @lport: lport for which the vports should be removed
+ */
+ static void fcoe_vport_remove(struct fc_lport *lport)
+ {
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
+
+ shost = lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+ }
+
/**
* fcoe_vport_disable() - change vport state
* @vport: vport to bring online/offline
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
#define HISI_SAS_QUEUE_SLOTS 512
- #define HISI_SAS_MAX_ITCT_ENTRIES 2048
+ #define HISI_SAS_MAX_ITCT_ENTRIES 1024
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
int shift;
const char *msg;
int reg;
+ const struct hisi_sas_hw_error *sub;
};
struct hisi_sas_phy {
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct timer_list timer;
struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
int (*slot_complete)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
void (*phys_init)(struct hisi_hba *hisi_hba);
- void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
+ void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no);
void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no);
void (*get_events)(struct hisi_hba *hisi_hba, int phy_no);
};
struct hisi_sas_breakpoint {
- u8 data[128]; /*io128 byte*/
+ u8 data[128];
+ };
+
+ struct hisi_sas_sata_breakpoint {
+ struct hisi_sas_breakpoint tag[32];
};
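Sizing note: with HISI_SAS_MAX_ITCT_ENTRIES reduced to 1024, the SATA breakpoint area allocated later is HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint) = 1024 * 32 * 128 bytes = 4 MiB of coherent DMA memory, replacing the old max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2 sizing.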
struct hisi_sas_sge {
struct sas_task *task,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
+ extern void hisi_sas_rst_work_handler(struct work_struct *work);
+ extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
#endif
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ if (!task->lldd_task)
+ return;
+
+ task->lldd_task = NULL;
+
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter, slot->n_elem,
task->data_dir);
- task->lldd_task = NULL;
-
if (sas_dev)
atomic64_dec(&sas_dev->running_req);
}
if (slot->buf)
dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
-
list_del_init(&slot->entry);
+ slot->buf = NULL;
slot->task = NULL;
slot->port = NULL;
hisi_sas_slot_index_free(hisi_hba, slot->idx);
goto err_out_buf;
}
+ spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
{
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct hisi_sas_device *sas_dev = NULL;
+ unsigned long flags;
int i;
- spin_lock(&hisi_hba->lock);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
int queue = i % hisi_hba->queue_count;
break;
}
}
- spin_unlock(&hisi_hba->lock);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
phy->hisi_hba = hisi_hba;
phy->port = NULL;
- init_timer(&phy->timer);
sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
case PHY_FUNC_LINK_RESET:
hisi_hba->hw->phy_disable(hisi_hba, phy_no);
msleep(100);
- hisi_hba->hw->phy_enable(hisi_hba, phy_no);
+ hisi_hba->hw->phy_start(hisi_hba, phy_no);
break;
case PHY_FUNC_DISABLE:
complete(&task->slow_task->completion);
}
-static void hisi_sas_tmf_timedout(unsigned long data)
+static void hisi_sas_tmf_timedout(struct timer_list *t)
{
- struct sas_task *task = (struct sas_task *)data;
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
}
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.data = (unsigned long) task;
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
u32 old_state, state;
hisi_sas_release_tasks(hisi_hba);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
- sas_ha->notify_ha_event(sas_ha, HAE_RESET);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
/* Init and wait for PHYs to come up and all libsas event finished. */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED) {
+ if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
if (rc)
goto err_out_buf;
-
+ spin_lock_irqsave(&hisi_hba->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
task->dev = device;
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.data = (unsigned long)task;
- task->slow_task->timer.function = hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
if (slot)
slot->task = NULL;
dev_err(dev, "internal task abort: timeout.\n");
+ goto exit;
}
}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
+ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
+ {
+ int i;
+
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ tasklet_kill(&cq->tasklet);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
memset(hisi_hba->breakpoint, 0, s);
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
if (!hisi_hba->initial_fis)
goto err_out;
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
if (!hisi_hba->sata_breakpoint)
hisi_hba->initial_fis,
hisi_hba->initial_fis_dma);
- s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2;
+ s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
if (hisi_hba->sata_breakpoint)
dma_free_coherent(dev, s,
hisi_hba->sata_breakpoint,
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
- static void hisi_sas_rst_work_handler(struct work_struct *work)
+ void hisi_sas_rst_work_handler(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
container_of(work, struct hisi_hba, rst_work);
hisi_sas_controller_reset(hisi_hba);
}
+ EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
hisi_hba->shost = shost;
SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
- init_timer(&hisi_hba->timer);
+ timer_setup(&hisi_hba->timer, NULL, 0);
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
start_phy_v1_hw(hisi_hba, phy_no);
}
-static void start_phys_v1_hw(unsigned long data)
+static void start_phys_v1_hw(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
int i;
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
}
- setup_timer(timer, start_phys_v1_hw, (unsigned long)hisi_hba);
+ timer_setup(timer, start_phys_v1_hw, 0);
mod_timer(timer, jiffies + HZ);
}
.start_delivery = start_delivery_v1_hw,
.slot_complete = slot_complete_v1_hw,
.phys_init = phys_init_v1_hw,
- .phy_enable = enable_phy_v1_hw,
+ .phy_start = start_phy_v1_hw,
.phy_disable = disable_phy_v1_hw,
.phy_hard_reset = phy_hard_reset_v1_hw,
.phy_set_linkrate = phy_set_linkrate_v1_hw,
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF),
.msk = HGC_DQE_ECC_1B_ADDR_MSK,
.shift = HGC_DQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_dqe_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_1B_OFF),
.msk = HGC_IOST_ECC_1B_ADDR_MSK,
.shift = HGC_IOST_ECC_1B_ADDR_OFF,
- .msg = "hgc_iost_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_iost_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_1B_OFF),
.msk = HGC_ITCT_ECC_1B_ADDR_MSK,
.shift = HGC_ITCT_ECC_1B_ADDR_OFF,
- .msg = "hgc_itct_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_itct_acc1b_intr found: am address is 0x%08X\n",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_iostl_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_itctl_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_1B_OFF),
.msk = HGC_CQE_ECC_1B_ADDR_MSK,
.shift = HGC_CQE_ECC_1B_ADDR_OFF,
- .msg = "hgc_cqe_acc1b_intr found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_acc1b_intr found: Ram address is 0x%08X\n",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem0_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem1_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem2_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_acc1b_intr found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem3_acc1b_intr found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS15,
},
};
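The reflowed .msg strings in these tables fix a subtle formatting bug: a backslash-newline inside a string literal splices the lines, so the continuation line's leading whitespace becomes part of the string. For example:

	/* these two literals are identical: */
	const char *a = "found: \
			Ram address";
	const char *b = "found: \t\t\tRam address";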
.irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
.msk = HGC_DQE_ECC_MB_ADDR_MSK,
.shift = HGC_DQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_dqe_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_dqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_DQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
.msk = HGC_IOST_ECC_MB_ADDR_MSK,
.shift = HGC_IOST_ECC_MB_ADDR_OFF,
- .msg = "hgc_iost_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_iost_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_IOST_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
.msk = HGC_ITCT_ECC_MB_ADDR_MSK,
.shift = HGC_ITCT_ECC_MB_ADDR_OFF,
- .msg = "hgc_itct_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_itct_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_ITCT_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
- .msg = "hgc_iostl_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_iostl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
.msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
.shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
- .msg = "hgc_itctl_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "hgc_itctl_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_LM_DFX_STATUS2,
},
{
.irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
.msk = HGC_CQE_ECC_MB_ADDR_MSK,
.shift = HGC_CQE_ECC_MB_ADDR_OFF,
- .msg = "hgc_cqe_accbad_intr (0x%x) found: \
- Ram address is 0x%08X\n",
+ .msg = "hgc_cqe_accbad_intr (0x%x) found: Ram address is 0x%08X\n",
.reg = HGC_CQE_ECC_ADDR,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
- .msg = "rxm_mem0_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem0_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
- .msg = "rxm_mem1_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem1_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
.shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
- .msg = "rxm_mem2_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem2_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS14,
},
{
.irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
.msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
.shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
- .msg = "rxm_mem3_accbad_intr (0x%x) found: \
- memory address is 0x%08X\n",
+ .msg = "rxm_mem3_accbad_intr (0x%x) found: memory address is 0x%08X\n",
.reg = HGC_RXM_DFX_STATUS15,
},
};
#define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \
err_phase == 0x20 || err_phase == 0x40)
-static void link_timeout_disable_link(unsigned long data);
+static void link_timeout_disable_link(struct timer_list *t);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
{
struct hisi_sas_device *sas_dev = NULL;
int i, sata_dev = dev_is_sata(device);
int sata_idx = -1;
+ unsigned long flags;
- spin_lock(&hisi_hba->lock);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
if (sata_dev)
if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx))
}
out:
- spin_unlock(&hisi_hba->lock);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return sas_dev;
}
upper_32_bits(hisi_hba->initial_fis_dma));
}
-static void link_timeout_enable_link(unsigned long data)
+static void link_timeout_enable_link(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
int i, reg_val;
for (i = 0; i < hisi_hba->n_phy; i++) {
}
}
- hisi_hba->timer.function = link_timeout_disable_link;
+ hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
-static void link_timeout_disable_link(unsigned long data)
+static void link_timeout_disable_link(struct timer_list *t)
{
- struct hisi_hba *hisi_hba = (struct hisi_hba *)data;
+ struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer);
int i, reg_val;
reg_val = hisi_sas_read32(hisi_hba, PHY_STATE);
}
}
- hisi_hba->timer.function = link_timeout_enable_link;
+ hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
}
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
- hisi_hba->timer.data = (unsigned long)hisi_hba;
- hisi_hba->timer.function = link_timeout_disable_link;
+ hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&hisi_hba->timer);
}
if (unlikely(aborted)) {
ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -1;
}
return 0;
}
-static void hisi_sas_internal_abort_quirk_timeout(unsigned long data)
+static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t)
{
- struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data;
+ struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer);
struct hisi_sas_port *port = slot->port;
struct asd_sas_port *asd_sas_port;
struct asd_sas_phy *sas_phy;
struct timer_list *timer = &slot->internal_abort_timer;
/* setup the quirk timer */
- setup_timer(timer, hisi_sas_internal_abort_quirk_timeout,
- (unsigned long)slot);
+ timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0);
/* Set the timeout to 10ms less than internal abort timeout */
mod_timer(timer, jiffies + msecs_to_jiffies(100));
return IRQ_HANDLED;
}
- #define AXI_ERR_NR 8
- static const char axi_err_info[AXI_ERR_NR][32] = {
- "IOST_AXI_W_ERR",
- "IOST_AXI_R_ERR",
- "ITCT_AXI_W_ERR",
- "ITCT_AXI_R_ERR",
- "SATA_AXI_W_ERR",
- "SATA_AXI_R_ERR",
- "DQE_AXI_R_ERR",
- "CQE_AXI_W_ERR"
+ static const struct hisi_sas_hw_error axi_error[] = {
+ { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
+ { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
+ { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
+ { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
+ { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
+ { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
+ { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
+ { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
+ {},
+ };
+
+ static const struct hisi_sas_hw_error fifo_error[] = {
+ { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
+ { .msk = BIT(9), .msg = "CQE_MSG_FIFO" },
+ { .msk = BIT(10), .msg = "GETDQE_FIFO" },
+ { .msk = BIT(11), .msg = "CMDP_FIFO" },
+ { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
+ {},
};
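The empty { } sentinel terminating axi_error and fifo_error is what lets the interrupt handler walk a sub-table without a separate length field: its inner loop runs for (; sub->msk || sub->msg; sub++) and stops at the all-zero entry.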
- #define FIFO_ERR_NR 5
- static const char fifo_err_info[FIFO_ERR_NR][32] = {
- "CQE_WINFO_FIFO",
- "CQE_MSG_FIFIO",
- "GETDQE_FIFO",
- "CMDP_FIFO",
- "AWTCTRL_FIFO"
+ static const struct hisi_sas_hw_error fatal_axi_errors[] = {
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
+ .msg = "write pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
+ .msg = "iptt no match slot",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
+ .msg = "read pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = axi_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = fifo_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
+ .msg = "LM add/fetch list",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
+ .msg = "SAS_HGC_ABT fetch LM list",
+ },
};
static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
struct hisi_hba *hisi_hba = p;
u32 irq_value, irq_msk, err_value;
struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *axi_error;
+ int i;
irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
- if (irq_value) {
- if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_WP_DEPTH_OFF);
- dev_warn(dev, "write pointer and depth error (0x%x) \
- found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 <<
- ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF);
- dev_warn(dev, "iptt no match slot error (0x%x) found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
- if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) {
- dev_warn(dev, "read pointer and depth error (0x%x) \
- found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) {
- int i;
-
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_AXI_OFF);
- err_value = hisi_sas_read32(hisi_hba,
- HGC_AXI_FIFO_ERR_INFO);
-
- for (i = 0; i < AXI_ERR_NR; i++) {
- if (err_value & BIT(i)) {
- dev_warn(dev, "%s (0x%x) found!\n",
- axi_err_info[i], irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
- }
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_FIFO_OFF)) {
- int i;
-
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_FIFO_OFF);
- err_value = hisi_sas_read32(hisi_hba,
- HGC_AXI_FIFO_ERR_INFO);
+ for (i = 0; i < ARRAY_SIZE(fatal_axi_errors); i++) {
+ axi_error = &fatal_axi_errors[i];
+ if (!(irq_value & axi_error->irq_msk))
+ continue;
- for (i = 0; i < FIFO_ERR_NR; i++) {
- if (err_value & BIT(AXI_ERR_NR + i)) {
- dev_warn(dev, "%s (0x%x) found!\n",
- fifo_err_info[i], irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ axi_error->irq_msk);
+ if (axi_error->sub) {
+ const struct hisi_sas_hw_error *sub = axi_error->sub;
+
+ err_value = hisi_sas_read32(hisi_hba, axi_error->reg);
+ for (; sub->msk || sub->msg; sub++) {
+ if (!(err_value & sub->msk))
+ continue;
+ dev_warn(dev, "%s (0x%x) found!\n",
+ sub->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
-
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_LM_OFF);
- dev_warn(dev, "LM add/fetch list error (0x%x) found!\n",
- irq_value);
- queue_work(hisi_hba->wq, &hisi_hba->rst_work);
- }
-
- if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) {
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- 1 << ENT_INT_SRC3_ABT_OFF);
- dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n",
- irq_value);
+ } else {
+ dev_warn(dev, "%s (0x%x) found!\n",
+ axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
+ }
- if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
- u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
- u32 dev_id = reg_val & ITCT_DEV_MSK;
- struct hisi_sas_device *sas_dev =
- &hisi_hba->devices[dev_id];
+ if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+ u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ u32 dev_id = reg_val & ITCT_DEV_MSK;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[dev_id];
- hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
- dev_dbg(dev, "clear ITCT ok\n");
- complete(sas_dev->completion);
- }
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ complete(sas_dev->completion);
}
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value);
interrupt_disable_v2_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
.start_delivery = start_delivery_v2_hw,
.slot_complete = slot_complete_v2_hw,
.phys_init = phys_init_v2_hw,
- .phy_enable = enable_phy_v2_hw,
+ .phy_start = start_phy_v2_hw,
.phy_disable = disable_phy_v2_hw,
.phy_hard_reset = phy_hard_reset_v2_hw,
.get_events = phy_get_events_v2_hw,
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- int i;
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
- for (i = 0; i < hisi_hba->queue_count; i++) {
- struct hisi_sas_cq *cq = &hisi_hba->cq[i];
-
- tasklet_kill(&cq->tasklet);
- }
+ hisi_sas_kill_tasklets(hisi_hba);
return hisi_sas_remove(pdev);
}
#define HGC_IOMB_PROC1_STATUS 0x104
#define CFG_1US_TIMER_TRSH 0xcc
#define CHNL_INT_STATUS 0x148
+ #define HGC_AXI_FIFO_ERR_INFO 0x154
+ #define AXI_ERR_INFO_OFF 0
+ #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF)
+ #define FIFO_ERR_INFO_OFF 8
+ #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF)
#define INT_COAL_EN 0x19c
#define OQ_INT_COAL_TIME 0x1a0
#define OQ_INT_COAL_CNT 0x1a4
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+ #define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+ #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+ #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+ #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+ #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define DMA_RX_STATUS (PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF 0
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
+ #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
+ #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
+ #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
+ #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
- #define MAX_ITCT_HW 4096 /* max the hw can support */
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
#error Max ITCT exceeded
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
0xa03e8);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER,
0x7f7a120);
+ hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER,
+ 0x2a0a80);
}
for (i = 0; i < hisi_hba->queue_count; i++) {
/* Delivery queue */
static void free_device_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
+ DECLARE_COMPLETION_ONSTACK(completion);
u64 dev_id = sas_dev->device_id;
- struct device *dev = hisi_hba->dev;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ sas_dev->completion = &completion;
+
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
ENT_INT_SRC3_ITC_INT_MSK);
/* clear the itct table*/
- reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
- reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
- udelay(10);
- reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
- if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
- dev_dbg(dev, "got clear ITCT done interrupt\n");
-
- /* invalid the itct state*/
- memset(itct, 0, sizeof(struct hisi_sas_itct));
- hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
- ENT_INT_SRC3_ITC_INT_MSK);
-
- /* clear the itct */
- hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
- dev_dbg(dev, "clear ITCT ok\n");
- }
+ wait_for_completion(sas_dev->completion);
+ memset(itct, 0, sizeof(struct hisi_sas_itct));
}
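Note the handshake introduced here: free_device_v3_hw() now blocks on an on-stack completion, and the ITCT-clear branch of the new fatal_axi_int_v3_hw() handler below is what calls complete(sas_dev->completion), replacing the old udelay()-and-poll sequence.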
static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
{
int i, bitmap = 0;
u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
for (i = 0; i < hisi_hba->n_phy; i++)
- if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
- bitmap |= 1 << i;
+ if (phy_state & BIT(i))
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= BIT(i);
return bitmap;
}
return rc;
}
- static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
- {
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
- return 0;
- }
-
static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
return res;
}
+ static const struct hisi_sas_hw_error port_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+ };
+
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
if ((irq_msk & (4 << (phy_no * 4))) &&
irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
- dev_name(dev), irq_value1);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT1, irq_value1);
return IRQ_HANDLED;
}
+ static const struct hisi_sas_hw_error axi_error[] = {
+ { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
+ { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
+ { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
+ { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
+ { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
+ { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
+ { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
+ { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
+ {},
+ };
+
+ static const struct hisi_sas_hw_error fifo_error[] = {
+ { .msk = BIT(8), .msg = "CQE_WINFO_FIFO" },
+ { .msk = BIT(9), .msg = "CQE_MSG_FIFO" },
+ { .msk = BIT(10), .msg = "GETDQE_FIFO" },
+ { .msk = BIT(11), .msg = "CMDP_FIFO" },
+ { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
+ {},
+ };
+
+ static const struct hisi_sas_hw_error fatal_axi_error[] = {
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
+ .msg = "write pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
+ .msg = "iptt no match slot",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
+ .msg = "read pointer and depth",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = axi_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
+ .reg = HGC_AXI_FIFO_ERR_INFO,
+ .sub = fifo_error,
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
+ .msg = "LM add/fetch list",
+ },
+ {
+ .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
+ .msg = "SAS_HGC_ABT fetch LM list",
+ },
+ };
+
+ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
+ {
+ u32 irq_value, irq_msk;
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = hisi_hba->dev;
+ int i;
+
+ irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
+
+ irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+
+ for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
+ const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
+
+ if (!(irq_value & error->irq_msk))
+ continue;
+
+ if (error->sub) {
+ const struct hisi_sas_hw_error *sub = error->sub;
+ u32 err_value = hisi_sas_read32(hisi_hba, error->reg);
+
+ for (; sub->msk || sub->msg; sub++) {
+ if (!(err_value & sub->msk))
+ continue;
+
+ dev_warn(dev, "%s error (0x%x) found!\n",
+ sub->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ } else {
+ dev_warn(dev, "%s error (0x%x) found!\n",
+ error->msg, irq_value);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ }
+
+ if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
+ u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ u32 dev_id = reg_val & ITCT_DEV_MSK;
+ struct hisi_sas_device *sas_dev =
+ &hisi_hba->devices[dev_id];
+
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ complete(sas_dev->completion);
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
+
+ return IRQ_HANDLED;
+ }
+
static void
slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
ts->stat = SAS_ABORTED_TASK;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -1;
}
goto free_phy_irq;
}
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
+ fatal_axi_int_v3_hw, 0,
+ DRV_NAME " fatal", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_chnl_interrupt;
+ }
+
/* Init tasklets for cq only */
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
free_irq(pci_irq_vector(pdev, k+16), cq);
}
+ free_irq(pci_irq_vector(pdev, 11), hisi_hba);
+ free_chnl_interrupt:
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
free_phy_irq:
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
return hisi_sas_read32(hisi_hba, PHY_STATE);
}
+ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+ {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_phy *sphy = sas_phy->phy;
+ u32 reg_value;
+
+ /* loss of dword sync */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
+ sphy->loss_of_dword_sync_count += reg_value;
+
+ /* phy reset problem */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
+ sphy->phy_reset_problem_count += reg_value;
+
+ /* invalid dword */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
+ sphy->invalid_dword_count += reg_value;
+
+ /* disparity err */
+ reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
+ sphy->running_disparity_error_count += reg_value;
+ }
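+ /*
+ * These sums feed the sas_phy error statistics that libsas exposes
+ * through the SAS transport class sysfs; the function is wired up as
+ * the .get_events hook in the ops table below.
+ */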
+
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
interrupt_disable_v3_hw(hisi_hba);
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
hisi_sas_stop_phys(hisi_hba);
.start_delivery = start_delivery_v3_hw,
.slot_complete = slot_complete_v3_hw,
.phys_init = phys_init_v3_hw,
- .phy_enable = enable_phy_v3_hw,
+ .phy_start = start_phy_v3_hw,
.phy_disable = disable_phy_v3_hw,
.phy_hard_reset = phy_hard_reset_v3_hw,
.phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
.dereg_device = dereg_device_v3_hw,
.soft_reset = soft_reset_v3_hw,
.get_phys_state = get_phys_state_v3_hw,
+ .get_events = phy_get_events_v3_hw,
};
static struct Scsi_Host *
}
hisi_hba = shost_priv(shost);
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
hisi_hba->hw = &hisi_sas_v3_hw;
hisi_hba->pci_dev = pdev;
hisi_hba->dev = dev;
hisi_hba->shost = shost;
SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
- init_timer(&hisi_hba->timer);
+ timer_setup(&hisi_hba->timer, NULL, 0);
if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
free_irq(pci_irq_vector(pdev, 1), hisi_hba);
free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+ free_irq(pci_irq_vector(pdev, 11), hisi_hba);
for (i = 0; i < hisi_hba->queue_count; i++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[i];
free_irq(pci_irq_vector(pdev, i+16), cq);
- tasklet_kill(&cq->tasklet);
}
pci_free_irq_vectors(pdev);
}
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
+ hisi_sas_kill_tasklets(hisi_hba);
pci_release_regions(pdev);
pci_disable_device(pdev);
hisi_sas_free(hisi_hba);
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
- #define HPSA_DRIVER_VERSION "3.4.20-0"
+ #define HPSA_DRIVER_VERSION "3.4.20-125"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
}
offload_enabled = hdev->offload_enabled;
spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "%d\n", offload_enabled);
+
+ if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+ else
+ return snprintf(buf, 40, "%s\n",
+ "Not applicable for a controller");
}
#define MAX_PATHS 8
dev->model,
label,
dev->offload_config ? '+' : '-',
- dev->offload_enabled ? '+' : '-',
+ dev->offload_to_be_enabled ? '+' : '-',
dev->expose_device);
}
(*nadded)++;
hpsa_show_dev_msg(KERN_INFO, h, device,
device->expose_device ? "added" : "masked");
- device->offload_to_be_enabled = device->offload_enabled;
- device->offload_enabled = 0;
return 0;
}
- /* Update an entry in h->dev[] array. */
+ /*
+ * Called during a scan operation.
+ *
+ * Update an entry in h->dev[] array.
+ */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
int entry, struct hpsa_scsi_dev_t *new_entry)
{
- int offload_enabled;
/* assumes h->devlock is held */
BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+ /*
+ * ioaccel_handle may have changed for a dual domain disk.
+ */
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+
/* Raid offload parameters changed. Careful about the ordering. */
- if (new_entry->offload_config && new_entry->offload_enabled) {
+ if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
/*
* if drive is newly offload_enabled, we want to copy the
* raid map data first. If previously offload_enabled and
* offload_config were set, raid map data had better be
- * the same as it was before. if raid map data is changed
+ * the same as it was before. If raid map data has changed
* then it had better be the case that
* h->dev[entry]->offload_enabled is currently 0.
*/
h->dev[entry]->raid_map = new_entry->raid_map;
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
}
- if (new_entry->hba_ioaccel_enabled) {
+ if (new_entry->offload_to_be_enabled) {
h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
}
/*
* We can turn off ioaccel offload now, but need to delay turning
- * it on until we can update h->dev[entry]->phys_disk[], but we
+ * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
* can't do that until all the devices are updated.
*/
- h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
- if (!new_entry->offload_enabled)
+ h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
+
+ /*
+ * turn ioaccel off immediately if told to do so.
+ */
+ if (!new_entry->offload_to_be_enabled)
h->dev[entry]->offload_enabled = 0;
- offload_enabled = h->dev[entry]->offload_enabled;
- h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
- h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
h->dev[entry] = new_entry;
added[*nadded] = new_entry;
(*nadded)++;
+
hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
- new_entry->offload_to_be_enabled = new_entry->offload_enabled;
- new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
return 1;
if (dev1->offload_config != dev2->offload_config)
return 1;
- if (dev1->offload_enabled != dev2->offload_enabled)
+ if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
return 1;
if (!is_logical_dev_addr_mode(dev1->scsi3addr))
if (dev1->queue_depth != dev2->queue_depth)
return 1;
+ /*
+ * This can happen for dual domain devices. An active
+ * path change causes the ioaccel handle to change.
+ *
+ * For example, note the handle differences between p0 and p1:
+ * Device WWN ,WWN hash,Handle
+ * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
+ * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
+ */
+ if (dev1->ioaccel_handle != dev2->ioaccel_handle)
+ return 1;
return 0;
}
* be 0, but we'll turn it off here just in case
*/
if (!logical_drive->phys_disk[i]) {
+ dev_warn(&h->pdev->dev,
+ "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
+ __func__,
+ h->scsi_host->host_no, logical_drive->bus,
+ logical_drive->target, logical_drive->lun);
logical_drive->offload_enabled = 0;
logical_drive->offload_to_be_enabled = 0;
logical_drive->queue_depth = 8;
* way too high for partial stripe writes
*/
logical_drive->queue_depth = qdepth;
- else
- logical_drive->queue_depth = h->nr_cmds;
+ else {
+ if (logical_drive->external)
+ logical_drive->queue_depth = EXTERNAL_QD;
+ else
+ logical_drive->queue_depth = h->nr_cmds;
+ }
}
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
/*
* If offload is currently enabled, the RAID map and
* phys_disk[] assignment *better* not be changing
- * and since it isn't changing, we do not need to
- * update it.
+ * because we would be changing ioaccel phys_disk[] pointers
+ * on an ioaccel volume that is processing I/O requests.
+ *
+ * If an ioaccel volume's status changed, either because it was
+ * re-configured and thus underwent a transformation, or because
+ * a drive failed, we would have received a state change
+ * request and ioaccel should have been turned off. When the
+ * transformation completes, we get another state change
+ * request to turn ioaccel back on. In this case, we need
+ * to update the ioaccel information.
+ *
+ * Thus: If it is not currently enabled, but will be after
+ * the scan completes, make sure the ioaccel pointers
+ * are up to date.
*/
- if (dev[i]->offload_enabled)
- continue;
- hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
+ if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
+ hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
}
}
break;
if (++waits > 20)
break;
+ msleep(1000);
+ }
+
+ if (waits > 20)
dev_warn(&h->pdev->dev,
"%s: removing device with %d outstanding commands!\n",
__func__, cmds);
- msleep(1000);
- }
}
static void hpsa_remove_device(struct ctlr_info *h,
if (!h->scsi_host)
return;
+ /*
+ * Allow for commands to drain
+ */
+ device->removed = 1;
+ hpsa_wait_for_outstanding_commands_for_dev(h, device);
+
if (is_logical_device(device)) { /* RAID */
sdev = scsi_device_lookup(h->scsi_host, device->bus,
device->target, device->lun);
}
} else { /* HBA */
- device->removed = 1;
- hpsa_wait_for_outstanding_commands_for_dev(h, device);
-
hpsa_remove_sas_device(device);
}
}
}
hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
- /* Now that h->dev[]->phys_disk[] is coherent, we can enable
+ /*
+ * Now that h->dev[]->phys_disk[] is coherent, we can enable
* any logical drives that need it enabled.
+ *
+ * The raid map should be current by now.
+ *
+ * We are updating the device list used for I/O requests.
*/
for (i = 0; i < h->ndevices; i++) {
if (h->dev[i] == NULL)
/*
* Any RAID offload error results in retry which will use
- * the normal I/O path so the controller can handle whatever's
+ * the normal I/O path so the controller can handle whatever is
* wrong.
*/
if (is_logical_device(dev) &&
}
}
+ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
+ u8 page, u8 *buf, size_t bufsize)
+ {
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_alloc(h);
+ if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ if (rc)
+ goto out;
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+ out:
+ cmd_free(h, c);
+ return rc;
+ }
+
+ static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
+ u8 *scsi3addr)
+ {
+ u8 *buf;
+ u64 sa = 0;
+ int rc = 0;
+
+ buf = kzalloc(1024, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
+ buf, 1024);
+
+ if (rc)
+ goto out;
+
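+ /*
+ * Assumption: with page code 1 this is the SES Configuration
+ * diagnostic page, whose enclosure descriptor carries the 8-byte
+ * enclosure logical identifier at byte offset 12 of the page.
+ */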
+ sa = get_unaligned_be64(buf+12);
+
+ out:
+ kfree(buf);
+ return sa;
+ }
+
static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
u16 page, unsigned char *buf,
unsigned char bufsize)
goto out;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
return -1;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
goto out;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+ encl_dev->sas_address =
+ hpsa_get_enclosure_logical_identifier(h, scsi3addr);
+
if (encl_dev->target == -1 || encl_dev->lun == -1) {
rc = IO_OK;
goto out;
c->Request.CDB[5] = 0;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc)
goto out;
dev->sas_address = sa;
}
+ static void hpsa_ext_ctrl_present(struct ctlr_info *h,
+ struct ReportExtendedLUNdata *physdev)
+ {
+ u32 nphysicals;
+ int i;
+
+ if (h->discovery_polling)
+ return;
+
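+ /*
+ * LUNListLength is in bytes and each extended report-LUNs entry is
+ * 24 bytes; the +1 widens the scan by one entry (presumably to
+ * cover the controller LUN itself).
+ */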
+ nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
+
+ for (i = 0; i < nphysicals; i++) {
+ if (physdev->LUN[i].device_type ==
+ BMIC_DEVICE_TYPE_CONTROLLER
+ && !is_hba_lunid(physdev->LUN[i].lunid)) {
+ dev_info(&h->pdev->dev,
+ "External controller present, activate discovery polling and disable rld caching\n");
+ hpsa_disable_rld_caching(h);
+ h->discovery_polling = 1;
+ break;
+ }
+ }
+ }
+
/* Get a device id from inquiry page 0x83 */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
unsigned char scsi3addr[], u8 page)
return true;
}
+ /*
+ * Called during a scan operation.
+ * Sets ioaccel status on the new device list, not the existing device list
+ *
+ * The device list used during I/O will be updated later in
+ * adjust_hpsa_scsi_table.
+ */
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
this_device->offload_config =
!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
if (this_device->offload_config) {
- this_device->offload_enabled =
+ this_device->offload_to_be_enabled =
!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
if (hpsa_get_raid_map(h, scsi3addr, this_device))
- this_device->offload_enabled = 0;
+ this_device->offload_to_be_enabled = 0;
}
- this_device->offload_to_be_enabled = this_device->offload_enabled;
+
out:
kfree(buf);
return;
if (extended_response)
c->Request.CDB[1] = extended_response;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
+ NO_TIMEOUT);
if (rc) {
cmd_free(h, c);
return HPSA_VPD_LV_STATUS_UNSUPPORTED;
memset(id_ctlr, 0, sizeof(*id_ctlr));
rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
if (!rc)
- if (id_ctlr->configured_logical_drive_count < 256)
+ if (id_ctlr->configured_logical_drive_count < 255)
*nlocals = id_ctlr->configured_logical_drive_count;
else
*nlocals = le16_to_cpu(
*/
ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
+ hpsa_ext_ctrl_present(h, physdev_list);
+
/* Allocate the per device structures */
for (i = 0; i < ndevs_to_allocate; i++) {
if (i >= HPSA_MAX_DEVICES) {
int phys_dev_index = i - (raid_ctlr_position == 0);
bool skip_device = false;
+ memset(tmpdevice, 0, sizeof(*tmpdevice));
+
physical_device = i < nphysicals + (raid_ctlr_position == 0);
/* Figure out where the LUN ID info is coming from */
continue;
}
- /* Get device type, vendor, model, device id */
+ /* Get device type, vendor, model, device id, raid_map */
rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
&is_OBDR);
if (rc == -ENOMEM) {
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
this_device = currentsd[ncurrent];
- /* Turn on discovery_polling if there are ext target devices.
- * Event-based change notification is unreliable for those.
- */
- if (!h->discovery_polling) {
- if (tmpdevice->external) {
- h->discovery_polling = 1;
- dev_info(&h->pdev->dev,
- "External target, activate discovery polling.\n");
- }
- }
-
-
*this_device = *tmpdevice;
this_device->physical_device = physical_device;
c->Request.CDB[0] = HPSA_INQUIRY;
c->Request.CDB[4] = size & 0xFF;
break;
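+ /*
+ * RECEIVE DIAGNOSTIC RESULTS, 6-byte CDB (per SPC): byte 1 sets the
+ * PCV bit, byte 2 selects page code 1, and bytes 3-4 carry the
+ * allocation length.
+ */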
+ case RECEIVE_DIAGNOSTIC:
+ c->Request.CDBLen = 6;
+ c->Request.type_attr_dir =
+ TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
+ c->Request.Timeout = 0;
+ c->Request.CDB[0] = cmd;
+ c->Request.CDB[1] = 1;
+ c->Request.CDB[2] = 1;
+ c->Request.CDB[3] = (size >> 8) & 0xFF;
+ c->Request.CDB[4] = size & 0xFF;
+ break;
case HPSA_REPORT_LOG:
case HPSA_REPORT_PHYS:
/* Talking to controller so it's a physical command
spin_unlock_irqrestore(&h->lock, flags);
dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
lockup_detected, h->heartbeat_sample_interval / HZ);
+ if (lockup_detected == 0xffff0000) {
+ dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
+ writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
+ }
pci_disable_device(h->pdev);
fail_all_outstanding_cmds(h);
}
return false;
}
- static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+ /*
+ * Set ioaccel status for all ioaccel volumes.
+ *
+ * Called from monitor controller worker (hpsa_event_monitor_worker)
+ *
+ * A volume (or the volumes that comprise an array set) may be undergoing
+ * a transformation, so we will be turning off ioaccel for all volumes
+ * that make up the array.
+ */
+ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
{
+ int rc;
int i;
+ u8 ioaccel_status;
+ unsigned char *buf;
+ struct hpsa_scsi_dev_t *device;
+
+ if (!h)
+ return;
+
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ /*
+ * Run through current device list used during I/O requests.
+ */
+ for (i = 0; i < h->ndevices; i++) {
+ device = h->dev[i];
+
+ if (!device)
+ continue;
+ if (!device->scsi3addr)
+ continue;
+ if (!hpsa_vpd_page_supported(h, device->scsi3addr,
+ HPSA_VPD_LV_IOACCEL_STATUS))
+ continue;
+
+ memset(buf, 0, 64);
+
+ rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
+ buf, 64);
+ if (rc != 0)
+ continue;
+
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (device->offload_config)
+ device->offload_to_be_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+
+ /*
+ * Immediately turn off ioaccel for any volume the
+ * controller tells us to. Some of the reasons could be:
+ * transformation - a change to the LVs of an array;
+ * degraded volume - a component failure.
+ *
+ * If ioaccel is to be re-enabled, re-enable it later during the
+ * scan operation so the driver can get a fresh raid map
+ * before turning ioaccel back on.
+ */
+ if (!device->offload_to_be_enabled)
+ device->offload_enabled = 0;
+ }
+
+ kfree(buf);
+ }
+
+ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+ {
char *event_type;
if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
event_type = "configuration change";
/* Stop sending new RAID offload reqs via the IO accelerator */
scsi_block_requests(h->scsi_host);
- for (i = 0; i < h->ndevices; i++) {
- h->dev[i]->offload_enabled = 0;
- h->dev[i]->offload_to_be_enabled = 0;
- }
+ hpsa_set_ioaccel_status(h);
hpsa_drain_accel_commands(h);
/* Set 'accelerator path config change' bit */
dev_warn(&h->pdev->dev,
writel(h->events, &(h->cfgtable->clear_event_notify));
writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_clear_event_notify_ack(h);
- #if 0
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- hpsa_wait_for_mode_change_ack(h);
- #endif
}
return;
}
if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
hpsa_perform_rescan(h);
} else if (h->discovery_polling) {
- hpsa_disable_rld_caching(h);
if (hpsa_luns_changed(h)) {
dev_info(&h->pdev->dev,
"driver discovery polling rescan.\n");
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_TODEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
+ PCI_DMA_FROMDEVICE, NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
+ hpsa_delete_sas_host(h);
+
/*
* Call before disabling interrupts.
* scsi_remove_host can trigger I/O operations especially
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
- hpsa_delete_sas_host(h);
-
kfree(h); /* init_one 1 */
}
struct sas_phy *phy = hpsa_sas_phy->phy;
sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
- sas_phy_free(phy);
if (hpsa_sas_phy->added_to_port)
list_del(&hpsa_sas_phy->phy_list_entry);
+ sas_phy_delete(phy);
kfree(hpsa_sas_phy);
}
struct hpsa_sas_port *hpsa_sas_port;
struct hpsa_sas_phy *hpsa_sas_phy;
- parent_dev = &h->scsi_host->shost_gendev;
+ parent_dev = &h->scsi_host->shost_dev;
hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
if (!hpsa_sas_node)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
- *identifier = 0;
+ *identifier = rphy->identify.sas_address;
return 0;
}
}
task->slow_task = slow;
- init_timer(&slow->timer);
+ slow->task = task;
+ timer_setup(&slow->timer, NULL, 0);
init_completion(&slow->completion);
return task;
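+ /*
+ * Timer API conversion pattern used throughout this series (a sketch,
+ * assuming the timer_setup()/from_timer() interface): callbacks take a
+ * struct timer_list * and recover their container, which is why
+ * slow->task is stored above, e.g.
+ *
+ *	struct sas_task_slow *slow = from_timer(slow, t, timer);
+ *	struct sas_task *task = slow->task;
+ */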
hashed[2] = r & 0xFF;
}
-
- /* ---------- HA events ---------- */
-
- void sas_hae_reset(struct work_struct *work)
- {
- struct sas_ha_event *ev = to_sas_ha_event(work);
- struct sas_ha_struct *ha = ev->ha;
-
- clear_bit(HAE_RESET, &ha->pending);
- }
-
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
int error = 0;
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
-
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pring ? pring->txq_max : 0);
}
static DEVICE_ATTR(txq_hw, S_IRUGO,
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pring ? pring->txcmplq_max : 0);
}
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
continue;
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ if (ndlp->nrport)
+ nvme_fc_set_remoteport_devloss(ndlp->nrport->remoteport,
+ vport->cfg_devloss_tmo);
+#endif
}
spin_unlock_irq(shost->host_lock);
}
*/
LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
- "Define fc4 type to register with fabric.");
+ "Enable FC4 Protocol support - FCP / NVME");
/*
* lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
* percentage will go to NVME.
*/
LPFC_ATTR_R(xri_split, 50, 10, 90,
- "Division of XRI resources between SCSI and NVME");
+ "Percentage of FCP XRI resources versus NVME");
/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
* to the event associated with the ndlp.
**/
void
-lpfc_els_retry_delay(unsigned long ptr)
+lpfc_els_retry_delay(struct timer_list *t)
{
- struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+ struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
struct lpfc_vport *vport = ndlp->vport;
struct lpfc_hba *phba = vport->phba;
unsigned long flags;
(len + pcmd), vport, ndlp);
len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
&rdp_context->link_stat);
- /* Check if nport is logged, BZ190632 */
- if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
- goto lpfc_skip_descriptor;
-
len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
&rdp_context->link_stat, vport);
len += lpfc_rdp_res_oed_temp_desc(phba,
len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
rdp_context->page_a0, vport);
- lpfc_skip_descriptor:
rdp_res->length = cpu_to_be32(len - 8);
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
-
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2422 ELS RDP Request "
"dec len %d tag x%x port_id %d len %d\n",
be32_to_cpu(rdp_req->nport_id_desc.nport_id),
be32_to_cpu(rdp_req->nport_id_desc.length));
- if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
- !phba->cfg_enable_SmartSAN) {
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_expl = LSEXP_PORT_LOGIN_REQ;
- goto error;
- }
if (sizeof(struct fc_rdp_nport_desc) !=
be32_to_cpu(rdp_req->rdp_des_length))
goto rjt_logerr;
* lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
**/
void
-lpfc_els_timeout(unsigned long ptr)
+lpfc_els_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long iflag;
timeout = (uint32_t)(phba->fc_ratov << 1);
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
if ((phba->pport->load_flag & FC_UNLOADING))
return;
* posted event WORKER_FABRIC_BLOCK_TMO.
**/
void
-lpfc_fabric_block_timeout(unsigned long ptr)
+lpfc_fabric_block_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer);
unsigned long iflags;
uint32_t tmo_posted;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
+
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
list) {
rxid, 1);
/* Check if TXQ queue needs to be serviced */
- if (!(list_empty(&pring->txq)))
+ if (pring && !list_empty(&pring->txq))
lpfc_worker_wake_up(phba);
return;
}
/* Unblock ELS traffic */
pring = lpfc_phba_elsring(phba);
- pring->flag &= ~LPFC_STOP_IOCB_EVENT;
+ if (pring)
+ pring->flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if (mb->mbxStatus) {
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
- (unsigned long)ndlp);
+ timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
lpfc_cancel_retry_delay_tmo(vport, ndlp);
if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
!(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
- !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
+ phba->sli_rev != LPFC_SLI_REV4) {
/* For this case we need to cleanup the default rpi
* allocated by the firmware.
*/
psli = &phba->sli;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return;
/* Error matching iocb on txq or txcmplq
* First check the txq.
*/
/*****************************************************************************/
void
-lpfc_disc_timeout(unsigned long ptr)
+lpfc_disc_timeout(struct timer_list *t)
{
- struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+ struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
struct lpfc_hba *phba = vport->phba;
uint32_t tmo_posted;
unsigned long flags = 0;
* be cleared by the worker thread after it has taken the event bitmap out.
**/
static void
-lpfc_hb_timeout(unsigned long ptr)
+lpfc_hb_timeout(struct timer_list *t)
{
struct lpfc_hba *phba;
uint32_t tmo_posted;
unsigned long iflag;
- phba = (struct lpfc_hba *)ptr;
+ phba = from_timer(phba, t, hb_tmofunc);
/* Check for heart beat timeout conditions */
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
* be cleared by the worker thread after it has taken the event bitmap out.
**/
static void
-lpfc_rrq_timeout(unsigned long ptr)
+lpfc_rrq_timeout(struct timer_list *t)
{
struct lpfc_hba *phba;
unsigned long iflag;
- phba = (struct lpfc_hba *)ptr;
+ phba = from_timer(phba, t, rrq_tmr);
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
if (!(phba->pport->load_flag & FC_UNLOADING))
phba->hba_flag |= HBA_RRQ_ACTIVE;
lpfc_destroy_vport_work_array(phba, vports);
lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
+
+ if (phba->wq)
+ flush_workqueue(phba->wq);
}
/**
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
- setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
- (unsigned long)vport);
+ timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
- setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
- (unsigned long)vport);
+ timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
- setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
- (unsigned long)vport);
+ timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
lpfc_stop_port(struct lpfc_hba *phba)
{
phba->lpfc_stop_port(phba);
+
+ if (phba->wq)
+ flush_workqueue(phba->wq);
}
/**
* worker thread context.
**/
static void
-lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
+lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
/* Don't send FCF rediscovery event if timer cancelled */
spin_lock_irq(&phba->hbalock);
INIT_LIST_HEAD(&phba->luns);
/* MBOX heartbeat timer */
- setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
+ timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
/* Fabric block timer */
- setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
- (unsigned long)phba);
+ timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
/* EA polling mode timer */
- setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
- (unsigned long)phba);
+ timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
/* Heartbeat timer */
- setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
+ timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
return 0;
}
*/
/* FCP polling mode timer */
- setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
- (unsigned long)phba);
+ timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
* Initialize timers used by driver
*/
- setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
+ timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
/* FCF rediscover timer */
- setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
- (unsigned long)phba);
+ timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
/*
* Control structure for handling external multi-buffer mailbox
"Extents and RPI headers enabled.\n");
}
mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
goto out_free_bsmbx;
}
return error;
}
+ /* workqueue for deferred irq use */
+ phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
+
return 0;
}
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
+ if (phba->wq) {
+ flush_workqueue(phba->wq);
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
+ }
+
/* Stop kernel worker thread */
kthread_stop(phba->worker_thread);
}
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+ /*
+ * Bring down the SLI Layer. This step disables all interrupts,
+ * clears the rings, discards all mailbox commands, and resets
+ * the HBA FCoE function.
+ */
+ lpfc_debugfs_terminate(vport);
+ lpfc_sli4_hba_unset(phba);
/* Perform ndlp cleanup on the physical port. The nvme and nvmet
* localports are destroyed after to cleanup all transport memory.
lpfc_nvmet_destroy_targetport(phba);
lpfc_nvme_destroy_localport(vport);
- /*
- * Bring down the SLI Layer. This step disables all interrupts,
- * clears the rings, discards all mailbox commands, and resets
- * the HBA FCoE function.
- */
- lpfc_debugfs_terminate(vport);
- lpfc_sli4_hba_unset(phba);
+ lpfc_stop_hba_timers(phba);
spin_lock_irq(&phba->hbalock);
list_del_init(&vport->listentry);
spin_unlock_irq(&phba->hbalock);
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
+ if (vport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
struct lpfc_nvme_buf *lpfc_ncmd)
{
uint64_t seg1, seg2, seg3, seg4;
+ uint64_t segsum;
- if (!phba->ktime_on)
- return;
if (!lpfc_ncmd->ts_last_cmd ||
!lpfc_ncmd->ts_cmd_start ||
!lpfc_ncmd->ts_cmd_wqput ||
!lpfc_ncmd->ts_isr_cmpl ||
!lpfc_ncmd->ts_data_nvme)
return;
+
+ if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
+ return;
if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
return;
if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
* cmpl is handled off to the NVME Layer.
*/
seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
- if (seg1 > 5000000) /* 5 ms - for sequential IOs */
- return;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
+ seg1 = 0;
/* Calculate times relative to start of IO */
seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
- seg3 = (lpfc_ncmd->ts_isr_cmpl -
- lpfc_ncmd->ts_cmd_start) - seg2;
- seg4 = (lpfc_ncmd->ts_data_nvme -
- lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
+ segsum = seg2;
+ seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+
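+ /*
+ * Reading of the arithmetic above: seg1 is the gap since the previous
+ * command (zeroed when over 5 ms), seg2 the driver submit path (cmd
+ * start to WQE put), seg3 the hardware round trip (WQE put to ISR
+ * completion), and seg4 the driver completion path (ISR to handoff to
+ * the NVME layer); segsum guards each subtraction against underflow.
+ */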
phba->ktime_data_samples++;
phba->ktime_seg1_total += seg1;
if (seg1 < phba->ktime_seg1_min)
} else {
lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
LPFC_IOCB_STATUS_MASK);
- lpfc_ncmd->result = wcqe->parameter;
+ lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
* IO error is when the adapter rejects it. All other
lpfc_ncmd->status, lpfc_ncmd->result,
wcqe->total_data_placed);
break;
+ case IOSTAT_LOCAL_REJECT:
+ /* Let fall through to set command final state. */
+ if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_IOERR,
+ "6032 Delay Aborted cmd %p "
+ "nvme cmd %p, xri x%x, "
+ "xb %d\n",
+ lpfc_ncmd, nCmd,
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ bf_get(lpfc_wcqe_c_xb, wcqe));
default:
out_err:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6072 NVME Completion Error: xri %x "
"status x%x result x%x placed x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
wcqe->total_data_placed);
nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
- nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
+ nCmd->status = NVME_SC_INTERNAL;
}
}
* owns the dma address.
*/
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
lpfc_ncmd->ts_data_nvme = ktime_get_ns();
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
#endif
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
- nCmd->done(nCmd);
+
+ /* NVME targets need completion held off until the abort exchange
+ * completes.
+ */
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+ nCmd->done(nCmd);
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
}
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
"nvmeIO sg_cnt %d\n",
- phba->cfg_nvme_seg_cnt,
+ phba->cfg_nvme_seg_cnt + 1,
lpfc_ncmd->seg_cnt);
lpfc_ncmd->seg_cnt = 0;
return 1;
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING) {
+ ret = -ENODEV;
+ goto out_fail;
+ }
+
+ /* Validate pointers. */
+ if (!pnvme_lport || !pnvme_rport || !freqpriv) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
+ "6117 No Send:IO submit ptrs NULL, lport %p, "
+ "rport %p fcreq_priv %p\n",
+ pnvme_lport, pnvme_rport, freqpriv);
+ ret = -ENODEV;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
goto out_fail;
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on) {
+ if (start) {
lpfc_ncmd->ts_cmd_start = start;
lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+ } else {
+ lpfc_ncmd->ts_cmd_start = 0;
}
#endif
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_dec(&ndlp->cmd_pending);
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
+ if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl)
{
- lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6145 ABORT_XRI_CN completing on rpi x%x "
"original iotag x%x, abort cmd iotag x%x "
"req_tag x%x, status x%x, hwstatus x%x\n",
* pci bus space for an I/O. The DMA buffer includes the
* number of SGE's necessary to support the sg_tablesize.
*/
- lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL,
- &lpfc_ncmd->dma_handle);
+ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
if (!lpfc_ncmd->data) {
kfree(lpfc_ncmd);
break;
}
- memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
spin_unlock(&phba->nvme_buf_list_put_lock);
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
- if (lpfc_test_rrq_active(
- phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
- continue;
list_del_init(&lpfc_ncmd->list);
found = 1;
break;
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
- lpfc_ncmd->nvmeCmd = NULL;
list_add_tail(&lpfc_ncmd->list,
&phba->sli4_hba.lpfc_abts_nvme_buf_list);
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remote_port;
struct nvme_fc_port_info rpinfo;
+ struct lpfc_nodelist *prev_ndlp;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
* new rport.
*/
rport = remote_port->private;
- if (ndlp->nrport == rport) {
+ if (ndlp->nrport) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
remote_port->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
- } else {
- /* New rport. */
- rport->remoteport = remote_port;
- rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
- ndlp->nrport = rport;
- lpfc_printf_vlog(vport, KERN_INFO,
- LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
- "Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
- rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ prev_ndlp = rport->ndlp;
+
+ /* Sever the ndlp<->rport connection before dropping
+ * the ndlp ref from register.
+ */
+ ndlp->nrport = NULL;
+ rport->ndlp = NULL;
+ if (prev_ndlp)
+ lpfc_nlp_put(ndlp);
}
+
+ /* Clean bind the rport to the ndlp. */
+ rport->remoteport = remote_port;
+ rport->lport = lport;
+ rport->ndlp = lpfc_nlp_get(ndlp);
+ if (!rport->ndlp)
+ return -1;
+ ndlp->nrport = rport;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_DISC | LOG_NODE,
+ "6022 Binding new rport to "
+ "lport %p Rport WWNN 0x%llx, "
+ "Rport WWPN 0x%llx DID "
+ "x%06x Role x%x\n",
+ lport,
+ rpinfo.node_name, rpinfo.port_name,
+ rpinfo.port_id, rpinfo.port_role);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
* @axri: pointer to the fcp xri abort wcqe structure.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
- * FCP aborted xri.
+ * NVME aborted xri. Aborted NVME IO commands are completed to the transport
+ * here.
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
- int rrq_empty = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
spin_unlock(
&phba->sli4_hba.abts_nvme_buf_list_lock);
- rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
- if (ndlp) {
- lpfc_set_rrq_active(
- phba, ndlp,
- lpfc_ncmd->cur_iocbq.sli4_lxritag,
- rxid, 1);
+ if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
- }
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6311 XRI Aborted xri x%x tag x%x "
- "released\n",
- xri, lpfc_ncmd->cur_iocbq.iotag);
-
+ "6311 nvme_cmd %p xri x%x tag x%x "
+ "abort complete and xri released\n",
+ lpfc_ncmd->nvmeCmd, xri,
+ lpfc_ncmd->cur_iocbq.iotag);
+
+ /* Aborted NVME commands are required to not complete
+ * before the abort exchange command fully completes.
+ * Once completed, it is available via the put list.
+ */
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
- if (rrq_empty)
- lpfc_worker_wake_up(phba);
return;
}
}
struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
- static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
- struct lpfc_eqe *eqe, uint32_t qidx);
+ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
+ struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given response iocb using the iotag of the
- * response iocb. This function is called with the hbalock held.
+ * response iocb. This function is called with the hbalock held
+ * for sli3 devices or the ring_lock for sli4 devices.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
unsigned long iflag;
/* Based on the iotag field, get the cmd IOCB from the txcmplq */
- spin_lock_irqsave(&phba->hbalock, iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_lock_irqsave(&pring->ring_lock, iflag);
+ else
+ spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
- spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ else
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
* and wake up worker thread to process it. Otherwise, it will set up the
* Error Attention polling timer for the next poll.
**/
-void lpfc_poll_eratt(unsigned long ptr)
+void lpfc_poll_eratt(struct timer_list *t)
{
struct lpfc_hba *phba;
uint32_t eratt = 0;
uint64_t sli_intr, cnt;
- phba = (struct lpfc_hba *)ptr;
+ phba = from_timer(phba, t, eratt_poll);
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
* done by the worker thread function lpfc_mbox_timeout_handler.
**/
void
-lpfc_mbox_timeout(unsigned long ptr)
+lpfc_mbox_timeout(struct timer_list *t)
{
- struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
+ struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
unsigned long iflag;
uint32_t tmo_posted;
* for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
- if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+ if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
piocb->hba_wqidx =
lpfc_sli4_scmd_to_wqidx_distr(phba,
piocb->context1);
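+ /* Clamp the distributed index into the configured FCP WQ range. */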
+ piocb->hba_wqidx = piocb->hba_wqidx %
+ phba->cfg_fcp_io_channel;
+ }
return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
} else {
if (unlikely(!phba->sli4_hba.oas_wq))
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
+ if (!pring) {
+ if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+ cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+ else
+ cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+ goto abort_iotag_exit;
+ }
+
/*
* If we're unloading, don't abort iocb on the ELS ring, but change
* the callback so that nothing happens when it finishes.
unsigned long iflags;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return NULL;
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
spin_lock_irqsave(&pring->ring_lock, iflags);
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
- /* Put the iocb back on the txcmplq */
- lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
- spin_unlock_irqrestore(&pring->ring_lock, iflags);
-
if (unlikely(!cmdiocbq)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
- "cmdiocb: iotag (%d)\n",
- bf_get(lpfc_wcqe_c_request_tag, wcqe));
+ "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
+ wcqe->word0, wcqe->total_data_placed,
+ wcqe->parameter, wcqe->word3);
lpfc_sli_release_iocbq(phba, irspiocbq);
return NULL;
}
+ /* Put the iocb back on the txcmplq */
+ lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+
/* Fake the irspiocbq and copy necessary response information */
lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
* completion queue, and then return.
*
**/
- static int
+ static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
struct lpfc_queue *speq)
{
struct lpfc_queue *cq = NULL, *childq;
- struct lpfc_cqe *cqe;
- bool workposted = false;
- int ecount = 0;
uint16_t cqid;
/* Get the reference to the corresponding CQ */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0365 Slow-path CQ identifier "
"(%d) does not exist\n", cqid);
- return 0;
+ return;
}
/* Save EQ associated with this CQ */
cq->assoc_qp = speq;
+ if (!queue_work(phba->wq, &cq->spwork))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0390 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
+ }
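+ /*
+ * Design note: CQ servicing now runs from per-queue work items on
+ * phba->wq (see the INIT_WORK calls added at queue allocation), so
+ * the EQE handlers only locate the CQ and schedule its work, logging
+ * when the schedule fails.
+ */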
+
+ /**
+ * lpfc_sli4_sp_process_cq - Process a slow-path completion queue
+ * @work: Pointer to the work_struct embedded in the completion queue.
+ *
+ * This routine processes the entries on a slow-path completion queue
+ * whose handling was deferred from lpfc_sli4_sp_handle_eqe(). It
+ * dispatches each entry according to the queue type, rearms the
+ * completion queue, and then returns.
+ **/
+ static void
+ lpfc_sli4_sp_process_cq(struct work_struct *work)
+ {
+ struct lpfc_queue *cq =
+ container_of(work, struct lpfc_queue, spwork);
+ struct lpfc_hba *phba = cq->phba;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ccount = 0;
+
/* Process all the entries to the CQ */
switch (cq->type) {
case LPFC_MCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
- if (!(++ecount % cq->entry_repost))
+ if (!(++ccount % cq->entry_repost))
break;
cq->CQ_mbox++;
}
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- if ((cq->subtype == LPFC_FCP) ||
- (cq->subtype == LPFC_NVME))
+ if (cq->subtype == LPFC_FCP ||
+ cq->subtype == LPFC_NVME) {
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+ #endif
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
cqe);
- else
+ } else {
workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
cqe);
- if (!(++ecount % cq->entry_repost))
+ }
+ if (!(++ccount % cq->entry_repost))
break;
}
/* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
+ if (ccount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ccount;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0370 Invalid completion queue type (%d)\n",
cq->type);
- return 0;
+ return;
}
/* Catch the no cq entry condition, log an error */
- if (unlikely(ecount == 0))
+ if (unlikely(ccount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0371 No entry from the CQ: identifier "
"(x%x), type (%d)\n", cq->queue_id, cq->type);
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
-
- return ecount;
}
/**
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return;
}
-
- if (cq->assoc_qp)
- cmdiocbq->isr_timestamp =
- cq->assoc_qp->isr_timestamp;
-
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ cmdiocbq->isr_timestamp = cq->isr_timestamp;
+ #endif
if (cmdiocbq->iocb_cmpl == NULL) {
if (cmdiocbq->wqe_cmpl) {
if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
lpfc_nvmet_unsol_fcp_event(
phba, idx, dma_buf,
- cq->assoc_qp->isr_timestamp);
+ cq->isr_timestamp);
return false;
}
drop:
* queue and process all the entries on the completion queue, rearm the
* completion queue, and then return.
**/
- static int
+ static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint32_t qidx)
{
struct lpfc_queue *cq = NULL;
- struct lpfc_cqe *cqe;
- bool workposted = false;
uint16_t cqid, id;
- int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"event: majorcode=x%x, minorcode=x%x\n",
bf_get_le32(lpfc_eqe_major_code, eqe),
bf_get_le32(lpfc_eqe_minor_code, eqe));
- return 0;
+ return;
}
/* Get the reference to the corresponding CQ */
/* Otherwise this is a Slow path event */
if (cq == NULL) {
- ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
- phba->sli4_hba.hba_eq[qidx]);
- return ecount;
+ lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
+ return;
}
process_cq:
"0368 Miss-matched fast-path completion "
"queue identifier: eqcqid=%d, fcpcqid=%d\n",
cqid, cq->queue_id);
- return 0;
+ return;
}
/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
+ if (!queue_work(phba->wq, &cq->irqwork))
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0363 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
+ }
+
+ /**
+ * lpfc_sli4_hba_process_cq - Process a fast-path completion queue
+ * @work: Pointer to the work_struct embedded in the completion queue.
+ *
+ * This routine processes the entries on a fast-path completion queue
+ * whose handling was deferred from lpfc_sli4_hba_handle_eqe(). It
+ * dispatches each entry according to the queue type, rearms the
+ * completion queue, and then returns.
+ **/
+ static void
+ lpfc_sli4_hba_process_cq(struct work_struct *work)
+ {
+ struct lpfc_queue *cq =
+ container_of(work, struct lpfc_queue, irqwork);
+ struct lpfc_hba *phba = cq->phba;
+ struct lpfc_cqe *cqe;
+ bool workposted = false;
+ int ccount = 0;
+
/* Process all the entries to the CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
+ #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ cq->isr_timestamp = ktime_get_ns();
+ else
+ cq->isr_timestamp = 0;
+ #endif
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ecount % cq->entry_repost))
+ if (!(++ccount % cq->entry_repost))
break;
}
/* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
- cq->assoc_qp->EQ_cqe_cnt += ecount;
+ if (ccount > cq->CQ_max_cqe)
+ cq->CQ_max_cqe = ccount;
+ cq->assoc_qp->EQ_cqe_cnt += ccount;
/* Catch the no cq entry condition */
- if (unlikely(ecount == 0))
+ if (unlikely(ccount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0369 No entry from fast-path completion "
"queue fcpcqid=%d\n", cq->queue_id);
/* wake up worker thread if there are works to be done */
if (workposted)
lpfc_worker_wake_up(phba);
-
- return ecount;
}
static void
lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
struct lpfc_queue *cq;
- struct lpfc_cqe *cqe;
- bool workposted = false;
uint16_t cqid;
- int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.fof_eq;
- /* Process all the entries to the OAS CQ */
- while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
- if (!(++ecount % cq->entry_repost))
- break;
- }
-
- /* Track the max number of CQEs processed in 1 EQ */
- if (ecount > cq->CQ_max_cqe)
- cq->CQ_max_cqe = ecount;
- cq->assoc_qp->EQ_cqe_cnt += ecount;
-
- /* Catch the no cq entry condition */
- if (unlikely(ecount == 0))
+ /* CQ work will be processed on CPU affinitized to this IRQ */
+ if (!queue_work(phba->wq, &cq->irqwork))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "9153 No entry from fast-path completion "
- "queue fcpcqid=%d\n", cq->queue_id);
-
- /* In any case, flash and re-arm the CQ */
- lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
-
- /* wake up worker thread if there are works to be done */
- if (workposted)
- lpfc_worker_wake_up(phba);
+ "0367 Cannot schedule soft IRQ "
+ "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
+ cqid, cq->queue_id, smp_processor_id());
}
/**
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- int ccount = 0;
int hba_eqidx;
/* Get the driver's phba structure from the dev_id */
if (unlikely(!fpeq))
return IRQ_NONE;
- #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->ktime_on)
- fpeq->isr_timestamp = ktime_get_ns();
- #endif
-
if (lpfc_fcp_look_ahead) {
if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
lpfc_sli4_eq_clr_intr(fpeq);
* Process all the event on FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
- if (eqe == NULL)
- break;
-
- ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
- if (!(++ecount % fpeq->entry_repost) ||
- ccount > LPFC_MAX_ISR_CQE)
+ lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
+ if (!(++ecount % fpeq->entry_repost))
break;
fpeq->EQ_processed++;
}
queue->entry_size = entry_size;
queue->entry_count = entry_count;
queue->phba = phba;
+ INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
+ INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
/* entry_repost will be set during q creation */
if (pcmd && pcmd->virt)
dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
kfree(pcmd);
- lpfc_sli_release_iocbq(phba, iocbq);
+ if (iocbq)
+ lpfc_sli_release_iocbq(phba, iocbq);
lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
uint32_t txq_cnt = 0;
pring = lpfc_phba_elsring(phba);
+ if (unlikely(!pring))
+ return 0;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_for_each_entry(piocbq, &pring->txq, list) {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
/* VENTURA */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
+ static int
+ megasas_set_dma_mask(struct megasas_instance *instance);
+ static int
+ megasas_alloc_ctrl_mem(struct megasas_instance *instance);
+ static inline void
+ megasas_free_ctrl_mem(struct megasas_instance *instance);
+ static inline int
+ megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
+ static inline void
+ megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
+ static inline void
+ megasas_init_ctrl_params(struct megasas_instance *instance);
+
+ /**
+ * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
+ * @instance: Adapter soft state
+ * @dcmd: DCMD frame inside MFI command
+ * @dma_addr: DMA address of buffer to be passed to FW
+ * @dma_len: Length of DMA buffer to be passed to FW
+ */
+ void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len)
+ {
+ if (instance->consistent_mask_64bit) {
+ dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
+ dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
+
+ } else {
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags);
+ }
+ }
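/*
 * Editorial sketch (not part of this patch): a hypothetical DCMD
 * caller. Callers fill the common frame fields and leave the SGE and
 * flags programming, and with it the 32-bit versus 64-bit
 * consistent-mask decision, to this one helper. buf_h and buf_len are
 * assumed names for an already-mapped DMA buffer.
 */
dcmd->cmd = MFI_CMD_DCMD;
dcmd->sge_count = 1;
dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->data_xfer_len = cpu_to_le32(buf_len);
megasas_set_dma_settings(instance, dcmd, buf_h, buf_len);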
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
megasas_check_and_restore_queue_depth(instance);
}
+static void megasas_sriov_heartbeat_handler(struct timer_list *t);
+
/**
- * megasas_start_timer - Initializes a timer object
+ * megasas_start_timer - Initializes sriov heartbeat timer object
* @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
*
*/
-void megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
+void megasas_start_timer(struct megasas_instance *instance)
{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
+ struct timer_list *timer = &instance->sriov_heartbeat_timer;
+
+ timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
+ timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
add_timer(timer);
}
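/*
 * Editorial sketch (not part of this patch): the generic shape of the
 * timer API conversion applied here. With timer_setup() the callback
 * receives the timer_list pointer itself, and from_timer() recovers
 * the enclosing structure, replacing the removed ->data and
 * ->function assignments. "foo" is a hypothetical example structure.
 */
struct foo {
struct timer_list timer;
};

static void foo_timeout(struct timer_list *t)
{
struct foo *f = from_timer(f, t, timer);
/* handle the expiry using f */
}

static void foo_arm(struct foo *f, unsigned long interval)
{
timer_setup(&f->timer, foo_timeout, 0);
f->timer.expires = jiffies + interval;
add_timer(&f->timer);
}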
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+
+ megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
instance->host->host_no);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
retval = megasas_issue_blocked_cmd(instance, cmd,
MEGASAS_ROUTINE_WAIT_TIME_VF);
else
}
/* Handler for SR-IOV heartbeat */
-void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
+static void megasas_sriov_heartbeat_handler(struct timer_list *t)
{
struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
+ from_timer(instance, t, sriov_heartbeat_timer);
if (instance->hb_host_mem->HB.fwCounter !=
instance->hb_host_mem->HB.driverCounter) {
/*
* First wait for all commands to complete
*/
- if (instance->ctrl_context) {
+ if (instance->adapter_type == MFI_SERIES) {
+ ret = megasas_generic_reset(scmd);
+ } else {
struct megasas_cmd_fusion *cmd;
cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
if (cmd)
MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
ret = megasas_reset_fusion(scmd->device->host,
SCSIIO_TIMEOUT_OCR);
- } else
- ret = megasas_generic_reset(scmd);
+ }
return ret;
}
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_task_abort_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_reset_target_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+
case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
default:
dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
+ megasas_complete_int_cmd(instance, cmd);
break;
}
}
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(
MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
else
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_RESET_FLAGS,
&instance->reg_set->doorbell);
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
for (i = 0; i < (10 * 1000); i += 20) {
if (readl(
&instance->
* max_sge_sz = 12 byte (sizeof megasas_sge64)
* Total 192 byte (3 MFI frame of 64 byte)
*/
- frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ frame_count = (instance->adapter_type == MFI_SERIES) ?
+ (15 + 1) : (3 + 1);
instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
memset(cmd->frame, 0, instance->mfi_frame_size);
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
- if (!instance->ctrl_context && reset_devices)
+ if ((instance->adapter_type == MFI_SERIES) && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
}
int j;
u16 max_cmd;
struct megasas_cmd *cmd;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
max_cmd = instance->max_mfi_cmds;
/*
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
- if (!instance->ctrl_context)
+ if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
+ sizeof(struct MR_PD_INFO));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
return ret;
}
+ ci = instance->pd_list_buf;
+ ci_h = instance->pd_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
+ (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
"failed/not supported by firmware\n");
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megaraid_sas_kill_hba(instance);
else
instance->pd_list_not_supported = 1;
}
- pci_free_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
dma_addr_t ci_h = 0;
u32 ld_count;
+ ci = instance->ld_list_buf;
+ ci_h = instance->ld_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_LIST),
- &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
dma_addr_t ci_h = 0;
u32 tgtid_count;
+ ci = instance->ld_targetid_list_buf;
+ ci_h = instance->ld_targetid_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
-
- if (!ci) {
- dev_warn(&instance->pdev->dev,
- "Failed to alloc mem for ld_list_query\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_TARGETID_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
return;
instance->supportmax256vd =
- instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
/* Below is additional check to address future FW enhancement */
- if (instance->ctrl_info->max_lds > 64)
+ if (instance->ctrl_info_buf->max_lds > 64)
instance->supportmax256vd = 1;
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct megasas_ctrl_info *ci;
- struct megasas_ctrl_info *ctrl_info;
dma_addr_t ci_h = 0;
- ctrl_info = instance->ctrl_info;
+ ci = instance->ctrl_info_buf;
+ ci_h = instance->ctrl_info_buf_h;
cmd = megasas_get_cmd(instance);
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_ctrl_info), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->mbox.b[0] = 1;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct megasas_ctrl_info));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
switch (ret) {
case DCMD_SUCCESS:
- memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
/* Save required controller information in
* CPU endianness format.
*/
- le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
- le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
+ le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+ le32_to_cpus((u32 *)&ci->adapterOperations2);
+ le32_to_cpus((u32 *)&ci->adapterOperations3);
+ le16_to_cpus((u16 *)&ci->adapter_operations4);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
*/
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
- ctrl_info->adapterOperations3.useSeqNumJbodFP;
+ ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
- ctrl_info->adapter_operations4.support_pd_map_target_id;
+ ci->adapter_operations4.support_pd_map_target_id;
/*Check whether controller is iMR or MR */
- instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
+ instance->is_imr = (ci->memory_size ? 0 : 1);
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
- le16_to_cpu(ctrl_info->memory_size));
+ le16_to_cpu(ci->memory_size));
instance->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ ci->properties.OnOffProperties.disableOnlineCtrlReset;
instance->secure_jbod_support =
- ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ ci->adapterOperations3.supportSecurityonJBOD;
dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
}
- pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
- ci, ci_h);
-
megasas_return_cmd(instance, cmd);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
+ CRASH_DMA_BUF_SIZE);
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
if (reset_devices || !fusion ||
- !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+ !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
dev_info(&instance->pdev->dev,
"Jbod map is not supported %s %d\n",
__func__, __LINE__);
reg_set = instance->reg_set;
- if (fusion)
+ if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
else {
switch (instance->pdev->device) {
goto fail_ready_state;
}
- if (instance->is_ventura) {
+ megasas_init_ctrl_params(instance);
+
+ if (megasas_set_dma_mask(instance))
+ goto fail_ready_state;
+
+ if (megasas_alloc_ctrl_mem(instance))
+ goto fail_alloc_dma_buf;
+
+ if (megasas_alloc_ctrl_dma_buffers(instance))
+ goto fail_alloc_dma_buf;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_3 =
readl(&instance->reg_set->outbound_scratch_pad_3);
instance->max_raid_mapsize = ((scratch_pad_3 >>
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
if (fusion) {
- if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
+ if (instance->adapter_type == THUNDERBOLT_SERIES) {
+ /* Thunderbolt Series*/
instance->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
- GFP_KERNEL);
- if (instance->ctrl_info == NULL)
- goto fail_init_adapter;
-
/*
* Below are default value for legacy Firmware.
* non-fusion based controllers
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_4 =
readl(&instance->reg_set->outbound_scratch_pad_4);
if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
/* stream detection initialization */
- if (instance->is_ventura && fusion) {
+ if (instance->adapter_type == VENTURA_SERIES) {
fusion->stream_detect_by_ld =
kzalloc(sizeof(struct LD_STREAM_DETECT *)
* MAX_LOGICAL_DRIVES_EXT,
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- ctrl_info = instance->ctrl_info;
+ ctrl_info = instance->ctrl_info_buf;
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
le16_to_cpu(ctrl_info->max_strips_per_io);
/* Launch SR-IOV heartbeat timer */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 1))
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
else
instance->skip_heartbeat_timer_del = 1;
}
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
+ fail_alloc_dma_buf:
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
fail_ready_state:
- kfree(instance->ctrl_info);
- instance->ctrl_info = NULL;
iounmap(instance->reg_set);
fail_ioremap:
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ megasas_set_dma_settings(instance, dcmd, el_info_h,
+ sizeof(struct megasas_evt_log_info));
if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
DCMD_SUCCESS) {
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num;
dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
+ sizeof(struct megasas_evt_detail));
if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len =
cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
- dcmd->sgl.sge32[0].phys_addr =
- cpu_to_le32(instance->tgt_prop_h);
- dcmd->sgl.sge32[0].length =
- cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
+ sizeof(struct MR_TARGET_PROPERTIES));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance,
cmd, MFI_IO_TIMEOUT_SECS);
else
return 0;
}
+ /**
+ * megasas_set_dma_mask - Set DMA mask for supported controllers
+ *
+ * @instance: Adapter soft state
+ * Description:
+ *
+ * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ *
+ * For Invader-
+ * By default, driver/FW will operate in 32 bit consistent DMA
+ * addresses, but if setting the 32 bit consistent DMA mask fails,
+ * the driver will try a 64 bit consistent mask, provided the FW is
+ * truly 64 bit DMA capable.
+ *
+ * For older controllers (Thunderbolt and MFI based adapters),
+ * driver/FW will operate in 32 bit consistent DMA addresses.
+ */
static int
- megasas_set_dma_mask(struct pci_dev *pdev)
+ megasas_set_dma_mask(struct megasas_instance *instance)
{
- /*
- * All our controllers are capable of performing 64-bit DMA
- */
+ u64 consistent_mask;
+ struct pci_dev *pdev;
+ u32 scratch_pad_2;
+
+ pdev = instance->pdev;
+ consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
+ DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+ (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
+ /*
+ * If 32 bit DMA mask fails, then try for 64 bit mask
+ * for FW capable of handling 64 bit DMA.
+ */
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ goto fail_set_dma_mask;
+ else if (dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(64)))
goto fail_set_dma_mask;
}
- } else {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
- goto fail_set_dma_mask;
- }
- /*
- * Ensure that all data structures are allocated in 32-bit
- * memory.
- */
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- /* Try 32bit DMA mask and 32 bit Consistent dma mask */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- dev_info(&pdev->dev, "set 32bit DMA mask"
- "and 32 bit consistent mask\n");
- else
- goto fail_set_dma_mask;
- }
+ } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
+ instance->consistent_mask_64bit = false;
+ else
+ instance->consistent_mask_64bit = true;
+
+ dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
+ (instance->consistent_mask_64bit ? "64" : "32"));
return 0;
fail_set_dma_mask:
- return 1;
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ return -1;
+
}
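/*
 * Editorial sketch (not part of this patch): the bare fallback ladder
 * underlying the function above. dma_set_mask_and_coherent() sets the
 * streaming and coherent masks in one call, which is why the driver
 * can test pdev->dev.coherent_dma_mask afterwards to record
 * consistent_mask_64bit.
 */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
return -EIO; /* no usable DMA addressing at all */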
- /**
- * megasas_probe_one - PCI hotplug entry point
- * @pdev: PCI device structure
- * @id: PCI ids of supported hotplugged adapter
+ /*
+ * megasas_set_adapter_type - Set adapter type.
+ * Supported controllers can be divided in
+ * 4 categories- enum MR_ADAPTER_TYPE {
+ * MFI_SERIES = 1,
+ * THUNDERBOLT_SERIES = 2,
+ * INVADER_SERIES = 3,
+ * VENTURA_SERIES = 4,
+ * };
+ * @instance: Adapter soft state
+ * return: void
*/
- static int megasas_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
{
- int rval, pos;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
- u16 control = 0;
- struct fusion_context *fusion = NULL;
-
- /* Reset MSI-X in the kdump kernel */
- if (reset_devices) {
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
- &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev,
- pos + PCI_MSIX_FLAGS,
- control &
- ~PCI_MSIX_FLAGS_ENABLE);
- }
+ if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
+ instance->adapter_type = MFI_SERIES;
+ } else {
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_VENTURA:
+ case PCI_DEVICE_ID_LSI_CRUSADER:
+ case PCI_DEVICE_ID_LSI_HARPOON:
+ case PCI_DEVICE_ID_LSI_TOMCAT:
+ case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+ case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+ instance->adapter_type = VENTURA_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ instance->adapter_type = THUNDERBOLT_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_INTRUDER:
+ case PCI_DEVICE_ID_LSI_INTRUDER_24:
+ case PCI_DEVICE_ID_LSI_CUTLASS_52:
+ case PCI_DEVICE_ID_LSI_CUTLASS_53:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->adapter_type = INVADER_SERIES;
+ break;
+ default: /* For all other supported controllers */
+ instance->adapter_type = MFI_SERIES;
+ break;
}
}
+ }
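/*
 * Editorial note (not part of this patch): the series values are
 * ordered by controller generation, which is what lets later hunks
 * gate on a generation range instead of enumerating device IDs, as in
 * the fusion queue-depth hunk further down:
 */
if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;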
- /*
- * PCI prepping: enable device set bus mastering and dma mask
- */
- rval = pci_enable_device_mem(pdev);
+ static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
+ {
+ instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->consumer_h);
- if (rval) {
- return rval;
+ if (!instance->producer || !instance->consumer) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate memory for producer, consumer\n");
+ return -1;
}
- pci_set_master(pdev);
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ return 0;
+ }
- if (megasas_set_dma_mask(pdev))
- goto fail_set_dma_mask;
+ /**
+ * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
+ * structures which are not common across MFI
+ * adapters and fusion adapters.
+ * For MFI based adapters, allocate producer and
+ * consumer buffers. For fusion adapters, allocate
+ * memory for fusion context.
+ * @instance: Adapter soft state
+ * return: 0 for SUCCESS
+ */
+ static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
+ {
+ switch (instance->adapter_type) {
+ case MFI_SERIES:
+ if (megasas_alloc_mfi_ctrl_mem(instance))
+ return -ENOMEM;
+ break;
+ case VENTURA_SERIES:
+ case THUNDERBOLT_SERIES:
+ case INVADER_SERIES:
+ if (megasas_alloc_fusion_context(instance))
+ return -ENOMEM;
+ break;
+ }
- host = scsi_host_alloc(&megasas_template,
- sizeof(struct megasas_instance));
+ return 0;
+ }
- if (!host) {
- dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
- goto fail_alloc_instance;
+ /*
+ * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
+ * producer, consumer buffers for MFI adapters
+ *
+ * @instance - Adapter soft instance
+ *
+ */
+ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
+ {
+ if (instance->adapter_type == MFI_SERIES) {
+ if (instance->producer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ } else {
+ megasas_free_fusion_context(instance);
}
+ }
- instance = (struct megasas_instance *)host->hostdata;
- memset(instance, 0, sizeof(*instance));
- atomic_set(&instance->fw_reset_no_pci_access, 0);
- instance->pdev = pdev;
+ /**
+ * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
+ * driver load time
+ *
+ * @instance- Adapter soft instance
+ * @return- 0 for SUCCESS
+ */
+ static inline
+ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
+ {
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
- switch (instance->pdev->device) {
- case PCI_DEVICE_ID_LSI_VENTURA:
- case PCI_DEVICE_ID_LSI_HARPOON:
- case PCI_DEVICE_ID_LSI_TOMCAT:
- case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
- case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
- instance->is_ventura = true;
- case PCI_DEVICE_ID_LSI_FUSION:
- case PCI_DEVICE_ID_LSI_PLASMA:
- case PCI_DEVICE_ID_LSI_INVADER:
- case PCI_DEVICE_ID_LSI_FURY:
- case PCI_DEVICE_ID_LSI_INTRUDER:
- case PCI_DEVICE_ID_LSI_INTRUDER_24:
- case PCI_DEVICE_ID_LSI_CUTLASS_52:
- case PCI_DEVICE_ID_LSI_CUTLASS_53:
- {
- if (megasas_alloc_fusion_context(instance)) {
- megasas_free_fusion_context(instance);
- goto fail_alloc_dma_buf;
- }
- fusion = instance->ctrl_context;
+ instance->evt_detail =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h);
- if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
- fusion->adapter_type = THUNDERBOLT_SERIES;
- else if (instance->is_ventura)
- fusion->adapter_type = VENTURA_SERIES;
- else
- fusion->adapter_type = INVADER_SERIES;
- }
- break;
- default: /* For all other supported controllers */
-
- instance->producer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->consumer_h);
-
- if (!instance->producer || !instance->consumer) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
- "memory for producer, consumer\n");
- goto fail_alloc_dma_buf;
+ if (!instance->evt_detail) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate event detail buffer\n");
+ return -ENOMEM;
+ }
+
+ if (fusion) {
+ fusion->ioc_init_request =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &fusion->ioc_init_request_phys,
+ GFP_KERNEL);
+
+ if (!fusion->ioc_init_request) {
+ dev_err(&pdev->dev,
+ "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
+ }
- *instance->producer = 0;
- *instance->consumer = 0;
- break;
+ instance->pd_list_buf =
+ pci_alloc_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ &instance->pd_list_buf_h);
+
+ if (!instance->pd_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
- /* Crash dump feature related initialisation*/
- instance->drv_buf_index = 0;
- instance->drv_buf_alloc = 0;
- instance->crash_dump_fw_support = 0;
- instance->crash_dump_app_support = 0;
- instance->fw_crash_state = UNAVAILABLE;
- spin_lock_init(&instance->crashdump_lock);
- instance->crash_dump_buf = NULL;
+ instance->ctrl_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_ctrl_info),
+ &instance->ctrl_info_buf_h);
- megasas_poll_wait_aen = 0;
- instance->flag_ieee = 0;
- instance->ev = NULL;
- instance->issuepend_done = 1;
- atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
- instance->is_imr = 0;
+ if (!instance->ctrl_info_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate controller info buffer\n");
+ return -ENOMEM;
+ }
- instance->evt_detail = pci_alloc_consistent(pdev,
- sizeof(struct
- megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->ld_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_LIST),
+ &instance->ld_list_buf_h);
- if (!instance->evt_detail) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
- "event detail structure\n");
- goto fail_alloc_dma_buf;
+ if (!instance->ld_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
+ return -ENOMEM;
+ }
+
+ instance->ld_targetid_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h);
+
+ if (!instance->ld_targetid_list_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate LD targetid list buffer\n");
+ return -ENOMEM;
}
if (!reset_devices) {
- instance->system_info_buf = pci_zalloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
- if (!instance->system_info_buf)
- dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+ instance->system_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h);
+ instance->pd_info =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h);
+ instance->tgt_prop =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h);
+ instance->crash_dump_buf =
+ pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
- instance->pd_info = pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ if (!instance->system_info_buf)
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate system info buffer\n");
if (!instance->pd_info)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
-
- instance->tgt_prop = pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate pd_info buffer\n");
if (!instance->tgt_prop)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate tgt_prop buffer\n");
- instance->crash_dump_buf = pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
if (!instance->crash_dump_buf)
- dev_err(&pdev->dev, "Can't allocate Firmware "
- "crash dump DMA buffer\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate crash dump buffer\n");
}
+ return 0;
+ }
+
+ /*
+ * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
+ * during driver load time
+ *
+ * @instance- Adapter soft instance
+ *
+ */
+ static inline
+ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
+ {
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (fusion && fusion->ioc_init_request)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ fusion->ioc_init_request,
+ fusion->ioc_init_request_phys);
+
+ if (instance->pd_list_buf)
+ pci_free_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ instance->pd_list_buf,
+ instance->pd_list_buf_h);
+
+ if (instance->ld_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ instance->ld_list_buf,
+ instance->ld_list_buf_h);
+
+ if (instance->ld_targetid_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ instance->ld_targetid_list_buf,
+ instance->ld_targetid_list_buf_h);
+
+ if (instance->ctrl_info_buf)
+ pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ instance->ctrl_info_buf,
+ instance->ctrl_info_buf_h);
+
+ if (instance->system_info_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ instance->system_info_buf,
+ instance->system_info_h);
+
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info, instance->pd_info_h);
+
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop, instance->tgt_prop_h);
+
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+ }
+
+ /*
+ * megasas_init_ctrl_params - Initialize controller's instance
+ * parameters before FW init
+ * @instance - Adapter soft instance
+ * @return - void
+ */
+ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
+ {
+ instance->fw_crash_state = UNAVAILABLE;
+
+ megasas_poll_wait_aen = 0;
+ instance->issuepend_done = 1;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+
/*
* Initialize locks and queues
*/
INIT_LIST_HEAD(&instance->cmd_pool);
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
- atomic_set(&instance->fw_outstanding,0);
+ atomic_set(&instance->fw_outstanding, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
+ spin_lock_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->reset_mutex);
mutex_init(&instance->hba_mutex);
-
- /*
- * Initialize PCI related and misc parameters
- */
- instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
- instance->init_id = MEGASAS_DEFAULT_INIT_ID;
- instance->ctrl_info = NULL;
-
+ mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
megasas_dbg_lvl = 0;
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
- } else
+ } else {
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+ }
+ }
+
+ /**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+ static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
+ int rval, pos;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * PCI prepping: enable device set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->pdev = pdev;
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+ megasas_set_adapter_type(instance);
/*
* Initialize MFI Firmware
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
- fail_alloc_dma_buf:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
scsi_host_put(host);
fail_alloc_instance:
- fail_set_dma_mask:
pci_disable_device(pdev);
return -ENODEV;
pci_set_master(pdev);
- if (megasas_set_dma_mask(pdev))
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ if (megasas_set_dma_mask(instance))
goto fail_set_dma_mask;
/*
atomic_set(&instance->fw_outstanding, 0);
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
-
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
if (rval < 0)
goto fail_reenable_msix;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
megasas_free_cmds(instance);
/* Re-launch SR-IOV heartbeat timer */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 0))
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
else {
instance->skip_heartbeat_timer_del = 1;
goto fail_init_mfi;
return 0;
fail_init_mfi:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
+ fail_reenable_msix:
fail_set_dma_mask:
fail_ready_state:
- fail_reenable_msix:
pci_disable_device(pdev);
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
kfree(fusion->stream_detect_by_ld[i]);
kfree(fusion->stream_detect_by_ld);
}
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
}
- megasas_free_fusion_context(instance);
} else {
megasas_release_mfi(instance);
- pci_free_consistent(pdev, sizeof(u32),
- instance->producer,
- instance->producer_h);
- pci_free_consistent(pdev, sizeof(u32),
- instance->consumer,
- instance->consumer_h);
}
- kfree(instance->ctrl_info);
-
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail, instance->evt_detail_h);
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
instance->hb_host_mem,
instance->hb_host_mem_h);
- if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
- instance->crash_dump_buf, instance->crash_dump_h);
+ megasas_free_ctrl_dma_buffers(instance);
- if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
- instance->system_info_buf, instance->system_info_h);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
struct megasas_iocpacket __user * user_ioc,
struct megasas_iocpacket *ioc)
{
- struct megasas_sge32 *kern_sge32;
+ struct megasas_sge64 *kern_sge64 = NULL;
+ struct megasas_sge32 *kern_sge32 = NULL;
struct megasas_cmd *cmd;
void *kbuff_arr[MAX_IOCTL_SGE];
dma_addr_t buf_handle = 0;
void *sense = NULL;
dma_addr_t sense_handle;
unsigned long *sense_ptr;
- u32 opcode;
+ u32 opcode = 0;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
return -EINVAL;
}
+ if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ dev_err(&instance->pdev->dev,
+ "Received invalid ioctl command 0x%x\n",
+ ioc->frame.hdr.cmd);
+ return -ENOTSUPP;
+ }
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
- MFI_FRAME_SGL64 |
+
+ cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
+
+ if (instance->consistent_mask_64bit)
+ cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
+ else
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
- opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+
+ if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
* kernel buffers in SGLs. The location of SGL is embedded in the
* struct iocpacket itself.
*/
- kern_sge32 = (struct megasas_sge32 *)
- ((unsigned long)cmd->frame + ioc->sgl_off);
+ if (instance->consistent_mask_64bit)
+ kern_sge64 = (struct megasas_sge64 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+ else
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
/*
* For each user buffer, create a mirror buffer and copy in
* We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses
*/
- kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
- kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ if (instance->consistent_mask_64bit) {
+ kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
+ kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ } else {
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ }
/*
* We created a kernel buffer corresponding to the
sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = cpu_to_le32(sense_handle);
+ if (instance->consistent_mask_64bit)
+ *sense_ptr = cpu_to_le64(sense_handle);
+ else
+ *sense_ptr = cpu_to_le32(sense_handle);
}
/*
if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
cmd->sync_cmd = 0;
dev_err(&instance->pdev->dev,
- "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
- __func__, __LINE__, opcode, cmd->cmd_status_drv);
+ "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+ __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+ cmd->cmd_status_drv);
return -EBUSY;
}
for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i]) {
- dma_free_coherent(&instance->pdev->dev,
- le32_to_cpu(kern_sge32[i].length),
- kbuff_arr[i],
- le32_to_cpu(kern_sge32[i].phys_addr));
+ if (instance->consistent_mask_64bit)
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge64[i].length),
+ kbuff_arr[i],
+ le64_to_cpu(kern_sge64[i].phys_addr));
+ else
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
}
}
void megaraid_sas_kill_hba(struct megasas_instance *instance);
extern u32 megasas_dbg_lvl;
-void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
int initial);
-void megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval);
+void megasas_start_timer(struct megasas_instance *instance);
extern struct megasas_mgmt_info megasas_mgmt_info;
extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
+ static inline
+ void megasas_configure_queue_sizes(struct megasas_instance *instance);
+ /**
+ * megasas_check_same_4gb_region - check whether an allocation
+ * crosses a 4GB boundary or not
+ * @instance - adapter's soft instance
+ * @start_addr - start address of DMA allocation
+ * @size - size of allocation in bytes
+ * return - true : allocation does not cross a
+ * 4GB boundary
+ * false: allocation crosses a
+ * 4GB boundary
+ */
+ static inline bool megasas_check_same_4gb_region
+ (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
+ {
+ dma_addr_t end_addr;
+
+ end_addr = start_addr + size;
+ if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
+ dev_err(&instance->pdev->dev,
+ "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
+ (unsigned long long)start_addr,
+ (unsigned long long)end_addr);
+ return false;
+ }
+
+ return true;
+ }
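/*
 * Editorial sketch (not part of this patch): why the retry paths below
 * pair this check with a roundup_pow_of_two(size) pool alignment. A
 * buffer of size sz placed at a power-of-two alignment A >= sz fits
 * inside a single A-aligned window, and for the sizes used here (well
 * under 4 GB) A divides 4 GB, so that window can never straddle a
 * 4 GB boundary: the realigned retry allocation passes the check by
 * construction.
 *
 * A = roundup_pow_of_two(sz);
 * start % A == 0 && sz <= A
 * => upper_32_bits(start) == upper_32_bits(start + sz - 1)
 */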
/**
* megasas_enable_intr_fusion - Enables interrupts
megasas_fire_cmd_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
writel(le32_to_cpu(req_desc->u.low),
&instance->reg_set->inbound_single_queue_port);
else {
reg_set = instance->reg_set;
/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
- if (!instance->is_ventura)
+ if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
dev_info(&instance->pdev->dev,
- "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
- cur_max_fw_cmds, ldio_threshold);
+ "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
+ cur_max_fw_cmds, ldio_threshold);
if (fw_boot_context == OCR_CONTEXT) {
cur_max_fw_cmds = cur_max_fw_cmds - 1;
instance->max_fw_cmds = cur_max_fw_cmds;
instance->ldio_threshold = ldio_threshold;
- if (!instance->is_rdpq)
- instance->max_fw_cmds =
- min_t(u16, instance->max_fw_cmds, 1024);
-
if (reset_devices)
instance->max_fw_cmds = min(instance->max_fw_cmds,
(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
* does not exceed max cmds that the FW can support
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
-
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
- instance->cur_can_queue = instance->max_scsi_cmds;
- instance->host->can_queue = instance->cur_can_queue;
}
-
- if (instance->is_ventura)
- instance->max_mpt_cmds =
- instance->max_fw_cmds * RAID_1_PEER_CMDS;
- else
- instance->max_mpt_cmds = instance->max_fw_cmds;
}
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
struct fusion_context *fusion = instance->ctrl_context;
struct megasas_cmd_fusion *cmd;
- /* SG, Sense */
- for (i = 0; i < instance->max_mpt_cmds; i++) {
- cmd = fusion->cmd_list[i];
- if (cmd) {
- if (cmd->sg_frame)
- dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
- if (cmd->sense)
- dma_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ if (fusion->sense)
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+
+ /* SG */
+ if (fusion->cmd_list) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ dma_pool_free(fusion->sg_dma_pool,
+ cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ }
+ kfree(cmd);
}
+ kfree(fusion->cmd_list);
}
if (fusion->sg_dma_pool) {
dma_pool_destroy(fusion->io_request_frames_pool);
fusion->io_request_frames_pool = NULL;
}
-
-
- /* cmd_list */
- for (i = 0; i < instance->max_mpt_cmds; i++)
- kfree(fusion->cmd_list[i]);
-
- kfree(fusion->cmd_list);
}
/**
u16 max_cmd;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
+ int sense_sz;
+ u32 offset;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
-
+ sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
fusion->sg_dma_pool =
dma_pool_create("mr_sg", &instance->pdev->dev,
/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
fusion->sense_dma_pool =
dma_pool_create("mr_sense", &instance->pdev->dev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ sense_sz, 64, 0);
if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
dev_err(&instance->pdev->dev,
return -ENOMEM;
}
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ /* The sense buffer, request frame and reply desc pool are required
+ * to lie within the same 4 GB region; the check below verifies this.
+ * On failure, a new DMA pool is created with an updated alignment
+ * and the older allocation and pool are destroyed.
+ * The alignment is chosen such that the next allocation, if it
+ * succeeds, always meets the same-4GB-region requirement.
+ * The actual requirement is not alignment as such: the start and end
+ * of the DMA address range must share the same upper 32 bits.
+ */
+
+ if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
+ sense_sz)) {
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+ fusion->sense = NULL;
+ dma_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sense_dma_pool =
+ dma_pool_create("mr_sense_align", &instance->pdev->dev,
+ sense_sz, roundup_pow_of_two(sense_sz),
+ 0);
+ if (!fusion->sense_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL,
+ &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
/*
* Allocate and attach a frame to each of the commands in cmd_list
*/
cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
GFP_KERNEL, &cmd->sg_frame_phys_addr);
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sg_frame || !cmd->sense) {
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
+ if (!cmd->sg_frame) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
/* create sense buffer for the raid 1/10 fp */
for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sense) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
}
return 0;
fusion = instance->ctrl_context;
- fusion->req_frames_desc =
- dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
- if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
-
+ retry_alloc:
fusion->io_request_frames_pool =
dma_pool_create("mr_ioreq", &instance->pdev->dev,
fusion->io_frames_alloc_sz, 16, 0);
dma_pool_alloc(fusion->io_request_frames_pool,
GFP_KERNEL, &fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
+ if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
+ instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+ megasas_configure_queue_sizes(instance);
+ goto retry_alloc;
+ } else {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->io_request_frames_phys,
+ fusion->io_frames_alloc_sz)) {
+ dma_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ fusion->io_request_frames = NULL;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+
+ fusion->io_request_frames_pool =
+ dma_pool_create("mr_ioreq_align",
+ &instance->pdev->dev,
+ fusion->io_frames_alloc_sz,
+ roundup_pow_of_two(fusion->io_frames_alloc_sz),
+ 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+ if (!fusion->req_frames_desc) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
return 0;
}
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->reply_frames_desc_phys[0],
+ (fusion->reply_alloc_sz * count))) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+ fusion->reply_frames_desc[0] = NULL;
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+ fusion->reply_frames_desc_pool =
+ dma_pool_create("mr_reply_align",
+ &instance->pdev->dev,
+ fusion->reply_alloc_sz * count,
+ roundup_pow_of_two(fusion->reply_alloc_sz * count),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->reply_frames_desc[0] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL,
+ &fusion->reply_frames_desc_phys[0]);
+
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
- int i, j, count;
+ int i, j, k, msix_count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
+ dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
+ u8 dma_alloc_count, abs_index;
+ u32 chunk_size, array_size, offset;
fusion = instance->ctrl_context;
+ chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
+ array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
+ MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
+ &fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- memset(fusion->rdpq_virt, 0,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
- count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ memset(fusion->rdpq_virt, 0, array_size);
+ msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
- fusion->reply_alloc_sz,
- 16, 0);
-
- if (!fusion->reply_frames_desc_pool) {
+ chunk_size, 16, 0);
+ fusion->reply_frames_desc_pool_align =
+ dma_pool_create("mr_rdpq_align",
+ &instance->pdev->dev,
+ chunk_size,
+ roundup_pow_of_two(chunk_size),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool ||
+ !fusion->reply_frames_desc_pool_align) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- for (i = 0; i < count; i++) {
- fusion->reply_frames_desc[i] =
- dma_pool_alloc(fusion->reply_frames_desc_pool,
- GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
- if (!fusion->reply_frames_desc[i]) {
+ /*
+ * For INVADER_SERIES, each set of 8 reply queues (0-7, 8-15, ...) and,
+ * for VENTURA_SERIES, each set of 16 reply queues (0-15, 16-31, ...)
+ * must lie within a 4GB boundary, and all reply queues in a set must
+ * share the same upper 32 bits of their memory address. The driver
+ * therefore allocates the DMA'able memory for the reply queues
+ * accordingly, applying the stricter VENTURA_SERIES limitation to
+ * INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+
+ for (i = 0; i < dma_alloc_count; i++) {
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+ /* The reply desc pool must lie within the same 4GB region; the
+ * function below checks this. On failure, the chunk is re-allocated
+ * from the aligned pci pool. For RDPQ buffers, the driver always
+ * creates two separate pci pools, and the alignment of the second
+ * pool guarantees that an allocation from it, if it succeeds, meets
+ * the same-4GB-region requirement. rdpq_tracker records each
+ * buffer's physical address, virtual address, and pci pool
+ * descriptor, which the driver uses when freeing the resources.
+ */
+ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
+ chunk_size)) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ rdpq_chunk_virt[i],
+ rdpq_chunk_phys[i]);
- fusion->rdpq_virt[i].RDPQBaseAddress =
- cpu_to_le64(fusion->reply_frames_desc_phys[i]);
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool_align,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool_align;
+ } else {
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool;
+ }
- reply_desc = fusion->reply_frames_desc[i];
- for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
+ fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
}
+
+ for (k = 0; k < dma_alloc_count; k++) {
+ for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
+ abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
+
+ if (abs_index == msix_count)
+ break;
+ offset = fusion->reply_alloc_sz * i;
+ fusion->rdpq_virt[abs_index].RDPQBaseAddress =
+ cpu_to_le64(rdpq_chunk_phys[k] + offset);
+ fusion->reply_frames_desc_phys[abs_index] =
+ rdpq_chunk_phys[k] + offset;
+ fusion->reply_frames_desc[abs_index] =
+ (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
+
+ reply_desc = fusion->reply_frames_desc[abs_index];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ }
+ }
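/*
 * Worked example, illustrative only: assuming
 * RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16 and msix_count is 20,
 * dma_alloc_count = DIV_ROUND_UP(20, 16) = 2 chunks are allocated.
 * Reply queue 17 then has abs_index 17 = (k = 1) * 16 + (i = 1),
 * i.e. it lives in the second chunk at byte offset
 * fusion->reply_alloc_sz * 1.
 */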
+
return 0;
}
fusion = instance->ctrl_context;
- for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
- if (fusion->reply_frames_desc[i])
- dma_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc[i],
- fusion->reply_frames_desc_phys[i]);
+ for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
+ if (fusion->rdpq_tracker[i].pool_entry_virt)
+ dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
+ fusion->rdpq_tracker[i].pool_entry_virt,
+ fusion->rdpq_tracker[i].pool_entry_phys);
+
}
if (fusion->reply_frames_desc_pool)
dma_pool_destroy(fusion->reply_frames_desc_pool);
+ if (fusion->reply_frames_desc_pool_align)
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
pci_free_consistent(instance->pdev,
fusion = instance->ctrl_context;
- if (megasas_alloc_cmdlist_fusion(instance))
- goto fail_exit;
-
if (megasas_alloc_request_fusion(instance))
goto fail_exit;
if (megasas_alloc_reply_fusion(instance))
goto fail_exit;
+ if (megasas_alloc_cmdlist_fusion(instance))
+ goto fail_exit;
+
+ dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+ instance->max_fw_cmds);
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
+ struct timeval tv;
+ bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
- cmd = megasas_get_cmd(instance);
+ ioc_init_handle = fusion->ioc_init_request_phys;
+ IOCInitMessage = fusion->ioc_init_request;
- if (!cmd) {
- dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
- ret = 1;
- goto fail_get_cmd;
- }
+ cmd = fusion->ioc_init_cmd;
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ if (instance->adapter_type == INVADER_SERIES) {
+ cur_fw_64bit_dma_capable =
+ (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+
+ if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
+ dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
+ "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
+ megaraid_sas_kill_hba(instance);
+ ret = 1;
+ goto fail_fw_init;
+ }
+ }
+
if (instance->is_rdpq && !cur_rdpq_mode) {
dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
" from RDPQ mode to non RDPQ mode\n");
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
- IOCInitMessage =
- dma_alloc_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- &ioc_init_handle, GFP_KERNEL);
-
- if (!IOCInitMessage) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "IOCInitMessage\n");
- ret = 1;
- goto fail_fw_init;
- }
-
memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
IOCInitMessage->MsgFlags = instance->is_rdpq ?
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
+
+ do_gettimeofday(&tv);
+ /* Convert to milliseconds as per FW requirement */
+ IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
+ (tv.tv_usec / 1000));
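/*
 * Worked example, illustrative only: tv_sec = 1500000000 and
 * tv_usec = 250000 give a timestamp of 1500000000 * 1000 + 250 =
 * 1500000000250 ms for the firmware.
 */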
+
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
/* driver support Extended MSIX */
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
drv_ops->mfi_capabilities.support_additional_msix = 1;
/* driver supports HA / Remote LUN over Fast Path interface */
drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+
+ if (instance->consistent_mask_64bit)
+ drv_ops->mfi_capabilities.support_64bit_mode = 1;
+
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
strlen(sys_info) > 64 ? 64 : strlen(sys_info));
instance->system_info_buf->systemIdLength =
strlen(sys_info) > 64 ? 64 : strlen(sys_info);
- init_frame->system_info_lo = instance->system_info_h;
- init_frame->system_info_hi = 0;
+ init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
+ init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
}
init_frame->queue_info_new_phys_addr_hi =
ret = 0;
fail_fw_init:
- megasas_return_cmd(instance, cmd);
- if (IOCInitMessage)
- dma_free_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- IOCInitMessage, ioc_init_handle);
- fail_get_cmd:
dev_err(&instance->pdev->dev,
"Init cmd return status %s for SCSI host %d\n",
ret ? "FAILED" : "SUCCESS", instance->host->host_no);
memset(pd_sync, 0, pd_seq_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ instance->jbod_seq_cmd = cmd;
+ } else {
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ }
+
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
+
+ megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
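/*
 * Illustrative sketch, not taken from this patch: the helper called
 * above presumably picks a 32-bit or 64-bit SGE based on the
 * negotiated consistent DMA mask, roughly as below; the function
 * name is hypothetical.
 */
static void set_dma_settings_sketch(struct megasas_instance *instance,
				    struct megasas_dcmd_frame *dcmd,
				    dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		/* callers store flags in CPU order; convert once here */
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
	} else {
		dcmd->sgl.sge32[0].phys_addr =
			cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}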
if (pend) {
- dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
- instance->jbod_seq_cmd = cmd;
instance->instancet->issue_dcmd(instance, cmd);
return 0;
}
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
-
/* Below code is only for non-pended DCMD */
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
ret = -EINVAL;
}
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
if (ret == DCMD_SUCCESS)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
+
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
megasas_return_cmd(instance, cmd);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
instance->map_update_cmd = cmd;
return -ENOMEM;
}
+ /**
+ * megasas_configure_queue_sizes - Calculate size of request desc queue,
+ * reply desc queue,
+ * IO request frame queue, set can_queue.
+ * @instance: Adapter soft state
+ * @return: void
+ */
+ static inline
+ void megasas_configure_queue_sizes(struct megasas_instance *instance)
+ {
+ struct fusion_context *fusion;
+ u16 max_cmd;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ if (instance->adapter_type == VENTURA_SERIES)
+ instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
+ else
+ instance->max_mpt_cmds = instance->max_fw_cmds;
+
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->cur_can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
+
+ fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
+
+ fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
+ instance->max_mpt_cmds;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
+ (fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+ * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+ }
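/*
 * Worked example, illustrative only: with instance->max_fw_cmds = 100
 * on a VENTURA_SERIES adapter, and assuming RAID_1_PEER_CMDS is 2,
 * max_mpt_cmds = 200 and reply_q_depth = 2 * ((100 + 1 + 15) / 16)
 * * 16 = 224, i.e. (max_cmd + 1) rounded up to a multiple of 16,
 * doubled.
 */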
+
+ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
+ {
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd;
+
+ fusion = instance->ctrl_context;
+
+ cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ &cmd->frame_phys_addr, GFP_KERNEL);
+
+ if (!cmd->frame) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ fusion->ioc_init_cmd = cmd;
+ return 0;
+ }
+
+ /**
+ * megasas_free_ioc_init_cmd - Free IOC INIT command frame
+ * @instance: Adapter soft state
+ */
+ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
+ {
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
+ dma_free_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ fusion->ioc_init_cmd->frame,
+ fusion->ioc_init_cmd->frame_phys_addr);
+
+ kfree(fusion->ioc_init_cmd);
+ }
+
/**
* megasas_init_adapter_fusion - Initializes the FW
* @instance: Adapter soft state
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u16 max_cmd;
u32 scratch_pad_2;
int i = 0, count;
instance->max_mfi_cmds =
MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
- max_cmd = instance->max_fw_cmds;
-
- fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
-
- fusion->request_alloc_sz =
- sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
- fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
- *(fusion->reply_q_depth);
- fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
- (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
- * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+ megasas_configure_queue_sizes(instance);
scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ if (megasas_alloc_ioc_init_frame(instance))
+ return 1;
+
/*
* Allocate memory for descriptors
* Create a pool of commands
fail_alloc_cmds:
megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
+ megasas_free_ioc_init_cmd(instance);
return 1;
}
fusion = instance->ctrl_context;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
sgl_ptr_end->Flags = 0;
sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
if (i == sge_count - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
sgl_ptr++;
(sge_count > fusion->max_sge_in_main_msg)) {
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
if ((le16_to_cpu(cmd->io_request->IoFlags) &
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
sg_chain = sgl_ptr;
/* Prepare chain element */
sg_chain->NextChainOffset = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags =
praid_context = &io_request->RaidContext;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
megasas_stream_detect(instance, cmd, &io_info);
spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_request->RaidContext.raid_context.reg_lock_flags ==
REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
io_request->RaidContext.raid_context.reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.nseg_type |=
(1 << RAID_CONTEXT_NSEG_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
&io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
io_request->RaidContext.raid_context_g35.span_arm
= io_info.span_arm;
else
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
else
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
(io_request->RaidContext.raid_context.reg_lock_flags
== REGION_TYPE_UNUSED))
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
io_request->RaidContext.raid_context.nseg = 0x1;
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
/* set RAID context values */
pRAID_Context->config_seq_num = raid->seqNum;
- if (!instance->is_ventura)
+ if (instance->adapter_type != VENTURA_SERIES)
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd);
cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
pRAID_Context->timeout_value =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
return 1;
}
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLowAddress =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
- cpu_to_le32(r1_cmd->sense_phys_addr);
+ cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
r1_cmd->scmd = cmd->scmd;
req_desc2 = megasas_get_request_descriptor(instance,
(r1_cmd->index - 1));
io_req = cmd->io_request;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
void
megasas_release_fusion(struct megasas_instance *instance)
{
+ megasas_free_ioc_init_cmd(instance);
megasas_free_cmds(instance);
megasas_free_cmds_fusion(instance);
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
/*check for extra commands issued by driver*/
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
megasas_return_cmd_fusion(instance, r1_cmd);
}
megasas_set_dynamic_target_properties(sdev);
/* reset stream detection array */
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
0, sizeof(struct LD_STREAM_DETECT));
/* Restart SR-IOV heartbeat */
if (instance->requestorId) {
if (!megasas_sriov_start_heartbeat(instance, 0))
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
else
instance->skip_heartbeat_timer_del = 1;
}
} else {
/* For VF: Restart HB timer if we didn't OCR */
if (instance->requestorId) {
- megasas_start_timer(instance,
- &instance->sriov_heartbeat_timer,
- megasas_sriov_heartbeat_handler,
- MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
+ megasas_start_timer(instance);
}
clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
instance->instancet->enable_intr(instance);
{
struct fusion_context *fusion;
- instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
- instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- instance->ctrl_context_pages);
+ instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
+ GFP_KERNEL);
if (!instance->ctrl_context) {
- /* fall back to using vmalloc for fusion_context */
- instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
- if (!instance->ctrl_context) {
- dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
}
fusion = instance->ctrl_context;
+ fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ fusion->log_to_span =
+ (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ fusion->log_to_span_pages);
+ if (!fusion->log_to_span) {
+ fusion->log_to_span = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ if (!fusion->log_to_span) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
sizeof(struct LD_LOAD_BALANCE_INFO));
fusion->load_balance_info =
fusion->load_balance_info_pages);
}
- if (is_vmalloc_addr(fusion))
- vfree(fusion);
- else
- free_pages((ulong)fusion,
- instance->ctrl_context_pages);
+ if (fusion->log_to_span) {
+ if (is_vmalloc_addr(fusion->log_to_span))
+ vfree(fusion->log_to_span);
+ else
+ free_pages((ulong)fusion->log_to_span,
+ fusion->log_to_span_pages);
+ }
+
+ kfree(fusion);
}
}
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.42
+ * mpi2.h Version: 02.00.48
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
* 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
* 12-15-15 02.00.41 Bumped MPI_HEADER_VERSION_UNIT
* 01-01-16 02.00.42 Bumped MPI_HEADER_VERSION_UNIT
+ * 04-05-16 02.00.43 Modified MPI26_DIAG_BOOT_DEVICE_SELECT defines
+ * to be unique within first 32 characters.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-10-16 02.00.44 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 07-06-16 02.00.45 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 09-02-16 02.00.46 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-23-16 02.00.47 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 02-03-17 02.00.48 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
#define MPI2_VERSION_02_06 (0x0206)
/*Unit and Dev versioning for this MPI header set */
- #define MPI2_HEADER_VERSION_UNIT (0x2A)
+ #define MPI2_HEADER_VERSION_UNIT (0x30)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+ /* Defines for V7A/V7R HostDiagnostic Register */
+ #define MPI26_DIAG_BOOT_DEVICE_SEL_64FLASH (0x00000000)
+ #define MPI26_DIAG_BOOT_DEVICE_SEL_64HCDW (0x00000800)
+ #define MPI26_DIAG_BOOT_DEVICE_SEL_32FLASH (0x00001000)
+ #define MPI26_DIAG_BOOT_DEVICE_SEL_32HCDW (0x00001800)
+
#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
#define MPI2_DIAG_HCB_MODE (0x00000100)
#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+ #define MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED (0x10)
#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
Mpi25FastPathSCSIIORequestDescriptor_t,
*pMpi25FastPathSCSIIORequestDescriptor_t;
+ /*PCIe Encapsulated Request Descriptor */
+ typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
+ *PTR_MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR,
+ Mpi26PCIeEncapsulatedRequestDescriptor_t,
+ *pMpi26PCIeEncapsulatedRequestDescriptor_t;
+
/*union of Request Descriptors */
typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ MPI26_PCIE_ENCAPSULATED_REQUEST_DESCRIPTOR PCIeEncapsulated;
U64 Words;
} MPI2_REQUEST_DESCRIPTOR_UNION,
*PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
* Atomic SCSI Target Request Descriptor
* Atomic RAID Accelerator Request Descriptor
* Atomic Fast Path SCSI IO Request Descriptor
+ * Atomic PCIe Encapsulated Request Descriptor
*/
/*Atomic Request Descriptor */
#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+ #define MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS (0x08)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
/*values for marking a reply descriptor as unused */
Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
*pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+ /*PCIe Encapsulated Success Reply Descriptor */
+ typedef MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
+ MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi26PCIeEncapsulatedSuccessReplyDescriptor_t,
+ *pMpi26PCIeEncapsulatedSuccessReplyDescriptor_t;
+
/*union of Reply Descriptors */
typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ MPI26_PCIE_ENCAPSULATED_SUCCESS_REPLY_DESCRIPTOR
+ PCIeEncapsulatedSuccess;
U64 Words;
} MPI2_REPLY_DESCRIPTORS_UNION,
*PTR_MPI2_REPLY_DESCRIPTORS_UNION,
#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+ #define MPI2_FUNCTION_NVME_ENCAPSULATED (0x33)
#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
+ #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
+ #define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
/*Data Location Address Space */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.35
+ * mpi2_cnfg.h Version: 02.00.40
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
* MPI2_CONFIG_PAGE_BIOS_1.
* 08-25-15 02.00.34 Bumped Header Version.
* 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
+ * 01-21-16 02.00.36 Added/modified MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added Link field to PCIe Link Pages
+ * Added EnclosureLevel and ConnectorName to PCIe
+ * Device Page 0.
+ * Added define for PCIE IoUnit page 1 max rate shift.
+ * Added comment for reserved ExtPageTypes.
+ * Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * Added NegotiatedLinkRate and NegotiatedPortWidth to
+ * PCIe device page 0.
+ * 04-10-16 02.00.37 Fixed MPI2_MFGPAGE_DEVID_SAS3616/3708 defines
+ * 07-01-16 02.00.38 Added Manufacturing page 7 Connector types.
+ * Changed declaration of ConnectorName in PCIe DevicePage0
+ * to match SAS DevicePage 0.
+ * Added SATADeviceWaitTime to IO Unit Page 11.
+ * Added MPI26_MFGPAGE_DEVID_SAS4008
+ * Added x16 PCIe width to IO Unit Page 7
+ * Added LINKFLAGS to control SRIS in PCIe IO Unit page 1
+ * phy data.
+ * Added InitStatus to PCIe IO Unit Page 1 header.
+ * 09-01-16 02.00.39 Added MPI26_CONFIG_PAGE_ENCLOSURE_0 and related defines.
+ * Added MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE and
+ * MPI26_ENCLOS_PGAD_FORM_HANDLE page address formats.
+ * 02-02-17 02.00.40 Added MPI2_MANPAGE7_SLOT_UNKNOWN.
+ * Added ChassisSlot field to SAS Enclosure Page 0.
+ * Added ChassisSlot Valid bit (bit 5) to the Flags field
+ * in SAS Enclosure Page 0.
* --------------------------------------------------------------------------
*/
#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+ #define MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT (0x1B)
+ #define MPI2_CONFIG_EXTPAGETYPE_PCIE_SWITCH (0x1C)
+ #define MPI2_CONFIG_EXTPAGETYPE_PCIE_DEVICE (0x1D)
+ #define MPI2_CONFIG_EXTPAGETYPE_PCIE_LINK (0x1E)
/*****************************************************************************
#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+ /*Enclosure PageAddress format */
+ #define MPI26_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+ #define MPI26_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+ #define MPI26_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+ #define MPI26_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
/*RAID Configuration PageAddress format */
#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
+ /*PCIe Switch PageAddress format */
+ #define MPI26_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000)
+ #define MPI26_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+ #define MPI26_PCIE_SWITCH_PGAD_FORM_HNDL_PORTNUM (0x10000000)
+ #define MPI26_PCIE_SWITCH_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+ #define MPI26_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF)
+ #define MPI26_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000)
+ #define MPI26_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
+
+
+ /*PCIe Device PageAddress format */
+ #define MPI26_PCIE_DEVICE_PGAD_FORM_MASK (0xF0000000)
+ #define MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+ #define MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+ #define MPI26_PCIE_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+ /*PCIe Link PageAddress format */
+ #define MPI26_PCIE_LINK_PGAD_FORM_MASK (0xF0000000)
+ #define MPI26_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
+ #define MPI26_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
+
+ #define MPI26_PCIE_DEVICE_PGAD_LINKNUM_MASK (0x000000FF)
+
+
+
/****************************************************************************
* Configuration messages
****************************************************************************/
#define MPI26_MFGPAGE_DEVID_SAS3508 (0x00AD)
#define MPI26_MFGPAGE_DEVID_SAS3508_1 (0x00AE)
#define MPI26_MFGPAGE_DEVID_SAS3408 (0x00AF)
+ #define MPI26_MFGPAGE_DEVID_SAS3716 (0x00D0)
+ #define MPI26_MFGPAGE_DEVID_SAS3616 (0x00D1)
+ #define MPI26_MFGPAGE_DEVID_SAS3708 (0x00D2)
+
+ #define MPI26_MFGPAGE_DEVID_SAS4008 (0x00A1)
+
/*Manufacturing Page 0 */
#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+ #define MPI2_MANPAGE7_PINOUT_SFF_8088_A (0x0E)
+ #define MPI2_MANPAGE7_PINOUT_SFF_8643_16i (0x0F)
+ #define MPI2_MANPAGE7_PINOUT_SFF_8654_4i (0x10)
+ #define MPI2_MANPAGE7_PINOUT_SFF_8654_8i (0x11)
+ #define MPI2_MANPAGE7_PINOUT_SFF_8611_4i (0x12)
+ #define MPI2_MANPAGE7_PINOUT_SFF_8611_8i (0x13)
/*defines for the Location field */
#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+ /*defines for the Slot field */
+ #define MPI2_MANPAGE7_SLOT_UNKNOWN (0xFFFF)
+
/*
*Host code (drivers, BIOS, utilities, etc.) should leave this define set to
*one and check the value returned for NumPhys at runtime.
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+ #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X16 (0x10)
/*defines for IO Unit Page 7 PCIeSpeed field */
#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+ #define MPI2_IOUNITPAGE7_PCIE_SPEED_16_0_GBPS (0x03)
/*defines for IO Unit Page 7 ProcessorState field */
#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
+ #define MPI26_SAS_NEG_LINK_RATE_22_5 (0x0C)
/*values for AttachedPhyInfo fields */
#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
+ #define MPI26_SAS_PRATE_MAX_RATE_22_5 (0xC0)
#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+ #define MPI26_SAS_PRATE_MIN_RATE_22_5 (0x0C)
/*values for SAS HwLinkRate fields */
#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
+ #define MPI26_SAS_HWRATE_MAX_RATE_22_5 (0xC0)
#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+ #define MPI26_SAS_HWRATE_MIN_RATE_22_5 (0x0C)
#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+ #define MPI26_SASIOUNIT1_MAX_RATE_22_5 (0xC0)
#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
+ #define MPI26_SASIOUNIT1_MIN_RATE_22_5 (0x0C)
/*see mpi2_sas.h for values for
*SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
/*SAS Enclosure Page 0 */
typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
- MPI2_CONFIG_EXTENDED_PAGE_HEADER
- Header; /*0x00 */
- U32
- Reserved1; /*0x08 */
- U64
- EnclosureLogicalID; /*0x0C */
- U16
- Flags; /*0x14 */
- U16
- EnclosureHandle; /*0x16 */
- U16
- NumSlots; /*0x18 */
- U16
- StartSlot; /*0x1A */
- U8
- Reserved2; /*0x1C */
- U8
- EnclosureLevel; /*0x1D */
- U16
- SEPDevHandle; /*0x1E */
- U32
- Reserved3; /*0x20 */
- U32
- Reserved4; /*0x24 */
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U64 EnclosureLogicalID; /*0x0C */
+ U16 Flags; /*0x14 */
+ U16 EnclosureHandle; /*0x16 */
+ U16 NumSlots; /*0x18 */
+ U16 StartSlot; /*0x1A */
+ U8 ChassisSlot; /*0x1C */
+ U8 EnclosureLevel; /*0x1D */
+ U16 SEPDevHandle; /*0x1E */
+ U32 Reserved3; /*0x20 */
+ U32 Reserved4; /*0x24 */
} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
*PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
- Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t,
+ MPI26_CONFIG_PAGE_ENCLOSURE_0,
+ *PTR_MPI26_CONFIG_PAGE_ENCLOSURE_0,
+ Mpi26EnclosurePage0_t, *pMpi26EnclosurePage0_t;
#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04)
/*values for SAS Enclosure Page 0 Flags field */
+ #define MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+ #define MPI26_ENCLOSURE0_PAGEVERSION (0x04)
+
+ /*Values for Enclosure Page 0 Flags field */
+ #define MPI26_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
+ #define MPI26_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010)
+ #define MPI26_ENCLS0_FLAGS_MNG_MASK (0x000F)
+ #define MPI26_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+ #define MPI26_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+ #define MPI26_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+ #define MPI26_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+ #define MPI26_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+ #define MPI26_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
/****************************************************************************
* Log Config Page
/*PageVersion should be provided by product-specific code */
+
+
+ /****************************************************************************
+ * values for fields used by several types of PCIe Config Pages
+ ****************************************************************************/
+
+ /*values for NegotiatedLinkRates fields */
+ #define MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+ /*link rates used for Negotiated Physical Link Rate */
+ #define MPI26_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
+ #define MPI26_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
+ #define MPI26_PCIE_NEG_LINK_RATE_2_5 (0x02)
+ #define MPI26_PCIE_NEG_LINK_RATE_5_0 (0x03)
+ #define MPI26_PCIE_NEG_LINK_RATE_8_0 (0x04)
+ #define MPI26_PCIE_NEG_LINK_RATE_16_0 (0x05)
+
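/*
 * Illustrative sketch, not part of the header: host code extracts the
 * physical link rate from a NegotiatedLinkRate byte with the mask
 * defined above; the helper name is hypothetical.
 */
static inline U8 mpi26_pcie_phys_link_rate(U8 negotiated_link_rate)
{
	return negotiated_link_rate & MPI26_PCIE_NEG_LINK_RATE_MASK_PHYSICAL;
}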
+
+ /****************************************************************************
+ * PCIe IO Unit Config Pages (MPI v2.6 and later)
+ ****************************************************************************/
+
+ /*PCIe IO Unit Page 0 */
+
+ typedef struct _MPI26_PCIE_IO_UNIT0_PHY_DATA {
+ U8 Link; /*0x00 */
+ U8 LinkFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 EnumerationStatus; /*0x0C */
+ U32 Reserved1; /*0x10 */
+ } MPI26_PCIE_IO_UNIT0_PHY_DATA,
+ *PTR_MPI26_PCIE_IO_UNIT0_PHY_DATA,
+ Mpi26PCIeIOUnit0PhyData_t, *pMpi26PCIeIOUnit0PhyData_t;
+
+ /*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+ #ifndef MPI26_PCIE_IOUNIT0_PHY_MAX
+ #define MPI26_PCIE_IOUNIT0_PHY_MAX (1)
+ #endif
+
+ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 InitStatus; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI26_PCIE_IO_UNIT0_PHY_DATA
+ PhyData[MPI26_PCIE_IOUNIT0_PHY_MAX]; /*0x10 */
+ } MPI26_CONFIG_PAGE_PIOUNIT_0,
+ *PTR_MPI26_CONFIG_PAGE_PIOUNIT_0,
+ Mpi26PCIeIOUnitPage0_t, *pMpi26PCIeIOUnitPage0_t;
+
+ #define MPI26_PCIEIOUNITPAGE0_PAGEVERSION (0x00)
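/*
 * Illustrative sketch, not part of the header: because PhyData is
 * declared with one element, host code computes the real page size
 * from the NumPhys value the IOC returns at runtime (assumes
 * num_phys >= 1); the helper name is hypothetical.
 */
static inline size_t mpi26_piounit0_page_size(U8 num_phys)
{
	return sizeof(MPI26_CONFIG_PAGE_PIOUNIT_0) +
	       (num_phys - 1) * sizeof(MPI26_PCIE_IO_UNIT0_PHY_DATA);
}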
+
+ /*values for PCIe IO Unit Page 0 LinkFlags */
+ #define MPI26_PCIEIOUNIT0_LINKFLAGS_ENUMERATION_IN_PROGRESS (0x08)
+
+ /*values for PCIe IO Unit Page 0 PhyFlags */
+ #define MPI26_PCIEIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+ /*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+ /*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
+ *values
+ */
+
+ /*values for PCIe IO Unit Page 0 EnumerationStatus */
+ #define MPI26_PCIEIOUNIT0_ES_MAX_SWITCHES_EXCEEDED (0x40000000)
+ #define MPI26_PCIEIOUNIT0_ES_MAX_DEVICES_EXCEEDED (0x20000000)
+
+
+ /*PCIe IO Unit Page 1 */
+
+ typedef struct _MPI26_PCIE_IO_UNIT1_PHY_DATA {
+ U8 Link; /*0x00 */
+ U8 LinkFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U32 Reserved1; /*0x08 */
+ } MPI26_PCIE_IO_UNIT1_PHY_DATA,
+ *PTR_MPI26_PCIE_IO_UNIT1_PHY_DATA,
+ Mpi26PCIeIOUnit1PhyData_t, *pMpi26PCIeIOUnit1PhyData_t;
+
+ /*values for LinkFlags */
+ #define MPI26_PCIEIOUNIT1_LINKFLAGS_DIS_SRIS (0x00)
+ #define MPI26_PCIEIOUNIT1_LINKFLAGS_EN_SRIS (0x01)
+
+ /*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+ #ifndef MPI26_PCIE_IOUNIT1_PHY_MAX
+ #define MPI26_PCIE_IOUNIT1_PHY_MAX (1)
+ #endif
+
+ typedef struct _MPI26_CONFIG_PAGE_PIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 ControlFlags; /*0x08 */
+ U16 Reserved; /*0x0A */
+ U16 AdditionalControlFlags; /*0x0C */
+ U16 NVMeMaxQueueDepth; /*0x0E */
+ U8 NumPhys; /*0x10 */
+ U8 Reserved1; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ MPI26_PCIE_IO_UNIT1_PHY_DATA
+ PhyData[MPI26_PCIE_IOUNIT1_PHY_MAX];/*0x14 */
+ } MPI26_CONFIG_PAGE_PIOUNIT_1,
+ *PTR_MPI26_CONFIG_PAGE_PIOUNIT_1,
+ Mpi26PCIeIOUnitPage1_t, *pMpi26PCIeIOUnitPage1_t;
+
+ #define MPI26_PCIEIOUNITPAGE1_PAGEVERSION (0x00)
+
+ /*values for PCIe IO Unit Page 1 PhyFlags */
+ #define MPI26_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+ #define MPI26_PCIEIOUNIT1_PHYFLAGS_ENDPOINT_ONLY (0x01)
+
+ /*values for PCIe IO Unit Page 1 MaxMinLinkRate */
+ #define MPI26_PCIEIOUNIT1_MAX_RATE_MASK (0xF0)
+ #define MPI26_PCIEIOUNIT1_MAX_RATE_SHIFT (4)
+ #define MPI26_PCIEIOUNIT1_MAX_RATE_2_5 (0x20)
+ #define MPI26_PCIEIOUNIT1_MAX_RATE_5_0 (0x30)
+ #define MPI26_PCIEIOUNIT1_MAX_RATE_8_0 (0x40)
+ #define MPI26_PCIEIOUNIT1_MAX_RATE_16_0 (0x50)
+
+ /*see mpi2_pci.h for values for PCIe IO Unit Page 0 ControllerPhyDeviceInfo
+ *values
+ */
+
+
+ /****************************************************************************
+ * PCIe Switch Config Pages (MPI v2.6 and later)
+ ****************************************************************************/
+
+ /*PCIe Switch Page 0 */
+
+ typedef struct _MPI26_CONFIG_PAGE_PSWITCH_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 DevHandle; /*0x0C */
+ U16 ParentDevHandle; /*0x0E */
+ U8 NumPorts; /*0x10 */
+ U8 PCIeLevel; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+ } MPI26_CONFIG_PAGE_PSWITCH_0, *PTR_MPI26_CONFIG_PAGE_PSWITCH_0,
+ Mpi26PCIeSwitchPage0_t, *pMpi26PCIeSwitchPage0_t;
+
+ #define MPI26_PCIESWITCH0_PAGEVERSION (0x00)
+
+
+ /*PCIe Switch Page 1 */
+
+ typedef struct _MPI26_CONFIG_PAGE_PSWITCH_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 PhysicalPort; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumPorts; /*0x0C */
+ U8 PortNum; /*0x0D */
+ U16 AttachedDevHandle; /*0x0E */
+ U16 SwitchDevHandle; /*0x10 */
+ U8 NegotiatedPortWidth; /*0x12 */
+ U8 NegotiatedLinkRate; /*0x13 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ } MPI26_CONFIG_PAGE_PSWITCH_1, *PTR_MPI26_CONFIG_PAGE_PSWITCH_1,
+ Mpi26PCIeSwitchPage1_t, *pMpi26PCIeSwitchPage1_t;
+
+ #define MPI26_PCIESWITCH1_PAGEVERSION (0x00)
+
+ /*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+ /****************************************************************************
+ * PCIe Device Config Pages (MPI v2.6 and later)
+ ****************************************************************************/
+
+ /*PCIe Device Page 0 */
+
+ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U64 WWID; /*0x0C */
+ U16 ParentDevHandle; /*0x14 */
+ U8 PortNum; /*0x16 */
+ U8 AccessStatus; /*0x17 */
+ U16 DevHandle; /*0x18 */
+ U8 PhysicalPort; /*0x1A */
+ U8 Reserved1; /*0x1B */
+ U32 DeviceInfo; /*0x1C */
+ U32 Flags; /*0x20 */
+ U8 SupportedLinkRates; /*0x24 */
+ U8 MaxPortWidth; /*0x25 */
+ U8 NegotiatedPortWidth; /*0x26 */
+ U8 NegotiatedLinkRate; /*0x27 */
+ U8 EnclosureLevel; /*0x28 */
+ U8 Reserved2; /*0x29 */
+ U16 Reserved3; /*0x2A */
+ U8 ConnectorName[4]; /*0x2C */
+ U32 Reserved4; /*0x30 */
+ U32 Reserved5; /*0x34 */
+ } MPI26_CONFIG_PAGE_PCIEDEV_0, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_0,
+ Mpi26PCIeDevicePage0_t, *pMpi26PCIeDevicePage0_t;
+
+ #define MPI26_PCIEDEVICE0_PAGEVERSION (0x01)
+
+ /*values for PCIe Device Page 0 AccessStatus field */
+ #define MPI26_PCIEDEV0_ASTATUS_NO_ERRORS (0x00)
+ #define MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION (0x04)
+ #define MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED (0x02)
+ #define MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED (0x07)
+ #define MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED (0x08)
+ #define MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE (0x09)
+ #define MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED (0x0A)
+ #define MPI26_PCIEDEV0_ASTATUS_UNKNOWN (0x10)
+
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT (0x30)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x31)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED (0x32)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED (0x33)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED (0x34)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED (0x35)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x36)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT (0x37)
+ #define MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS (0x38)
+
+ #define MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX (0x3F)
+
+ /*see mpi2_pci.h for the MPI26_PCIE_DEVINFO_ defines used for the DeviceInfo
+ *field
+ */
+
+ /*values for PCIe Device Page 0 Flags field */
+ #define MPI26_PCIEDEV0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+ #define MPI26_PCIEDEV0_FLAGS_ENABLED_FAST_PATH (0x4000)
+ #define MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+ #define MPI26_PCIEDEV0_FLAGS_ASYNCHRONOUS_NOTIFICATION (0x0400)
+ #define MPI26_PCIEDEV0_FLAGS_ATA_SW_PRESERVATION (0x0200)
+ #define MPI26_PCIEDEV0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+ #define MPI26_PCIEDEV0_FLAGS_ATA_48BIT_LBA_SUPPORTED (0x0080)
+ #define MPI26_PCIEDEV0_FLAGS_ATA_SMART_SUPPORTED (0x0040)
+ #define MPI26_PCIEDEV0_FLAGS_ATA_NCQ_SUPPORTED (0x0020)
+ #define MPI26_PCIEDEV0_FLAGS_ATA_FUA_SUPPORTED (0x0010)
+ #define MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID (0x0002)
+ #define MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT (0x0001)
+
+ /* values for PCIe Device Page 0 SupportedLinkRates field */
+ #define MPI26_PCIEDEV0_LINK_RATE_16_0_SUPPORTED (0x08)
+ #define MPI26_PCIEDEV0_LINK_RATE_8_0_SUPPORTED (0x04)
+ #define MPI26_PCIEDEV0_LINK_RATE_5_0_SUPPORTED (0x02)
+ #define MPI26_PCIEDEV0_LINK_RATE_2_5_SUPPORTED (0x01)
+
+ /*use MPI26_PCIE_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+ /*PCIe Device Page 2 */
+
+ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x08 */
+ U16 Reserved1; /*0x0A */
+ U32 MaximumDataTransferSize;/*0x0C */
+ U32 Capabilities; /*0x10 */
+ U32 Reserved2; /*0x14 */
+ } MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2,
+ Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t;
+
+ #define MPI26_PCIEDEVICE2_PAGEVERSION (0x00)
+
+ /*defines for PCIe Device Page 2 Capabilities field */
+ #define MPI26_PCIEDEV2_CAP_SGL_FORMAT (0x00000004)
+ #define MPI26_PCIEDEV2_CAP_BIT_BUCKET_SUPPORT (0x00000002)
+ #define MPI26_PCIEDEV2_CAP_SGL_SUPPORT (0x00000001)
+
+
+ /****************************************************************************
+ * PCIe Link Config Pages (MPI v2.6 and later)
+ ****************************************************************************/
+
+ /*PCIe Link Page 1 */
+
+ typedef struct _MPI26_CONFIG_PAGE_PCIELINK_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 CorrectableErrorCount; /*0x0C */
+ U16 NonFatalErrorCount; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U16 FatalErrorCount; /*0x14 */
+ U16 Reserved4; /*0x16 */
+ } MPI26_CONFIG_PAGE_PCIELINK_1, *PTR_MPI26_CONFIG_PAGE_PCIELINK_1,
+ Mpi26PcieLinkPage1_t, *pMpi26PcieLinkPage1_t;
+
+ #define MPI26_PCIELINK1_PAGEVERSION (0x00)
+
+ /*PCIe Link Page 2 */
+
+ typedef struct _MPI26_PCIELINK2_LINK_EVENT {
+ U8 LinkEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 LinkEventInfo; /*0x04 */
+ } MPI26_PCIELINK2_LINK_EVENT, *PTR_MPI26_PCIELINK2_LINK_EVENT,
+ Mpi26PcieLink2LinkEvent_t, *pMpi26PcieLink2LinkEvent_t;
+
+ /*use MPI26_PCIELINK3_EVTCODE_ for the LinkEventCode field */
+
+
+ /*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLinkEvents at runtime.
+ */
+ #ifndef MPI26_PCIELINK2_LINK_EVENT_MAX
+ #define MPI26_PCIELINK2_LINK_EVENT_MAX (1)
+ #endif
+
+ typedef struct _MPI26_CONFIG_PAGE_PCIELINK_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK2_LINK_EVENT
+ LinkEvent[MPI26_PCIELINK2_LINK_EVENT_MAX]; /*0x10 */
+ } MPI26_CONFIG_PAGE_PCIELINK_2, *PTR_MPI26_CONFIG_PAGE_PCIELINK_2,
+ Mpi26PcieLinkPage2_t, *pMpi26PcieLinkPage2_t;
+
+ #define MPI26_PCIELINK2_PAGEVERSION (0x00)
+
+ /*PCIe Link Page 3 */
+
+ typedef struct _MPI26_PCIELINK3_LINK_EVENT_CONFIG {
+ U8 LinkEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+ } MPI26_PCIELINK3_LINK_EVENT_CONFIG, *PTR_MPI26_PCIELINK3_LINK_EVENT_CONFIG,
+ Mpi26PcieLink3LinkEventConfig_t, *pMpi26PcieLink3LinkEventConfig_t;
+
+ /*values for LinkEventCode field */
+ #define MPI26_PCIELINK3_EVTCODE_NO_EVENT (0x00)
+ #define MPI26_PCIELINK3_EVTCODE_CORRECTABLE_ERROR_RECEIVED (0x01)
+ #define MPI26_PCIELINK3_EVTCODE_NON_FATAL_ERROR_RECEIVED (0x02)
+ #define MPI26_PCIELINK3_EVTCODE_FATAL_ERROR_RECEIVED (0x03)
+ #define MPI26_PCIELINK3_EVTCODE_DATA_LINK_ERROR_DETECTED (0x04)
+ #define MPI26_PCIELINK3_EVTCODE_TRANSACTION_LAYER_ERROR_DETECTED (0x05)
+ #define MPI26_PCIELINK3_EVTCODE_TLP_ECRC_ERROR_DETECTED (0x06)
+ #define MPI26_PCIELINK3_EVTCODE_POISONED_TLP (0x07)
+ #define MPI26_PCIELINK3_EVTCODE_RECEIVED_NAK_DLLP (0x08)
+ #define MPI26_PCIELINK3_EVTCODE_SENT_NAK_DLLP (0x09)
+ #define MPI26_PCIELINK3_EVTCODE_LTSSM_RECOVERY_STATE (0x0A)
+ #define MPI26_PCIELINK3_EVTCODE_LTSSM_RXL0S_STATE (0x0B)
+ #define MPI26_PCIELINK3_EVTCODE_LTSSM_TXL0S_STATE (0x0C)
+ #define MPI26_PCIELINK3_EVTCODE_LTSSM_L1_STATE (0x0D)
+ #define MPI26_PCIELINK3_EVTCODE_LTSSM_DISABLED_STATE (0x0E)
+ #define MPI26_PCIELINK3_EVTCODE_LTSSM_HOT_RESET_STATE (0x0F)
+ #define MPI26_PCIELINK3_EVTCODE_SYSTEM_ERROR (0x10)
+ #define MPI26_PCIELINK3_EVTCODE_DECODE_ERROR (0x11)
+ #define MPI26_PCIELINK3_EVTCODE_DISPARITY_ERROR (0x12)
+
+ /*values for the CounterType field */
+ #define MPI26_PCIELINK3_COUNTER_TYPE_WRAPPING (0x00)
+ #define MPI26_PCIELINK3_COUNTER_TYPE_SATURATING (0x01)
+ #define MPI26_PCIELINK3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+ /*values for the TimeUnits field */
+ #define MPI26_PCIELINK3_TM_UNITS_10_MICROSECONDS (0x00)
+ #define MPI26_PCIELINK3_TM_UNITS_100_MICROSECONDS (0x01)
+ #define MPI26_PCIELINK3_TM_UNITS_1_MILLISECOND (0x02)
+ #define MPI26_PCIELINK3_TM_UNITS_10_MILLISECONDS (0x03)
+
+ /*values for the ThresholdFlags field */
+ #define MPI26_PCIELINK3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+ /*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLinkEvents at runtime.
+ */
+ #ifndef MPI26_PCIELINK3_LINK_EVENT_MAX
+ #define MPI26_PCIELINK3_LINK_EVENT_MAX (1)
+ #endif
+
+ typedef struct _MPI26_CONFIG_PAGE_PCIELINK_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 Link; /*0x08 */
+ U8 Reserved1; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U8 NumLinkEvents; /*0x0C */
+ U8 Reserved3; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ MPI26_PCIELINK3_LINK_EVENT_CONFIG
+ LinkEventConfig[MPI26_PCIELINK3_LINK_EVENT_MAX]; /*0x10 */
+ } MPI26_CONFIG_PAGE_PCIELINK_3, *PTR_MPI26_CONFIG_PAGE_PCIELINK_3,
+ Mpi26PcieLinkPage3_t, *pMpi26PcieLinkPage3_t;
+
+ #define MPI26_PCIELINK3_PAGEVERSION (0x00)
+
+
#endif
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.20
+ * mpi2_init.h Version: 02.00.21
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
* 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
* 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message.
* 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
+ * 01-21-16 02.00.21 Modified MPI26_SCSITASKMGMT_MSGFLAGS_PCIE* defines to
+ * be unique within first 32 characters.
* --------------------------------------------------------------------------
*/
} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+ /*SCSI IO Reply MsgFlags bits */
+ #define MPI26_SCSIIO_REPLY_MSGFLAGS_REFTAG_OBSERVED_VALID (0x01)
+ #define MPI26_SCSIIO_REPLY_MSGFLAGS_GUARD_OBSERVED_VALID (0x02)
+ #define MPI26_SCSIIO_REPLY_MSGFLAGS_APPTAG_OBSERVED_VALID (0x04)
+
/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
#define MPI2_SCSI_STATUS_GOOD (0x00)
/*MsgFlags bits */
#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+ #define MPI26_SCSITASKMGMT_MSGFLAGS_HOT_RESET_PCIE (0x00)
#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+ #define MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE (0x18)
/*SCSI Task Management Reply Message */
typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2015 Avago Technologies. All rights reserved.
*
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.27
+ * mpi2_ioc.h Version: 02.00.32
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
* Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
* MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
* Added MPI26_CTRL_OP_SHUTDOWN.
- * 08-25-15 02.00.27 Added IC ARCH Class based signature defines
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines.
+ * Added MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED event.
+ * Added ConfigurationFlags field to IOCInit message to
+ * support NVMe SGL format control.
+ * Added PCIe SRIOV support.
+ * 02-17-16 02.00.28 Added SAS 4 22.5 Gb/s speed support.
+ * Added PCIe 4 16.0 GT/sec speed support.
+ * Removed AHCI support.
+ * Removed SOP support.
+ * 07-01-16 02.00.29 Added Archclass for 4008 product.
+ * Added IOCException MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED.
+ * 08-23-16 02.00.30 Added new defines for the ImageType field of FWDownload
+ * Request Message.
+ * Added new defines for the ImageType field of FWUpload
+ * Request Message.
+ * Added new values for the RegionType field in the Layout
+ * Data sections of the FLASH Layout Extended Image Data.
+ * Added new defines for the ReasonCode field of
+ * Active Cable Exception Event.
+ * Added MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE and
+ * MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE.
+ * 11-23-16 02.00.31 Added MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR and
+ * MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR.
+ * 02-02-17 02.00.32 Added MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP.
+ * Added MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT and related
+ * defines for the ReasonCode field.
* --------------------------------------------------------------------------
*/
#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+ /*ConfigurationFlags */
+ #define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001)
+
/*minimum depth for a Reply Descriptor Post Queue */
#define MPI2_RDPQ_DEPTH_MIN (16)
U16 MinDevHandle; /*0x3C */
U8 CurrentHostPageSize; /* 0x3E */
U8 Reserved4; /* 0x3F */
+ U8 SGEModifierMask; /*0x40 */
+ U8 SGEModifierValue; /*0x41 */
+ U8 SGEModifierShift; /*0x42 */
+ U8 Reserved5; /*0x43 */
} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
/*IOCExceptions */
+ #define MPI2_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0400)
#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200)
#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
/*ProductID field uses MPI2_FW_HEADER_PID_ */
/*IOCCapabilities */
+ #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000)
#define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
/*ProtocolFlags */
+ #define MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES (0x0008)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+ #define MPI2_PORTFACTS_PORTTYPE_TRI_MODE (0x40)
+
/****************************************************************************
* PortEnable message
#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+ #define MPI2_EVENT_ENCL_DEVICE_STATUS_CHANGE (0x001D)
#define MPI2_EVENT_IR_VOLUME (0x001E)
#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
#define MPI2_EVENT_HOST_MESSAGE (0x0028)
#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+ #define MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE (0x0030)
+ #define MPI2_EVENT_PCIE_ENUMERATION (0x0031)
+ #define MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x0032)
+ #define MPI2_EVENT_PCIE_LINK_COUNTER (0x0033)
#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034)
+ #define MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035)
#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
U8 ReasonCode; /* 0x04 */
U8 ReceptacleID; /* 0x05 */
U16 Reserved1; /* 0x06 */
- } MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ } MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ *PTR_MPI25_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ Mpi25EventDataActiveCableExcept_t,
+ *pMpi25EventDataActiveCableExcept_t,
+ MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
*PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
Mpi26EventDataActiveCableExcept_t,
*pMpi26EventDataActiveCableExcept_t;
+ /*MPI2.5 defines for the ReasonCode field */
+ #define MPI25_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
+ #define MPI25_EVENT_ACTIVE_CABLE_PRESENT (0x01)
+ #define MPI25_EVENT_ACTIVE_CABLE_DEGRADED (0x02)
+
/* defines for ReasonCode field */
#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
#define MPI26_EVENT_ACTIVE_CABLE_PRESENT (0x01)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+ #define MPI26_EVENT_SAS_TOPO_LR_RATE_22_5 (0x0C)
/*values for the PhyStatus field */
#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
*PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
Mpi2EventDataSasEnclDevStatusChange_t,
- *pMpi2EventDataSasEnclDevStatusChange_t;
+ *pMpi2EventDataSasEnclDevStatusChange_t,
+ MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI26_EVENT_DATA_ENCL_DEV_STATUS_CHANGE,
+ Mpi26EventDataEnclDevStatusChange_t,
+ *pMpi26EventDataEnclDevStatusChange_t;
/*SAS Enclosure Device Status Change event ReasonCode values */
#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+ /*Enclosure Device Status Change event ReasonCode values */
+ #define MPI26_EVENT_ENCL_RC_ADDED (0x01)
+ #define MPI26_EVENT_ENCL_RC_NOT_RESPONDING (0x02)
+
+
+ typedef struct _MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR {
+ U16 DevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1[2]; /*0x04 */
+ U64 SASAddress; /*0x0C */
+ U32 Reserved2[2]; /*0x14 */
+ } MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
+ *PTR_MPI25_EVENT_DATA_SAS_DEVICE_DISCOVERY_ERROR,
+ Mpi25EventDataSasDeviceDiscoveryError_t,
+ *pMpi25EventDataSasDeviceDiscoveryError_t;
+
+ /*SAS Device Discovery Error Event data ReasonCode values */
+ #define MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01)
+ #define MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02)
+
/*SAS PHY Counter Event data */
typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
/*values for the DescriptorType field */
#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+ /*PCIe Device Status Change Event data (MPI v2.6 and later) */
+
+ typedef struct _MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 WWID; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+ } MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
+ *PTR_MPI26_EVENT_DATA_PCIE_DEVICE_STATUS_CHANGE,
+ Mpi26EventDataPCIeDeviceStatusChange_t,
+ *pMpi26EventDataPCIeDeviceStatusChange_t;
+
+ /*PCIe Device Status Change Event data ReasonCode values */
+ #define MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA (0x05)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED (0x07)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+ #define MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE (0x10)
+
+
+ /*PCIe Enumeration Event data (MPI v2.6 and later) */
+
+ typedef struct _MPI26_EVENT_DATA_PCIE_ENUMERATION {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 EnumerationStatus; /*0x04 */
+ } MPI26_EVENT_DATA_PCIE_ENUMERATION,
+ *PTR_MPI26_EVENT_DATA_PCIE_ENUMERATION,
+ Mpi26EventDataPCIeEnumeration_t,
+ *pMpi26EventDataPCIeEnumeration_t;
+
+ /*PCIe Enumeration Event data Flags values */
+ #define MPI26_EVENT_PCIE_ENUM_DEVICE_CHANGE (0x02)
+ #define MPI26_EVENT_PCIE_ENUM_IN_PROGRESS (0x01)
+
+ /*PCIe Enumeration Event data ReasonCode values */
+ #define MPI26_EVENT_PCIE_ENUM_RC_STARTED (0x01)
+ #define MPI26_EVENT_PCIE_ENUM_RC_COMPLETED (0x02)
+
+ /*PCIe Enumeration Event data EnumerationStatus values */
+ #define MPI26_EVENT_PCIE_ENUM_ES_MAX_SWITCHES_EXCEED (0x40000000)
+ #define MPI26_EVENT_PCIE_ENUM_ES_MAX_DEVICES_EXCEED (0x20000000)
+ #define MPI26_EVENT_PCIE_ENUM_ES_RESOURCES_EXHAUSTED (0x10000000)
+
+
+ /*PCIe Topology Change List Event data (MPI v2.6 and later) */
+
+ /*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+ #ifndef MPI26_EVENT_PCIE_TOPO_PORT_COUNT
+ #define MPI26_EVENT_PCIE_TOPO_PORT_COUNT (1)
+ #endif
+
+ typedef struct _MPI26_EVENT_PCIE_TOPO_PORT_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 PortStatus; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U8 CurrentPortInfo; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 PreviousPortInfo; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ } MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
+ *PTR_MPI26_EVENT_PCIE_TOPO_PORT_ENTRY,
+ Mpi26EventPCIeTopoPortEntry_t,
+ *pMpi26EventPCIeTopoPortEntry_t;
+
+ /*PCIe Topology Change List Event data PortStatus values */
+ #define MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED (0x01)
+ #define MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING (0x02)
+ #define MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED (0x03)
+ #define MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE (0x04)
+ #define MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING (0x05)
+
+ /*PCIe Topology Change List Event data defines for CurrentPortInfo and
+ *PreviousPortInfo
+ */
+ #define MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK (0xF0)
+ #define MPI26_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
+ #define MPI26_EVENT_PCIE_TOPO_PI_1_LANE (0x10)
+ #define MPI26_EVENT_PCIE_TOPO_PI_2_LANES (0x20)
+ #define MPI26_EVENT_PCIE_TOPO_PI_4_LANES (0x30)
+ #define MPI26_EVENT_PCIE_TOPO_PI_8_LANES (0x40)
+
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_5_0 (0x03)
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_8_0 (0x04)
+ #define MPI26_EVENT_PCIE_TOPO_PI_RATE_16_0 (0x05)
+
+ typedef struct _MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 SwitchDevHandle; /*0x02 */
+ U8 NumPorts; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPortNum; /*0x09 */
+ U8 SwitchStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI26_EVENT_PCIE_TOPO_PORT_ENTRY
+ PortEntry[MPI26_EVENT_PCIE_TOPO_PORT_COUNT]; /*0x0C */
+ } MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI26_EVENT_DATA_PCIE_TOPOLOGY_CHANGE_LIST,
+ Mpi26EventDataPCIeTopologyChangeList_t,
+ *pMpi26EventDataPCIeTopologyChangeList_t;
+
+ /*PCIe Topology Change List Event data SwitchStatus values */
+ #define MPI26_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH (0x00)
+ #define MPI26_EVENT_PCIE_TOPO_SS_ADDED (0x01)
+ #define MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
+ #define MPI26_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
+ #define MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
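The same placeholder-array convention applies here: NumEntries bounds PortEntry[], and CurrentPortInfo/PreviousPortInfo pack lane count and link rate into the high and low nibbles per the PI_LANE_MASK/PI_RATE_MASK values above. A hedged sketch of decoding one event payload (access pattern only, not driver code; byte-swapping of multi-byte fields is omitted):

/* Sketch only: decode a PCIe Topology Change List event payload. */
static void show_pcie_topo_event(Mpi26EventDataPCIeTopologyChangeList_t *ev)
{
	unsigned int i;

	for (i = 0; i < ev->NumEntries; i++) {
		Mpi26EventPCIeTopoPortEntry_t *pe = &ev->PortEntry[i];

		pr_info("port %u: status %#x, lanes %#x, rate %#x\n",
			ev->StartPortNum + i, pe->PortStatus,
			pe->CurrentPortInfo & MPI26_EVENT_PCIE_TOPO_PI_LANE_MASK,
			pe->CurrentPortInfo & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK);
	}
}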
+
+ /*PCIe Link Counter Event data (MPI v2.6 and later) */
+
+ typedef struct _MPI26_EVENT_DATA_PCIE_LINK_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 LinkEventCode; /*0x0C */
+ U8 LinkNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 LinkEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+ } MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
+ *PTR_MPI26_EVENT_DATA_PCIE_LINK_COUNTER,
+ Mpi26EventDataPcieLinkCounter_t, *pMpi26EventDataPcieLinkCounter_t;
+
+
+ /*use MPI26_PCIELINK3_EVTCODE_ values from mpi2_cnfg.h for the LinkEventCode
+ *field
+ */
+
+ /*use MPI26_PCIELINK3_COUNTER_TYPE_ values from mpi2_cnfg.h for the CounterType
+ *field
+ */
+
+ /*use MPI26_PCIELINK3_TM_UNITS_ values from mpi2_cnfg.h for the TimeUnits
+ *field
+ */
+
+ /*use MPI26_PCIELINK3_TFLAGS_ values from mpi2_cnfg.h for the ThresholdFlags
+ *field
+ */
+
/****************************************************************************
* EventAck message
****************************************************************************/
#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
+ #define MPI2_FW_DOWNLOAD_ITYPE_CBB_BACKUP (0x0D)
+ #define MPI2_FW_DOWNLOAD_ITYPE_SBR (0x0E)
+ #define MPI2_FW_DOWNLOAD_ITYPE_SBR_BACKUP (0x0F)
+ #define MPI2_FW_DOWNLOAD_ITYPE_HIIM (0x10)
+ #define MPI2_FW_DOWNLOAD_ITYPE_HIIA (0x11)
+ #define MPI2_FW_DOWNLOAD_ITYPE_CTLR (0x12)
+ #define MPI2_FW_DOWNLOAD_ITYPE_IMR_FIRMWARE (0x13)
+ #define MPI2_FW_DOWNLOAD_ITYPE_MR_NVDATA (0x14)
#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
/*MPI v2.0 FWDownload TransactionContext Element */
#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
#define MPI2_FW_UPLOAD_ITYPE_CBB_BACKUP (0x0D)
+ #define MPI2_FW_UPLOAD_ITYPE_SBR (0x0E)
+ #define MPI2_FW_UPLOAD_ITYPE_SBR_BACKUP (0x0F)
+ #define MPI2_FW_UPLOAD_ITYPE_HIIM (0x10)
+ #define MPI2_FW_UPLOAD_ITYPE_HIIA (0x11)
+ #define MPI2_FW_UPLOAD_ITYPE_CTLR (0x12)
+ #define MPI2_FW_UPLOAD_ITYPE_IMR_FIRMWARE (0x13)
+ #define MPI2_FW_UPLOAD_ITYPE_MR_NVDATA (0x14)
+
/*MPI v2.0 FWUpload TransactionContext Element */
typedef struct _MPI2_FW_UPLOAD_TCSGE {
#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
/* legacy (0x5AEAA55A) */
+ #define MPI26_FW_HEADER_SIGNATURE0_ARC_3 (0x02)
#define MPI26_FW_HEADER_SIGNATURE0 \
(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
#define MPI26_FW_HEADER_SIGNATURE0_3516 \
(MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
+ #define MPI26_FW_HEADER_SIGNATURE0_4008 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_3)
/*Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
#define MPI2_FLASH_REGION_COMMON_BOOT_BLOCK (0x0A)
#define MPI2_FLASH_REGION_INIT (MPI2_FLASH_REGION_COMMON_BOOT_BLOCK)
#define MPI2_FLASH_REGION_CBB_BACKUP (0x0D)
+ #define MPI2_FLASH_REGION_SBR (0x0E)
+ #define MPI2_FLASH_REGION_SBR_BACKUP (0x0F)
+ #define MPI2_FLASH_REGION_HIIM (0x10)
+ #define MPI2_FLASH_REGION_HIIA (0x11)
+ #define MPI2_FLASH_REGION_CTLR (0x12)
+ #define MPI2_FLASH_REGION_IMR_FIRMWARE (0x13)
+ #define MPI2_FLASH_REGION_MR_NVDATA (0x14)
/*ImageRevision */
#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
#define MPI26_CTRL_OP_DEV_ENABLE_PERSIST_CONNECTION (0x17)
#define MPI26_CTRL_OP_DEV_DISABLE_PERSIST_CONNECTION (0x18)
#define MPI26_CTRL_OP_DEV_CLOSE_PERSIST_CONNECTION (0x19)
+ #define MPI26_CTRL_OP_ENABLE_NVME_SGL_FORMAT (0x1A)
+ #define MPI26_CTRL_OP_DISABLE_NVME_SGL_FORMAT (0x1B)
#define MPI26_CTRL_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2000-2014 Avago Technologies. All rights reserved.
*
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.13
+ * mpi2_tool.h Version: 02.00.14
*
* Version History
* ---------------
* 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
* 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC.
* 11-18-14 02.00.13 Updated copyright information.
+ * 08-25-16 02.00.14 Added new values for the Flags field of Toolbox Clean
+ * Tool Request Message.
* --------------------------------------------------------------------------
*/
#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000)
#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+ #define MPI2_TOOLBOX_CLEAN_SBR (0x00800000)
+ #define MPI2_TOOLBOX_CLEAN_SBR_BACKUP (0x00400000)
+ #define MPI2_TOOLBOX_CLEAN_HIIM (0x00200000)
+ #define MPI2_TOOLBOX_CLEAN_HIIA (0x00100000)
+ #define MPI2_TOOLBOX_CLEAN_CTLR (0x00080000)
+ #define MPI2_TOOLBOX_CLEAN_IMR_FIRMWARE (0x00040000)
+ #define MPI2_TOOLBOX_CLEAN_MR_NVDATA (0x00020000)
+ #define MPI2_TOOLBOX_CLEAN_RESERVED_5_16 (0x0001FFE0)
+ #define MPI2_TOOLBOX_CLEAN_ALL_BUT_MPB (0x00000010)
+ #define MPI2_TOOLBOX_CLEAN_ENTIRE_FLASH (0x00000008)
#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
complete(&task->slow_task->completion);
}
-static void pm8001_tmf_timedout(unsigned long data)
+static void pm8001_tmf_timedout(struct timer_list *t)
{
- struct sas_task *task = (struct sas_task *)data;
+ struct sas_task_slow *slow = from_timer(slow, t, timer);
+ struct sas_task *task = slow->task;
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
task->task_proto = dev->tproto;
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = pm8001_task_done;
- task->slow_task->timer.data = (unsigned long)task;
- task->slow_task->timer.function = pm8001_tmf_timedout;
+ task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
task->dev = dev;
task->task_proto = dev->tproto;
task->task_done = pm8001_task_done;
- task->slow_task->timer.data = (unsigned long)task;
- task->slow_task->timer.function = pm8001_tmf_timedout;
+ task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
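These hunks follow the tree-wide timer API conversion: the callback now takes a struct timer_list * and recovers its container with from_timer(), a container_of() keyed on the embedded timer member, so the old .data assignment goes away; the (TIMER_FUNC_TYPE) cast is a transitional shim for timers still initialized field-by-field. A minimal standalone sketch of the new pattern, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_ctx {
	struct timer_list timer;	/* embedded, not a pointer */
	bool expired;
};

static void demo_timeout(struct timer_list *t)
{
	/* recover the containing object from the timer member */
	struct demo_ctx *ctx = from_timer(ctx, t, timer);

	ctx->expired = true;
}

static void demo_arm(struct demo_ctx *ctx)
{
	ctx->expired = false;
	timer_setup(&ctx->timer, demo_timeout, 0);
	ctx->timer.expires = jiffies + 30 * HZ;
	add_timer(&ctx->timer);
}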
int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
- u32 tag = 0xdeadbeef;
+ u32 tag;
u32 device_id;
struct domain_device *dev ;
- struct pm8001_hba_info *pm8001_ha = NULL;
- struct pm8001_ccb_info *ccb;
+ struct pm8001_hba_info *pm8001_ha;
struct scsi_lun lun;
struct pm8001_device *pm8001_dev;
struct pm8001_tmf_task tmf_task;
- int rc = TMF_RESP_FUNC_FAILED;
+ int rc = TMF_RESP_FUNC_FAILED, ret;
+ u32 phy_id;
+ struct sas_task_slow slow_task;
if (unlikely(!task || !task->lldd_task || !task->dev))
- return rc;
+ return TMF_RESP_FUNC_FAILED;
+ dev = task->dev;
+ pm8001_dev = dev->lldd_dev;
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ device_id = pm8001_dev->device_id;
+ phy_id = pm8001_dev->attached_phy;
+ rc = pm8001_find_tag(task, &tag);
+ if (rc == 0) {
+ pm8001_printk("no tag for task:%p\n", task);
+ return TMF_RESP_FUNC_FAILED;
+ }
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
- rc = TMF_RESP_FUNC_COMPLETE;
- goto out;
+ return TMF_RESP_FUNC_COMPLETE;
+ }
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ if (task->slow_task == NULL) {
+ init_completion(&slow_task.completion);
+ task->slow_task = &slow_task;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (task->task_proto & SAS_PROTOCOL_SSP) {
struct scsi_cmnd *cmnd = task->uldd_task;
- dev = task->dev;
- ccb = task->lldd_task;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
int_to_scsilun(cmnd->device->lun, &lun);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
- }
- device_id = pm8001_dev->device_id;
- PM8001_EH_DBG(pm8001_ha,
- pm8001_printk("abort io to deviceid= %d\n", device_id));
tmf_task.tmf = TMF_ABORT_TASK;
tmf_task.tag_of_task_to_be_managed = tag;
rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
pm8001_dev->sas_device, 0, tag);
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
- dev = task->dev;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
+ if (pm8001_ha->chip_id == chip_8006) {
+ DECLARE_COMPLETION_ONSTACK(completion_reset);
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+
+ /* 1. Set Device state as Recovery */
+ pm8001_dev->setds_completion = &completion;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x03);
+ wait_for_completion(&completion);
+
+ /* 2. Send Phy Control Hard Reset */
+ reinit_completion(&completion);
+ phy->reset_success = false;
+ phy->enable_completion = &completion;
+ phy->reset_completion = &completion_reset;
+ ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_HARD_RESET);
+ if (ret)
+ goto out;
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for local phy ctl\n"));
+ wait_for_completion(&completion);
+ if (!phy->reset_success)
+ goto out;
+
+ /* 3. Wait for Port Reset complete / Port reset TMO */
+ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for Port reset\n"));
+ wait_for_completion(&completion_reset);
+ if (phy->port_reset_status)
+ goto out;
+
+ /*
+ * 4. SATA Abort ALL
+ * we wait for the task to be aborted so that the task
+ * is removed from the ccb. on success the caller is
+ * going to free the task.
+ */
+ ret = pm8001_exec_internal_task_abort(pm8001_ha,
+ pm8001_dev, pm8001_dev->sas_device, 1, tag);
+ if (ret)
+ goto out;
+ ret = wait_for_completion_timeout(
+ &task->slow_task->completion,
+ PM8001_TASK_TIMEOUT * HZ);
+ if (!ret)
+ goto out;
+
+ /* 5. Set Device State as Operational */
+ reinit_completion(&completion);
+ pm8001_dev->setds_completion = &completion;
+ PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
+ pm8001_dev, 0x01);
+ wait_for_completion(&completion);
+ } else {
+ rc = pm8001_exec_internal_task_abort(pm8001_ha,
+ pm8001_dev, pm8001_dev->sas_device, 0, tag);
}
- rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
- pm8001_dev->sas_device, 0, tag);
+ rc = TMF_RESP_FUNC_COMPLETE;
} else if (task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
- dev = task->dev;
- pm8001_dev = dev->lldd_dev;
- pm8001_ha = pm8001_find_ha_by_dev(dev);
- rc = pm8001_find_tag(task, &tag);
- if (rc == 0) {
- printk(KERN_INFO "No such tag in %s\n", __func__);
- rc = TMF_RESP_FUNC_FAILED;
- return rc;
- }
rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
pm8001_dev->sas_device, 0, tag);
}
out:
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (task->slow_task == &slow_task)
+ task->slow_task = NULL;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
if (rc != TMF_RESP_FUNC_COMPLETE)
pm8001_printk("rc= %d\n", rc);
return rc;
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
+ extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *,
+ port_id_t);
extern void qla2x00_update_fcports(scsi_qla_host_t *);
extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
+ extern int ql2xenablemsix;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
*/
extern struct scsi_host_template qla2xxx_driver_template;
extern struct scsi_transport_template *qla2xxx_transport_vport_template;
-extern void qla2x00_timer(scsi_qla_host_t *);
-extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
+extern void qla2x00_timer(struct timer_list *);
+extern void qla2x00_start_timer(scsi_qla_host_t *, unsigned long);
extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
extern int qla24xx_disable_vp (scsi_qla_host_t *);
extern int qla24xx_enable_vp (scsi_qla_host_t *);
uint16_t *);
int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
struct port_database_24xx *);
+ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
+ void *, uint16_t);
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
extern void qla2x00_sp_free(void *);
-extern void qla2x00_sp_timeout(unsigned long);
+extern void qla2x00_sp_timeout(struct timer_list *);
extern void qla2x00_bsg_job_done(void *, int);
extern void qla2x00_bsg_sp_free(void *);
extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* SRB Extensions ---------------------------------------------------------- */
void
-qla2x00_sp_timeout(unsigned long __data)
+qla2x00_sp_timeout(struct timer_list *t)
{
- srb_t *sp = (srb_t *)__data;
+ srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
struct srb_iocb *iocb;
scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
ql_log(ql_log_warn, vha, 0xd043,
"Failed to allocate port database structure.\n");
goto done_free_sp;
}
- memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
mb = sp->u.iocb_cmd.u.mbx.out_mb;
mb[0] = MBC_GET_PORT_DATABASE;
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
default:
+ if (ea->fcport->n2n_flag) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post fc4 prli\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->fc4f_nvme = 0;
+ ea->fcport->n2n_flag = 0;
+ qla24xx_post_prli_work(vha, ea->fcport);
+ }
ql_dbg(ql_dbg_disc, vha, 0x2119,
"%s %d %8phC unhandle event of %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[0]);
return (rval);
}
+ /*
+ * qla24xx_n2n_handle_login - handle login in an N2N (point-to-point,
+ * direct attach) topology.
+ *
+ * Input:
+ * vha = adapter block pointer.
+ * fcport = remote port.
+ *
+ * Returns:
+ * QLA_SUCCESS on success, error status otherwise.
+ */
+ static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha,
+ fc_port_t *fcport)
+ {
+ struct qla_hw_data *ha = vha->hw;
+ int res = QLA_SUCCESS, rval;
+ int greater_wwpn = 0;
+ int logged_in = 0;
+
+ if (ha->current_topology != ISP_CFG_N)
+ return res;
+
+ if (wwn_to_u64(vha->port_name) >
+ wwn_to_u64(vha->n2n_port_name)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2002,
+ "HBA WWPN is greater %llx > target %llx\n",
+ wwn_to_u64(vha->port_name),
+ wwn_to_u64(vha->n2n_port_name));
+ greater_wwpn = 1;
+ fcport->d_id.b24 = vha->n2n_id;
+ }
+
+ fcport->loop_id = vha->loop_id;
+ fcport->fc4f_nvme = 0;
+ fcport->query = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x4001,
+ "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n",
+ fcport->d_id.b24, vha->loop_id);
+
+ /* Fill in member data. */
+ if (!greater_wwpn) {
+ rval = qla2x00_get_port_database(vha, fcport, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x1051,
+ "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n",
+ fcport->current_login_state, fcport->last_login_state,
+ fcport->d_id.b24, fcport->loop_id, rval);
+
+ if (((fcport->current_login_state & 0xf) == 0x4) ||
+ ((fcport->current_login_state & 0xf) == 0x6))
+ logged_in = 1;
+ }
+
+ if (logged_in || greater_wwpn) {
+ if (!vha->nvme_local_port && vha->flags.nvme_enabled)
+ qla_nvme_register_hba(vha);
+
+ /* Set connected N_Port d_id */
+ if (vha->flags.nvme_enabled)
+ fcport->fc4f_nvme = 1;
+
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ fcport->disc_state = DSC_GNL;
+ fcport->n2n_flag = 1;
+ fcport->flags = 3;
+ vha->hw->flags.gpsc_supported = 0;
+
+ if (greater_wwpn) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "%s %d PLOGI ELS %8phC\n",
+ __func__, __LINE__, fcport->port_name);
+
+ res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
+ fcport, fcport->d_id);
+ }
+
+ if (res != QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xd04d,
+ "PLOGI Failed: portid=%06x - retrying\n",
+ fcport->d_id.b24);
+ res = QLA_SUCCESS;
+ } else {
+ /* State 0x6 means FCP PRLI complete */
+ if ((fcport->current_login_state & 0xf) == 0x6) {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post GPDB work\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->chip_reset =
+ vha->hw->base_qpair->chip_reset;
+ qla24xx_post_gpdb_work(vha, fcport, 0);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post NVMe PRLI\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+ }
+ } else {
+ /* Wait for next database change */
+ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
+ }
+ return res;
+ }
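The WWPN comparison above decides which side initiates: the port with the numerically larger WWPN claims the N2N d_id and sends the PLOGI ELS, while the smaller side reads its port database and only proceeds once the remote end has logged in (login state 0x4 or 0x6); otherwise it sets N2N_LOGIN_NEEDED and waits for the next database change.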
/*
* qla2x00_configure_local_loop
}
}
+ /* Initiate N2N login. */
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+ rval = qla24xx_n2n_handle_login(vha, new_fcport);
+ if (rval != QLA_SUCCESS)
+ goto cleanup_allocation;
+ return QLA_SUCCESS;
+ }
+
/* Add devices to port list. */
id_iter = (char *)ha->gid_list;
for (index = 0; index < entries; index++) {
"Failed to retrieve fcport information "
"-- get_port_database=%x, loop_id=0x%04x.\n",
rval2, new_fcport->loop_id);
- ql_dbg(ql_dbg_disc, vha, 0x2105,
- "Scheduling resync.\n");
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- continue;
+ /* Skip retry if N2N */
+ if (ha->current_topology != ISP_CFG_N) {
+ ql_dbg(ql_dbg_disc, vha, 0x2105,
+ "Scheduling resync.\n");
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ continue;
+ }
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
icb->firmware_options_3 |= BIT_0;
+ if (IS_QLA27XX(ha)) {
+ icb->firmware_options_3 |= BIT_8;
+ ql_dbg(ql_dbg_init, vha, 0x0075,
+ "Enabling direct connection.\n");
+ }
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
return NULL;
}
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
if (qpair == NULL) {
ql_log(ql_log_warn, vha, 0x0182,
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
+ qla2x00_start_timer(vha, WATCH_INTERVAL);
vha->req = base_vha->req;
host->can_queue = base_vha->req->length + 128;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair, *tqpair;
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
qp_list_elem)
qla2xxx_delete_qpair(vha, qpair);
"Detect SFP range and set appropriate distance.\n"
"1 (Default): Enable\n");
+ int ql2xenablemsix = 1;
+ module_param(ql2xenablemsix, int, 0444);
+ MODULE_PARM_DESC(ql2xenablemsix,
+ "Set to enable MSI or MSI-X interrupt mechanism.\n"
+ " Default is 1, enable MSI-X interrupt mechanism.\n"
+ " 0 -- enable traditional pin-based mechanism.\n"
+ " 1 -- enable MSI-X interrupt mechanism.\n"
+ " 2 -- enable MSI interrupt mechanism.\n");
+
/*
* SCSI host template entry points
*/
*/
__inline__ void
-qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
+qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
- init_timer(&vha->timer);
+ timer_setup(&vha->timer, qla2x00_timer, 0);
vha->timer.expires = jiffies + interval * HZ;
- vha->timer.data = (unsigned long)vha;
- vha->timer.function = (void (*)(unsigned long))func;
add_timer(&vha->timer);
vha->timer_active = 1;
}
INIT_LIST_HEAD(&ha->base_qpair->nvme_done_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(rsp->qpair, smp_processor_id());
+ qla_cpu_update(rsp->qpair, raw_smp_processor_id());
ha->base_qpair->pdev = ha->pdev;
if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
qla_init_base_qpair(vha, req, rsp);
- if (ql2xmqsupport && ha->max_qpairs) {
+ if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL);
if (!ha->queue_pair_map) {
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
ha->msix_count = QLA_BASE_VECTORS;
- if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ if (!ql2xmqsupport || !ql2xnvmeenable ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
* By default, driver uses at least two msix vectors
* (default & rspq)
*/
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
ql_dbg(ql_dbg_init, base_vha, 0x0192,
"blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
- } else
- ql_dbg(ql_dbg_init, base_vha, 0x0193,
- "blk/scsi-mq disabled.\n");
+ } else {
+ if (ql2xnvmeenable) {
+ host->nr_hw_queues = ha->max_qpairs;
+ ql_dbg(ql_dbg_init, base_vha, 0x0194,
+ "FC-NVMe support is enabled, HW queues=%d\n",
+ host->nr_hw_queues);
+ } else {
+ ql_dbg(ql_dbg_init, base_vha, 0x0193,
+ "blk/scsi-mq disabled.\n");
+ }
+ }
qlt_probe_one_stage1(base_vha, ha);
ql_log(ql_log_fatal, base_vha, 0x00ed,
"Failed to start DPC thread.\n");
ret = PTR_ERR(ha->dpc_thread);
+ ha->dpc_thread = NULL;
goto probe_failed;
}
ql_dbg(ql_dbg_init, base_vha, 0x00ee,
*/
qla2xxx_wake_dpc(base_vha);
- INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
base_vha->host->irq = ha->pdev->irq;
/* Initialized the timer */
- qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
+ qla2x00_start_timer(base_vha, WATCH_INTERVAL);
ql_dbg(ql_dbg_init, base_vha, 0x00ef,
"Started qla2x00_timer with "
"interval=%d.\n", WATCH_INTERVAL);
if (pla)
qlt_plogi_ack_unref(vha, pla);
else
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_async_gffid(vha, fcport);
}
if (free_fcport) {
* Context: Interrupt
***************************************************************************/
void
-qla2x00_timer(scsi_qla_host_t *vha)
+qla2x00_timer(struct timer_list *t)
{
+ scsi_qla_host_t *vha = from_timer(vha, t, timer);
unsigned long cpu_flags = 0;
int start_dpc = 0;
int index;
switch (state) {
case pci_channel_io_normal:
ha->flags.eeh_busy = 0;
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
pci_disable_device(pdev);
/* Return back all IOs */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
case pci_channel_io_perm_failure:
ha->flags.pci_channel_io_perm_failure = 1;
qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
- if (ql2xmqsupport) {
+ if (ql2xmqsupport || ql2xnvmeenable) {
set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
if (logout_started) {
bool traced = false;
- while (!ACCESS_ONCE(sess->logout_completed)) {
+ while (!READ_ONCE(sess->logout_completed)) {
if (!traced) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
"%s: waiting for sess %p logout\n",
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ struct init_cb_24xx *icb;
if (!QLA_TGT_MODE_ENABLED())
return;
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ icb = (struct init_cb_24xx *)ha->init_cb;
+
+ if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
struct qla_msix_entry *msix = &ha->msix_entries[2];
- struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
icb->msix_atio = cpu_to_le16(msix->entry);
ql_dbg(ql_dbg_init, vha, 0xf072,
"Registering ICB vector 0x%x for atio que.\n",
msix->entry);
+ } else if (ql2xenablemsix == 0) {
+ icb->firmware_options_2 |= cpu_to_le32(BIT_26);
+ ql_dbg(ql_dbg_init, vha, 0xf07f,
+ "Registering INTx vector for ATIO.\n");
}
}
if (!QLA_TGT_MODE_ENABLED())
return;
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
} else {
+// SPDX-License-Identifier: GPL-2.0
#include <linux/blkdev.h>
#include <linux/init.h>
{"3PARdata", "VV", NULL, BLIST_REPORTLUN2},
{"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN},
{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
+ {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES},
{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
{"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
{"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
{"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
+ {"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
{"HITACHI", "DF500", "*", BLIST_REPORTLUN2},
{"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
{"HITACHI", "HUS1530", "*", BLIST_NO_DIF},
- {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2},
+ {"HITACHI", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES},
{"HITACHI", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
*/
to[from_length] = '\0';
} else {
- /*
- * space pad the string if it is short.
+ /*
+ * space pad the string if it is short.
*/
strncpy(&to[from_length], spaces,
to_length - from_length);
* @flags: if strflags NULL, use this flag value
*
* Description:
- * Create and add one dev_info entry for @vendor, @model, @strflags or
- * @flag. If @compatible, add to the tail of the list, do not space
- * pad, and set devinfo->compatible. The scsi_static_device_list entries
- * are added with @compatible 1 and @clfags NULL.
+ * Create and add one dev_info entry for @vendor, @model, @strflags or
+ * @flag. If @compatible, add to the tail of the list, do not space
+ * pad, and set devinfo->compatible. The scsi_static_device_list entries
+ * are added with @compatible 1 and @flags NULL.
*
* Returns: 0 OK, -error on failure.
**/
* @key: specify list to use
*
* Description:
- * Create and add one dev_info entry for @vendor, @model,
- * @strflags or @flag in list specified by @key. If @compatible,
- * add to the tail of the list, do not space pad, and set
- * devinfo->compatible. The scsi_static_device_list entries are
- * added with @compatible 1 and @clfags NULL.
+ * Create and add one dev_info entry for @vendor, @model,
+ * @strflags or @flag in list specified by @key. If @compatible,
+ * add to the tail of the list, do not space pad, and set
+ * devinfo->compatible. The scsi_static_device_list entries are
+ * added with @compatible 1 and @flags NULL.
*
* Returns: 0 OK, -error on failure.
**/
/**
* scsi_dev_info_list_find - find a matching dev_info list entry.
- * @vendor: vendor string
- * @model: model (product) string
+ * @vendor: full vendor string
+ * @model: full model (product) string
* @key: specify list to use
*
* Description:
* Finds the first dev_info entry matching @vendor, @model
- * in list specified by @key.
+ * in list specified by @key.
*
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
- size_t vmax, mmax;
+ size_t vmax, mmax, mlen;
const char *vskip, *mskip;
if (IS_ERR(devinfo_table))
dev_info_list) {
if (devinfo->compatible) {
/*
- * Behave like the older version of get_device_flags.
+ * vendor strings must be an exact match
*/
- if (memcmp(devinfo->vendor, vskip, vmax) ||
- (vmax < sizeof(devinfo->vendor) &&
- devinfo->vendor[vmax]))
+ if (vmax != strlen(devinfo->vendor) ||
+ memcmp(devinfo->vendor, vskip, vmax))
continue;
- if (memcmp(devinfo->model, mskip, mmax) ||
- (mmax < sizeof(devinfo->model) &&
- devinfo->model[mmax]))
+
+ /*
+ * @model specifies the full string, and
+ * must be larger or equal to devinfo->model
+ */
+ mlen = strlen(devinfo->model);
+ if (mmax < mlen || memcmp(devinfo->model, mskip, mlen))
continue;
return devinfo;
} else {
if (!memcmp(devinfo->vendor, vendor,
- sizeof(devinfo->vendor)) &&
- !memcmp(devinfo->model, model,
- sizeof(devinfo->model)))
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
return devinfo;
}
}
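Concretely: a compatible entry such as {"HITACHI", "OPEN-"} now requires the INQUIRY vendor (after skipping leading spaces) to equal "HITACHI" exactly, and matches any model whose full string merely starts with "OPEN-", e.g. "OPEN-V"; the old vmax-bounded memcmp could accept partial vendor matches in corner cases, which this tightens.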
* @dev_list: string of device flags to add
*
* Description:
- * Parse dev_list, and add entries to the scsi_dev_info_list.
- * dev_list is of the form "vendor:product:flag,vendor:product:flag".
- * dev_list is modified via strsep. Can be called for command line
- * addition, for proc or mabye a sysfs interface.
+ * Parse dev_list, and add entries to the scsi_dev_info_list.
+ * dev_list is of the form "vendor:product:flag,vendor:product:flag".
+ * dev_list is modified via strsep. Can be called for command line
+ * addition, for proc or maybe a sysfs interface.
*
* Returns: 0 if OK, -error on failure.
**/
return seq_open(file, &scsi_devinfo_seq_ops);
}
- /*
+ /*
* proc_scsi_dev_info_write - allow additions to scsi_dev_info_list via /proc.
*
* Description: Adds a black/white list entry for vendor and model with an
* scsi_init_devinfo - set up the dynamic device list.
*
* Description:
- * Add command line entries from scsi_dev_flags, then add
- * scsi_static_device_list entries to the scsi device info list.
+ * Add command line entries from scsi_dev_flags, then add
+ * scsi_static_device_list entries to the scsi device info list.
*/
int __init scsi_init_devinfo(void)
{
"threshold.\n");
}
+ if (sshdr->asc == 0x29) {
+ evt_type = SDEV_EVT_POWER_ON_RESET_OCCURRED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Power-on or device reset occurred\n");
+ }
+
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
+ sshdr.asc == 0x22 || /* Invalid function */
sshdr.asc == 0x24 || /* Invalid field in cdb */
- sshdr.asc == 0x26) { /* Parameter value invalid */
+ sshdr.asc == 0x26 || /* Parameter value invalid */
+ sshdr.asc == 0x27) { /* Write protected */
set_host_byte(scmd, DID_TARGET_FAILURE);
}
return SUCCESS;
* that it indicates SUCCESS.
*/
return SUCCESS;
+ case DID_SOFT_ERROR:
/*
* when the low level driver returns did_soft_error,
* it is responsible for keeping an internal retry counter
* in order to avoid endless loops (db)
- *
- * actually this is a bug in this function here. we should
- * be mindful of the maximum number of retries specified
- * and not get stuck in a loop.
*/
- case DID_SOFT_ERROR:
goto maybe_retry;
case DID_IMM_RETRY:
return NEEDS_RETRY;
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
- req = blk_get_request(sdev->request_queue,
+ req = blk_get_request_flags(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
rq->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
- req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
+ req->rq_flags |= rq_flags | RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
/*
* If the devices is blocked we defer normal commands.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_DEFER;
break;
default:
* special commands. In particular any user initiated
* command is not allowed.
*/
- if (!(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PREEMPT))
ret = BLKPREP_KILL;
break;
}
ret = scsi_setup_cmnd(sdev, req);
out:
- if (ret != BLKPREP_OK)
- cmd->flags &= ~SCMD_INITIALIZED;
return scsi_prep_return(q, req, ret);
}
*
* Returns: Nothing
*
- * Lock status: IO request lock assumed to be held when called.
+ * Lock status: request queue lock assumed to be held when called.
+ *
+ * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
+ * protection for ZBC disks.
*/
static void scsi_request_fn(struct request_queue *q)
__releases(q->queue_lock)
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scatterlist *sg;
- int ret;
scsi_init_command(sdev, cmd);
blk_mq_start_request(req);
- ret = scsi_setup_cmnd(sdev, req);
- if (ret != BLK_STS_OK)
- cmd->flags &= ~SCMD_INITIALIZED;
- return ret;
+ return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request);
}
+static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ atomic_dec(&sdev->device_busy);
+ put_device(&sdev->sdev_gendev);
+}
+
+static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
+{
+ struct request_queue *q = hctx->queue;
+ struct scsi_device *sdev = q->queuedata;
+
+ if (!get_device(&sdev->sdev_gendev))
+ goto out;
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto out_put_device;
+
+ return true;
+
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+out:
+ return false;
+}
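These two hooks move the per-device queue-depth accounting out of queue_rq(): blk-mq now calls get_budget() before dispatching a request, reserving a device reference and a device_busy slot via scsi_dev_queue_ready(), and calls put_budget() whenever a request it budgeted for is not dispatched. That is why the failure paths in scsi_queue_rq() below collapse into a single out_put_budget label.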
+
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
ret = prep_to_mq(scsi_prep_state_check(sdev, req));
if (ret != BLK_STS_OK)
- goto out;
+ goto out_put_budget;
ret = BLK_STS_RESOURCE;
- if (!get_device(&sdev->sdev_gendev))
- goto out;
-
- if (!scsi_dev_queue_ready(q, sdev))
- goto out_put_device;
if (!scsi_target_queue_ready(shost, sdev))
- goto out_dec_device_busy;
+ goto out_put_budget;
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ atomic_dec(&shost->host_busy);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
-out_dec_device_busy:
- atomic_dec(&sdev->device_busy);
-out_put_device:
- put_device(&sdev->sdev_gendev);
-out:
+out_put_budget:
+ scsi_mq_put_budget(hctx);
switch (ret) {
case BLK_STS_OK:
break;
}
static const struct blk_mq_ops scsi_mq_ops = {
+ .get_budget = scsi_mq_get_budget,
+ .put_budget = scsi_mq_put_budget,
.queue_rq = scsi_queue_rq,
.complete = scsi_softirq_done,
.timeout = scsi_timeout,
}
sdev->sdev_state = state;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
return 0;
illegal:
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
break;
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
+ envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED";
+ break;
default:
/* do nothing */
break;
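Userspace sees this as a "change" uevent carrying SDEV_UA=POWER_ON_RESET_OCCURRED in its environment, so a udev rule along the lines of ACTION=="change", ENV{SDEV_UA}=="POWER_ON_RESET_OCCURRED", RUN+="..." (hypothetical) can re-apply per-device settings that the reset would have cleared.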
case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
case SDEV_EVT_LUN_CHANGE_REPORTED:
case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+ case SDEV_EVT_POWER_ON_RESET_OCCURRED:
default:
/* do nothing */
break;
int
scsi_device_quiesce(struct scsi_device *sdev)
{
+ struct request_queue *q = sdev->request_queue;
int err;
+ /*
+ * It is allowed to call scsi_device_quiesce() multiple times from
+ * the same context but concurrent scsi_device_quiesce() calls are
+ * not allowed.
+ */
+ WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
+
+ blk_set_preempt_only(q);
+
+ blk_mq_freeze_queue(q);
+ /*
+ * Ensure that the effect of blk_set_preempt_only() will be visible
+ * for percpu_ref_tryget() callers that occur after the queue
+ * unfreeze even if the queue was already frozen before this function
+ * was called. See also https://lwn.net/Articles/573497/.
+ */
+ synchronize_rcu();
+ blk_mq_unfreeze_queue(q);
+
mutex_lock(&sdev->state_mutex);
err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ if (err == 0)
+ sdev->quiesced_by = current;
+ else
+ blk_clear_preempt_only(q);
mutex_unlock(&sdev->state_mutex);
- if (err)
- return err;
-
- scsi_run_queue(sdev->request_queue);
- while (atomic_read(&sdev->device_busy)) {
- msleep_interruptible(200);
- scsi_run_queue(sdev->request_queue);
- }
- return 0;
+ return err;
}
EXPORT_SYMBOL(scsi_device_quiesce);
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- if (sdev->sdev_state == SDEV_QUIESCE &&
- scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
- scsi_run_queue(sdev->request_queue);
+ WARN_ON_ONCE(!sdev->quiesced_by);
+ sdev->quiesced_by = NULL;
+ blk_clear_preempt_only(sdev->request_queue);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
case SDEV_BLOCK:
case SDEV_TRANSPORT_OFFLINE:
sdev->sdev_state = new_state;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break;
case SDEV_CREATED_BLOCK:
if (new_state == SDEV_TRANSPORT_OFFLINE ||
sdev->sdev_state = new_state;
else
sdev->sdev_state = SDEV_CREATED;
- sysfs_notify(&sdev->sdev_gendev.kobj, NULL, "state");
break;
case SDEV_CANCEL:
case SDEV_OFFLINE:
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_LOGGING_H
#define _SCSI_LOGGING_H
/*
- * This defines the scsi logging feature. It is a means by which the user
- * can select how much information they get about various goings on, and it
- * can be really useful for fault tracing. The logging word is divided into
- * 8 nibbles, each of which describes a loglevel. The division of things is
+ * This defines the scsi logging feature. It is a means by which the user can
+ * select how much information they get about various goings on, and it can be
+ * really useful for fault tracing. The logging word is divided into 10 3-bit
+ * bitfields, each of which describes a loglevel. The division of things is
* somewhat arbitrary, and the division of the word could be changed if it
* were really needed for any reason. The numbers below are the only place
* where these are specified. For a first go-around, 3 bits is more than
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_PRIV_H
#define _SCSI_PRIV_H
/* scsi_dh.c */
#ifdef CONFIG_SCSI_DH
- int scsi_dh_add_device(struct scsi_device *sdev);
+ void scsi_dh_add_device(struct scsi_device *sdev);
void scsi_dh_release_device(struct scsi_device *sdev);
#else
- static inline int scsi_dh_add_device(struct scsi_device *sdev) { return 0; }
+ static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
+// SPDX-License-Identifier: GPL-2.0
/*
* scsi_scan.c
*
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
+ if (*bflags & BLIST_UNMAP_LIMIT_WS)
+ sdev->unmap_limit_for_ws = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
scsi_attach_vpd(sdev);
sdev->max_queue_depth = sdev->queue_depth;
+ sdev->sdev_bflags = *bflags;
/*
* Ok, the device is now all set up, we can
#include <scsi/scsi_dh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_driver.h>
+ #include <scsi/scsi_devinfo.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
}
static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
+ #define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+ static const char *const sdev_bflags_name[] = {
+ #include "scsi_devinfo_tbl.c"
+ };
+ #undef BLIST_FLAG_NAME
+
+ static ssize_t
+ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
+ char *buf)
+ {
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int i;
+ ssize_t len = 0;
+
+ for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
+ const char *name = NULL;
+
+ if (!(sdev->sdev_bflags & BIT(i)))
+ continue;
+ if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
+ name = sdev_bflags_name[i];
+
+ if (name)
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s%s", len ? " " : "", name);
+ else
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%sINVALID_BIT(%d)", len ? " " : "", i);
+ }
+ if (len)
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+ }
+ static DEVICE_ATTR(blacklist, S_IRUGO, sdev_show_blacklist, NULL);
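With this attribute in place, reading /sys/class/scsi_device/<h:c:t:l>/device/blacklist prints the space-separated names of whatever bits are set in sdev_bflags; for the HITACHI OPEN- entry updated earlier one would expect output like "REPORTLUN2 TRY_VPD_PAGES". The names come from the generated scsi_devinfo_tbl.c included just above.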
+
#ifdef CONFIG_SCSI_DH
static ssize_t
sdev_show_dh_state(struct device *dev, struct device_attribute *attr,
&dev_attr_queue_depth.attr,
&dev_attr_queue_type.attr,
&dev_attr_wwid.attr,
+ &dev_attr_blacklist.attr,
#ifdef CONFIG_SCSI_DH
&dev_attr_dh_state.attr,
&dev_attr_access_state.attr,
scsi_autopm_get_device(sdev);
- error = scsi_dh_add_device(sdev);
- if (error)
- /*
- * device_handler is optional, so any error can be ignored
- */
- sdev_printk(KERN_INFO, sdev,
- "failed to add device handler: %d\n", error);
+ scsi_dh_add_device(sdev);
error = device_add(&sdev->sdev_gendev);
if (error) {
spin_lock_irqsave(shost->host_lock, flags);
restart:
list_for_each_entry(sdev, &shost->__devices, siblings) {
+ /*
+ * We cannot call scsi_device_get() here, as
+ * we might've been called from rmmod() causing
+ * scsi_device_get() to fail the module_is_live()
+ * check.
+ */
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
- scsi_device_get(sdev))
+ !get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
- scsi_device_put(sdev);
+ put_device(&sdev->sdev_gendev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
list_for_each_entry(rport, &fc_host->rports, peers) {
- if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
+ if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
+ rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
(rport->channel == channel)) {
switch (fc_host->tgtid_bind_type) {
memcpy(&rport->port_name, &ids->port_name,
sizeof(rport->port_name));
rport->port_id = ids->port_id;
- rport->roles = ids->roles;
rport->port_state = FC_PORTSTATE_ONLINE;
rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
fci->f->dd_fcrport_size);
spin_unlock_irqrestore(shost->host_lock, flags);
- if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
- scsi_target_unblock(&rport->dev, SDEV_RUNNING);
-
- /* initiate a scan of the target */
- spin_lock_irqsave(shost->host_lock, flags);
- rport->flags |= FC_RPORT_SCAN_PENDING;
- scsi_queue_work(shost, &rport->scan_work);
- spin_unlock_irqrestore(shost->host_lock, flags);
- }
+ fc_remote_port_rolechg(rport, ids->roles);
return rport;
}
}
{
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ if (WARN_ON_ONCE(!rport))
+ return FAST_IO_FAIL;
+
return fc_block_rport(rport);
}
EXPORT_SYMBOL(fc_block_scsi_eh);
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct bsg_job *job = (void *) req->special;
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
}
EXPORT_SYMBOL_GPL(iscsi_free_session);
-/**
- * iscsi_destroy_session - destroy iscsi session
- * @session: iscsi_session
- *
- * Can be called by a LLD or iscsi_transport. There must not be
- * any running connections.
- */
-int iscsi_destroy_session(struct iscsi_cls_session *session)
-{
- iscsi_remove_session(session);
- ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
- iscsi_free_session(session);
- return 0;
-}
-EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-
/**
* iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session
shost = scsi_host_lookup(ev->u.get_host_stats.host_no);
if (!shost) {
- pr_err("%s: failed. Cound not find host no %u\n",
+ pr_err("%s: failed. Could not find host no %u\n",
__func__, ev->u.get_host_stats.host_no);
return -ENODEV;
}
uint32_t group;
nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) ||
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
skb->len < nlh->nlmsg_len) {
break;
}
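+ /*
+ * The tightened length check above requires each netlink message to
+ * carry a complete iscsi_uevent payload, not just a netlink header,
+ * so short, malformed messages can no longer cause reads past the
+ * end of the buffer.
+ */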
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
+ bool v;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- sdp->manage_start_stop = simple_strtoul(buf, NULL, 10);
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_start_stop = v;
return count;
}
allow_restart_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ bool v;
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
return -EINVAL;
- sdp->allow_restart = simple_strtoul(buf, NULL, 10);
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->allow_restart = v;
return count;
}
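+ /*
+ * Note on the two conversions above: unlike simple_strtoul(),
+ * kstrtobool() rejects malformed input and accepts "y"/"n" and
+ * "on"/"off" as well as "1"/"0", so a write such as
+ * "echo 2 > allow_restart" now fails with -EINVAL instead of
+ * silently storing an unexpected value.
+ */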
break;
case SD_LBP_WS16:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS16_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS10:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS10_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
break;
case SD_LBP_ZERO:
else
sdkp->zeroing_mode = SD_ZERO_WRITE;
+ if (sdkp->max_ws_blocks &&
+ sdkp->physical_block_size > logical_block_size) {
+ /*
+ * Reporting a maximum number of blocks that is not aligned
+ * to the device physical block size would cause a large write same
+ * request to be split into physically unaligned chunks by
+ * __blkdev_issue_write_zeroes() and __blkdev_issue_write_same()
+ * even if the caller of these functions took care to align the
+ * large request. So make sure the maximum reported is aligned
+ * to the device physical block size. This is only an optional
+ * optimization for regular disks, but this is mandatory to
+ * avoid failure of large write same requests directed at
+ * sequential write required zones of host-managed ZBC disks.
+ */
+ sdkp->max_ws_blocks =
+ round_down(sdkp->max_ws_blocks,
+ bytes_to_logical(sdkp->device,
+ sdkp->physical_block_size));
+ }
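+ /*
+ * Worked example (hypothetical values): with a 512 B logical and a
+ * 4096 B physical block size, bytes_to_logical() yields 8, so a
+ * max_ws_blocks of 65535 is rounded down to 65528, keeping every
+ * maximum-sized write same request aligned to the 4 KiB physical
+ * blocks.
+ */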
+
out:
blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks *
(logical_block_size >> 9));
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
- else if (sdkp->lbpu && sdkp->max_unmap_blocks)
- sd_config_discard(sdkp, SD_LBP_UNMAP);
else
sd_config_discard(sdkp, SD_LBP_DISABLE);
}
sd_read_security(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
#define BUILD_TIMESTAMP
#endif
- #define DRIVER_VERSION "1.1.2-125"
+ #define DRIVER_VERSION "1.1.2-126"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
#define DRIVER_RELEASE 2
- #define DRIVER_REVISION 125
+ #define DRIVER_REVISION 126
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
bad_raid_map:
dev_warn(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d %s\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun, err_msg);
+ "logical device %08x%08x %s\n",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]), err_msg);
return -EINVAL;
}
#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
-static void pqi_heartbeat_timer_handler(unsigned long data)
+static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
int num_interrupts;
u32 heartbeat_count;
- struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+ struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
+ heartbeat_timer);
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info))
ctrl_info->heartbeat_timer.expires =
jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
- ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
- ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
add_timer(&ctrl_info->heartbeat_timer);
}
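+ /*
+ * With the timer_list conversion the handler now receives the
+ * timer_list pointer itself; from_timer() (a container_of() helper)
+ * recovers the pqi_ctrl_info that embeds the timer, so the explicit
+ * .data and .function assignments before add_timer() are dropped.
+ */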
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
- init_timer(&ctrl_info->heartbeat_timer);
+ timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
sema_init(&ctrl_info->sync_request_sem,
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1301)
},
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1302)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x1303)
+ },
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x1380)
bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
? true : false;
+ if (is_rate_B)
+ phy_set_mode(phy, PHY_MODE_UFS_HS_B);
+
/* Assert PHY reset and apply PHY calibration values */
ufs_qcom_assert_reset(hba);
/* provide 1ms delay to let the reset pulse propagate */
usleep_range(1000, 1100);
- ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
-
+ /* phy initialization - calibrate the phy */
+ ret = phy_init(phy);
if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
goto out;
}
* voltage, current to settle down before starting serdes.
*/
usleep_range(1000, 1100);
- ret = ufs_qcom_phy_start_serdes(phy);
+
+ /* power on phy - start serdes and phy's power and clocks */
+ ret = phy_power_on(phy);
if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
__func__, ret);
- goto out;
+ goto out_disable_phy;
}
- ret = ufs_qcom_phy_is_pcs_ready(phy);
- if (ret)
- dev_err(hba->dev,
- "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
- __func__, ret);
-
ufs_qcom_select_unipro_mode(host);
+ return 0;
+
+out_disable_phy:
+ ufs_qcom_assert_reset(hba);
+ phy_exit(phy);
out:
return ret;
}
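+ /*
+ * The sequence above now follows the generic PHY framework: phy_init()
+ * calibrates the PHY, phy_power_on() starts the serdes, and the error
+ * path unwinds with phy_exit(), replacing the former direct calls to
+ * the ufs_qcom_phy_* helpers.
+ */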
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
- phy_init(host->generic_phy);
- err = phy_power_on(host->generic_phy);
- if (err)
- goto out_unregister_bus;
-
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_variant_clear;
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
goto out;
-out_disable_phy:
- phy_power_off(host->generic_phy);
-out_unregister_bus:
- phy_exit(host->generic_phy);
out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UFS_BIT(17);
+ reg |= UTP_DBG_RAMS_EN;
ufshcd_writel(hba, reg, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
/* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
* so when updating/adding events here, please also
* update the other file too.
*/
- enum ha_event {
- HAE_RESET = 0U,
- HA_NUM_EVENTS = 1,
- };
-
enum port_event {
PORTE_BYTES_DMAED = 0U,
- PORTE_BROADCAST_RCVD = 1,
- PORTE_LINK_RESET_ERR = 2,
- PORTE_TIMER_EVENT = 3,
- PORTE_HARD_RESET = 4,
- PORT_NUM_EVENTS = 5,
+ PORTE_BROADCAST_RCVD,
+ PORTE_LINK_RESET_ERR,
+ PORTE_TIMER_EVENT,
+ PORTE_HARD_RESET,
+ PORT_NUM_EVENTS,
};
enum phy_event {
PHYE_LOSS_OF_SIGNAL = 0U,
- PHYE_OOB_DONE = 1,
- PHYE_OOB_ERROR = 2,
- PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */
- PHYE_RESUME_TIMEOUT = 4,
- PHY_NUM_EVENTS = 5,
+ PHYE_OOB_DONE,
+ PHYE_OOB_ERROR,
+ PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
+ PHYE_RESUME_TIMEOUT,
+ PHY_NUM_EVENTS,
};
enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
- DISCE_REVALIDATE_DOMAIN = 1,
- DISCE_PORT_GONE = 2,
- DISCE_PROBE = 3,
- DISCE_SUSPEND = 4,
- DISCE_RESUME = 5,
- DISCE_DESTRUCT = 6,
- DISC_NUM_EVENTS = 7,
+ DISCE_REVALIDATE_DOMAIN,
+ DISCE_PROBE,
+ DISCE_SUSPEND,
+ DISCE_RESUME,
+ DISCE_DESTRUCT,
+ DISC_NUM_EVENTS,
};
/* ---------- Expander Devices ---------- */
/* The port struct is Class:RW, driver:RO */
struct asd_sas_port {
/* private: */
- struct completion port_gone_completion;
-
struct sas_discovery disc;
struct domain_device *port_dev;
spinlock_t dev_list_lock;
};
- struct sas_ha_event {
- struct sas_work work;
- struct sas_ha_struct *ha;
- };
-
- static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work)
- {
- struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work);
-
- return ev;
- }
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
struct sas_ha_struct {
/* private: */
- struct sas_ha_event ha_events[HA_NUM_EVENTS];
- unsigned long pending;
-
struct list_head defer_q; /* work queued while draining */
struct mutex drain_mutex;
unsigned long state;
* their siblings when forming wide ports */
/* LLDD calls these to notify the class of an event. */
- int (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
*/
struct timer_list timer;
struct completion completion;
+ struct sas_task *task;
};
#define SAS_TASK_STATE_PENDING 1
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_DEVICE_H
#define _SCSI_SCSI_DEVICE_H
SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
SDEV_EVT_ALUA_STATE_CHANGE_REPORTED, /* 2A 06 UA reported */
+ SDEV_EVT_POWER_ON_RESET_OCCURRED, /* 29 00 UA reported */
SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
- SDEV_EVT_LAST = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
+ SDEV_EVT_LAST = SDEV_EVT_POWER_ON_RESET_OCCURRED,
SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
};
unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
unsigned broken_fua:1; /* Don't set FUA bit */
unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
+ unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
unsigned char access_state;
struct mutex state_mutex;
enum scsi_device_state sdev_state;
+ struct task_struct *quiesced_by;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_DEVINFO_H
#define _SCSI_SCSI_DEVINFO_H
/*
* Flags for SCSI devices that need special treatment
*/
- #define BLIST_NOLUN 0x001 /* Only scan LUN 0 */
- #define BLIST_FORCELUN 0x002 /* Known to have LUNs, force scanning,
- deprecated: Use max_luns=N */
- #define BLIST_BORKEN 0x004 /* Flag for broken handshaking */
- #define BLIST_KEY 0x008 /* unlock by special command */
- #define BLIST_SINGLELUN 0x010 /* Do not use LUNs in parallel */
- #define BLIST_NOTQ 0x020 /* Buggy Tagged Command Queuing */
- #define BLIST_SPARSELUN 0x040 /* Non consecutive LUN numbering */
- #define BLIST_MAX5LUN 0x080 /* Avoid LUNS >= 5 */
- #define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */
- #define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */
- #define BLIST_INQUIRY_36 0x400 /* override additional length field */
- #define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */
- #define BLIST_REPORTLUN2 0x20000 /* try REPORT_LUNS even for SCSI-2 devs
- (if HBA supports more than 8 LUNs) */
- #define BLIST_NOREPORTLUN 0x40000 /* don't try REPORT_LUNS scan (SCSI-3 devs) */
- #define BLIST_NOT_LOCKABLE 0x80000 /* don't use PREVENT-ALLOW commands */
- #define BLIST_NO_ULD_ATTACH 0x100000 /* device is actually for RAID config */
- #define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */
- #define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
- #define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
- #define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
- #define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
- #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
- #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
- #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
- #define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */
+
+ /* Only scan LUN 0 */
+ #define BLIST_NOLUN ((__force __u32 __bitwise)(1 << 0))
+ /* Known to have LUNs, force scanning.
+ * DEPRECATED: Use max_luns=N */
+ #define BLIST_FORCELUN ((__force __u32 __bitwise)(1 << 1))
+ /* Flag for broken handshaking */
+ #define BLIST_BORKEN ((__force __u32 __bitwise)(1 << 2))
+ /* unlock by special command */
+ #define BLIST_KEY ((__force __u32 __bitwise)(1 << 3))
+ /* Do not use LUNs in parallel */
+ #define BLIST_SINGLELUN ((__force __u32 __bitwise)(1 << 4))
+ /* Buggy Tagged Command Queuing */
+ #define BLIST_NOTQ ((__force __u32 __bitwise)(1 << 5))
+ /* Non consecutive LUN numbering */
+ #define BLIST_SPARSELUN ((__force __u32 __bitwise)(1 << 6))
+ /* Avoid LUNS >= 5 */
+ #define BLIST_MAX5LUN ((__force __u32 __bitwise)(1 << 7))
+ /* Treat as (removable) CD-ROM */
+ #define BLIST_ISROM ((__force __u32 __bitwise)(1 << 8))
+ /* LUNs past 7 on a SCSI-2 device */
+ #define BLIST_LARGELUN ((__force __u32 __bitwise)(1 << 9))
+ /* override additional length field */
+ #define BLIST_INQUIRY_36 ((__force __u32 __bitwise)(1 << 10))
+ /* do not do automatic start on add */
+ #define BLIST_NOSTARTONADD ((__force __u32 __bitwise)(1 << 12))
+ /* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
+ #define BLIST_REPORTLUN2 ((__force __u32 __bitwise)(1 << 17))
+ /* don't try REPORT_LUNS scan (SCSI-3 devs) */
+ #define BLIST_NOREPORTLUN ((__force __u32 __bitwise)(1 << 18))
+ /* don't use PREVENT-ALLOW commands */
+ #define BLIST_NOT_LOCKABLE ((__force __u32 __bitwise)(1 << 19))
+ /* device is actually for RAID config */
+ #define BLIST_NO_ULD_ATTACH ((__force __u32 __bitwise)(1 << 20))
+ /* select without ATN */
+ #define BLIST_SELECT_NO_ATN ((__force __u32 __bitwise)(1 << 21))
+ /* retry HARDWARE_ERROR */
+ #define BLIST_RETRY_HWERROR ((__force __u32 __bitwise)(1 << 22))
+ /* maximum 512 sector cdb length */
+ #define BLIST_MAX_512 ((__force __u32 __bitwise)(1 << 23))
+ /* Disable T10 PI (DIF) */
+ #define BLIST_NO_DIF ((__force __u32 __bitwise)(1 << 25))
+ /* Ignore SBC-3 VPD pages */
+ #define BLIST_SKIP_VPD_PAGES ((__force __u32 __bitwise)(1 << 26))
+ /* Attempt to read VPD pages */
+ #define BLIST_TRY_VPD_PAGES ((__force __u32 __bitwise)(1 << 28))
+ /* don't try to issue RSOC */
+ #define BLIST_NO_RSOC ((__force __u32 __bitwise)(1 << 29))
+ /* maximum 1024 sector cdb length */
+ #define BLIST_MAX_1024 ((__force __u32 __bitwise)(1 << 30))
+ /* Use UNMAP limit for WRITE SAME */
+ #define BLIST_UNMAP_LIMIT_WS ((__force __u32 __bitwise)(1 << 31))
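+
+ /*
+ * The __bitwise annotation lets sparse warn when these flags are mixed
+ * with plain integers; __force suppresses that warning for the
+ * definitions themselves. The gaps in the numbering (bits 11, 13-16,
+ * 24 and 27) are deliberate so that the existing flag values stay
+ * unchanged.
+ */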
#endif
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This header file contains public constants and structures used by
* both the SCSI initiator and the SCSI target code.
/* Reporting options for REPORT ZONES */
enum zbc_zone_reporting_options {
- ZBC_ZONE_REPORTING_OPTION_ALL = 0,
- ZBC_ZONE_REPORTING_OPTION_EMPTY,
- ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN,
- ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN,
- ZBC_ZONE_REPORTING_OPTION_CLOSED,
- ZBC_ZONE_REPORTING_OPTION_FULL,
- ZBC_ZONE_REPORTING_OPTION_READONLY,
- ZBC_ZONE_REPORTING_OPTION_OFFLINE,
- ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
- ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE,
- ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
+ ZBC_ZONE_REPORTING_OPTION_ALL = 0x00,
+ ZBC_ZONE_REPORTING_OPTION_EMPTY = 0x01,
+ ZBC_ZONE_REPORTING_OPTION_IMPLICIT_OPEN = 0x02,
+ ZBC_ZONE_REPORTING_OPTION_EXPLICIT_OPEN = 0x03,
+ ZBC_ZONE_REPORTING_OPTION_CLOSED = 0x04,
+ ZBC_ZONE_REPORTING_OPTION_FULL = 0x05,
+ ZBC_ZONE_REPORTING_OPTION_READONLY = 0x06,
+ ZBC_ZONE_REPORTING_OPTION_OFFLINE = 0x07,
+ /* 0x08 to 0x0f are reserved */
+ ZBC_ZONE_REPORTING_OPTION_NEED_RESET_WP = 0x10,
+ ZBC_ZONE_REPORTING_OPTION_NON_SEQWRITE = 0x11,
+ /* 0x12 to 0x3e are reserved */
+ ZBC_ZONE_REPORTING_OPTION_NON_WP = 0x3f,
};
#define ZBC_REPORT_ZONE_PARTIAL 0x80
+ /* Zone types of REPORT ZONES zone descriptors */
+ enum zbc_zone_type {
+ ZBC_ZONE_TYPE_CONV = 0x1,
+ ZBC_ZONE_TYPE_SEQWRITE_REQ = 0x2,
+ ZBC_ZONE_TYPE_SEQWRITE_PREF = 0x3,
+ /* 0x4 to 0xf are reserved */
+ };
+
+ /* Zone conditions of REPORT ZONES zone descriptors */
+ enum zbc_zone_cond {
+ ZBC_ZONE_COND_NO_WP = 0x0,
+ ZBC_ZONE_COND_EMPTY = 0x1,
+ ZBC_ZONE_COND_IMP_OPEN = 0x2,
+ ZBC_ZONE_COND_EXP_OPEN = 0x3,
+ ZBC_ZONE_COND_CLOSED = 0x4,
+ /* 0x5 to 0xc are reserved */
+ ZBC_ZONE_COND_READONLY = 0xd,
+ ZBC_ZONE_COND_FULL = 0xe,
+ ZBC_ZONE_COND_OFFLINE = 0xf,
+ };
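+ /*
+ * Decoding sketch (assuming the standard ZBC zone-descriptor layout,
+ * which this header does not define): the zone type is the low nibble
+ * of descriptor byte 0 and the zone condition the high nibble of
+ * byte 1, i.e.
+ *
+ * u8 type = desc[0] & 0x0f; (enum zbc_zone_type)
+ * u8 cond = (desc[1] >> 4) & 0x0f; (enum zbc_zone_cond)
+ */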
+
#endif /* _SCSI_PROTO_H_ */