2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include <linux/delay.h>
9 #include <linux/ktime.h>
10 #include <linux/pci.h>
11 #include <linux/ratelimit.h>
12 #include <linux/vmalloc.h>
13 #include <scsi/scsi_tcq.h>
14 #include <linux/utsname.h>
17 /* QLAFX00 specific Mailbox implementation functions */
20 * qlafx00_mailbox_command
21 * Issues a mailbox command and waits for completion.
24 * ha = adapter block pointer.
25 * mcp = driver internal mbx struct pointer.
28 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
31 * 0 : QLA_SUCCESS (cmd performed successfully)
32 * 1 : QLA_FUNCTION_FAILED (error encountered)
33 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
39 qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
43 unsigned long flags = 0;
52 unsigned long wait_time;
53 struct qla_hw_data *ha = vha->hw;
54 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
56 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
57 ql_log(ql_log_warn, vha, 0x115c,
58 "PCI channel failed permanently, exiting.\n");
59 return QLA_FUNCTION_TIMEOUT;
62 if (vha->device_flags & DFLG_DEV_FAILED) {
63 ql_log(ql_log_warn, vha, 0x115f,
64 "Device in failed state, exiting.\n");
65 return QLA_FUNCTION_TIMEOUT;
69 io_lock_on = base_vha->flags.init_done;
72 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
74 if (ha->flags.pci_channel_io_perm_failure) {
75 ql_log(ql_log_warn, vha, 0x1175,
76 "Perm failure on EEH timeout MBX, exiting.\n");
77 return QLA_FUNCTION_TIMEOUT;
80 if (ha->flags.isp82xx_fw_hung) {
81 /* Setting Link-Down error */
82 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
83 ql_log(ql_log_warn, vha, 0x1176,
84 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
85 rval = QLA_FUNCTION_FAILED;
90 * Wait for active mailbox commands to finish by waiting at most tov
91 * seconds. This is to serialize actual issuing of mailbox cmds during
94 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
95 /* Timeout occurred. Return error. */
96 ql_log(ql_log_warn, vha, 0x1177,
97 "Cmd access timeout, cmd=0x%x, Exiting.\n",
99 return QLA_FUNCTION_TIMEOUT;
102 ha->flags.mbox_busy = 1;
103 /* Save mailbox command for debug */
106 ql_dbg(ql_dbg_mbx, vha, 0x1178,
107 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
109 spin_lock_irqsave(&ha->hardware_lock, flags);
111 /* Load mailbox registers. */
112 optr = &reg->ispfx00.mailbox0;
115 command = mcp->mb[0];
116 mboxes = mcp->out_mb;
118 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
120 wrt_reg_dword(optr, *iptr);
127 /* Issue set host interrupt command to send cmd out. */
128 ha->flags.mbox_int = 0;
129 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
131 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
132 (uint8_t *)mcp->mb, 16);
133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
134 ((uint8_t *)mcp->mb + 0x10), 16);
135 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
136 ((uint8_t *)mcp->mb + 0x20), 8);
138 /* Unlock mbx registers and wait for interrupt */
139 ql_dbg(ql_dbg_mbx, vha, 0x1179,
140 "Going to unlock irq & waiting for interrupts. "
141 "jiffies=%lx.\n", jiffies);
143 /* Wait for mbx cmd completion until timeout */
144 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
145 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
147 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
148 spin_unlock_irqrestore(&ha->hardware_lock, flags);
150 WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
151 mcp->tov * HZ) != 0);
153 ql_dbg(ql_dbg_mbx, vha, 0x112c,
154 "Cmd=%x Polling Mode.\n", command);
156 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
157 spin_unlock_irqrestore(&ha->hardware_lock, flags);
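		/*
		 * Polling path: no completion interrupt is expected here, so
		 * spin on ha->flags.mbox_int (set from the poll handler) and
		 * service the response queue by hand until mcp->tov expires.
		 */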
159 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
160 while (!ha->flags.mbox_int) {
161 if (time_after(jiffies, wait_time))
164 /* Check for pending interrupts. */
165 qla2x00_poll(ha->rsp_q_map[0]);
167 if (!ha->flags.mbox_int &&
169 command == MBC_LOAD_RISC_RAM_EXTENDED))
170 usleep_range(10000, 11000);
172 ql_dbg(ql_dbg_mbx, vha, 0x112d,
174 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
177 /* Check whether we timed out */
178 if (ha->flags.mbox_int) {
181 ql_dbg(ql_dbg_mbx, vha, 0x112e,
182 "Cmd=%x completed.\n", command);
184 /* Got interrupt. Clear the flag. */
185 ha->flags.mbox_int = 0;
186 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
188 if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
189 rval = QLA_FUNCTION_FAILED;
191 /* Load return mailbox registers. */
193 iptr = (uint32_t *)&ha->mailbox_out32[0];
195 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
205 rval = QLA_FUNCTION_TIMEOUT;
208 ha->flags.mbox_busy = 0;
213 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
214 ql_dbg(ql_dbg_mbx, vha, 0x113a,
215 "checking for additional resp interrupt.\n");
217 /* polling mode for non isp_abort commands. */
218 qla2x00_poll(ha->rsp_q_map[0]);
221 if (rval == QLA_FUNCTION_TIMEOUT &&
222 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
223 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
224 ha->flags.eeh_busy) {
225 /* not in dpc. schedule it for dpc to take over. */
226 ql_dbg(ql_dbg_mbx, vha, 0x115d,
227 "Timeout, schedule isp_abort_needed.\n");
229 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
230 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
231 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
233 ql_log(ql_log_info, base_vha, 0x115e,
234 "Mailbox cmd timeout occurred, cmd=0x%x, "
235 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
236 "abort.\n", command, mcp->mb[0],
238 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
239 qla2xxx_wake_dpc(vha);
241 } else if (!abort_active) {
242 /* call abort directly since we are in the DPC thread */
243 ql_dbg(ql_dbg_mbx, vha, 0x1160,
244 "Timeout, calling abort_isp.\n");
246 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
247 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
248 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
250 ql_log(ql_log_info, base_vha, 0x1161,
251 "Mailbox cmd timeout occurred, cmd=0x%x, "
252 "mb[0]=0x%x. Scheduling ISP abort ",
253 command, mcp->mb[0]);
255 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
256 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
257 if (ha->isp_ops->abort_isp(vha)) {
258 /* Failed. retry later. */
259 set_bit(ISP_ABORT_NEEDED,
262 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
263 ql_dbg(ql_dbg_mbx, vha, 0x1162,
264 "Finished abort_isp.\n");
270 /* Allow next mbx cmd to come in. */
271 complete(&ha->mbx_cmd_comp);
274 ql_log(ql_log_warn, base_vha, 0x1163,
275 "**** Failed=%x mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
276 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
279 ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
286 * qlafx00_driver_shutdown
287 * Indicate a driver shutdown to firmware.
290 * ha = adapter block pointer.
293 * local function return status code.
299 qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
302 struct mbx_cmd_32 mc;
303 struct mbx_cmd_32 *mcp = &mc;
305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
306 "Entered %s.\n", __func__);
308 mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
314 mcp->tov = MBX_TOV_SECONDS;
316 rval = qlafx00_mailbox_command(vha, mcp);
318 if (rval != QLA_SUCCESS) {
319 ql_dbg(ql_dbg_mbx, vha, 0x1167,
320 "Failed=%x.\n", rval);
322 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
323 "Done %s.\n", __func__);
330 * qlafx00_get_firmware_state
331 * Get adapter firmware state.
334 * ha = adapter block pointer.
335 * TARGET_QUEUE_LOCK must be released.
336 * ADAPTER_STATE_LOCK must be released.
339 * qlafx00 local function return status code.
345 qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
348 struct mbx_cmd_32 mc;
349 struct mbx_cmd_32 *mcp = &mc;
351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
352 "Entered %s.\n", __func__);
354 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
356 mcp->in_mb = MBX_1|MBX_0;
357 mcp->tov = MBX_TOV_SECONDS;
359 rval = qlafx00_mailbox_command(vha, mcp);
361 /* Return firmware states. */
362 states[0] = mcp->mb[1];
364 if (rval != QLA_SUCCESS) {
365 ql_dbg(ql_dbg_mbx, vha, 0x116a,
366 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
369 "Done %s.\n", __func__);
375 * qlafx00_init_firmware
376 * Initialize adapter firmware.
379 * ha = adapter block pointer.
380 * dptr = Initialization control block pointer.
381 * size = size of initialization control block.
382 * TARGET_QUEUE_LOCK must be released.
383 * ADAPTER_STATE_LOCK must be released.
386 * qlafx00 local function return status code.
392 qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
395 struct mbx_cmd_32 mc;
396 struct mbx_cmd_32 *mcp = &mc;
397 struct qla_hw_data *ha = vha->hw;
399 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
400 "Entered %s.\n", __func__);
402 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
405 mcp->mb[2] = MSD(ha->init_cb_dma);
406 mcp->mb[3] = LSD(ha->init_cb_dma);
408 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
410 mcp->buf_size = size;
411 mcp->flags = MBX_DMA_OUT;
412 mcp->tov = MBX_TOV_SECONDS;
413 rval = qlafx00_mailbox_command(vha, mcp);
415 if (rval != QLA_SUCCESS) {
416 ql_dbg(ql_dbg_mbx, vha, 0x116d,
417 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
420 "Done %s.\n", __func__);
426 * qlafx00_mbx_reg_test
429 qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
432 struct mbx_cmd_32 mc;
433 struct mbx_cmd_32 *mcp = &mc;
435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
436 "Entered %s.\n", __func__);
439 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
449 mcp->mb[10] = 0xBB66;
450 mcp->mb[11] = 0x66BB;
451 mcp->mb[12] = 0xB6B6;
452 mcp->mb[13] = 0x6B6B;
453 mcp->mb[14] = 0x3636;
454 mcp->mb[15] = 0xCCCC;
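	/*
	 * The firmware is expected to echo the outgoing test pattern back in
	 * mb[17]-mb[31]; the checks below fail the diagnostic on any mismatch.
	 */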
457 mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
458 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
459 mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
460 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
462 mcp->flags = MBX_DMA_OUT;
463 mcp->tov = MBX_TOV_SECONDS;
464 rval = qlafx00_mailbox_command(vha, mcp);
465 if (rval == QLA_SUCCESS) {
466 if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
467 mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
468 rval = QLA_FUNCTION_FAILED;
469 if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
470 mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
471 rval = QLA_FUNCTION_FAILED;
472 if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
473 mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
474 rval = QLA_FUNCTION_FAILED;
475 if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
476 mcp->mb[31] != 0xCCCC)
477 rval = QLA_FUNCTION_FAILED;
480 if (rval != QLA_SUCCESS) {
481 ql_dbg(ql_dbg_mbx, vha, 0x1170,
482 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
485 "Done %s.\n", __func__);
491 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
494 * Returns 0 on success.
497 qlafx00_pci_config(scsi_qla_host_t *vha)
500 struct qla_hw_data *ha = vha->hw;
502 pci_set_master(ha->pdev);
503 pci_try_set_mwi(ha->pdev);
505 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
506 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
507 w &= ~PCI_COMMAND_INTX_DISABLE;
508 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
510 /* PCIe -- adjust Maximum Read Request Size (2048). */
511 if (pci_is_pcie(ha->pdev))
512 pcie_set_readrq(ha->pdev, 2048);
514 ha->chip_revision = ha->pdev->revision;
520 * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
525 qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
527 unsigned long flags = 0;
528 struct qla_hw_data *ha = vha->hw;
533 spin_lock_irqsave(&ha->hardware_lock, flags);
535 QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
536 QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
538 /* stop the XOR DMA engines */
539 QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
540 QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
541 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
542 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
544 /* stop the IDMA engines */
545 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
547 QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
549 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
551 QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
553 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
555 QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
557 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
559 QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
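	/*
	 * Poll (bounded at 100000 iterations) on what are assumed to be
	 * DMA-idle status bits before pulling the SoC cores into reset.
	 */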
561 for (i = 0; i < 100000; i++) {
562 if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
563 (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
568 /* Set all 4 cores in reset */
569 for (i = 0; i < 4; i++) {
570 QLAFX00_SET_HBA_SOC_REG(ha,
571 (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
572 QLAFX00_SET_HBA_SOC_REG(ha,
573 (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
576 /* Reset all units in Fabric */
577 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
580 QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
581 QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
583 /* Set all 4 core Memory Power Down Registers */
584 for (i = 0; i < 5; i++) {
585 QLAFX00_SET_HBA_SOC_REG(ha,
586 (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
589 /* Reset all interrupt control registers */
590 for (i = 0; i < 115; i++) {
591 QLAFX00_SET_HBA_SOC_REG(ha,
592 (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
595 /* Reset Timers control registers. per core */
596 for (core = 0; core < 4; core++)
597 for (i = 0; i < 8; i++)
598 QLAFX00_SET_HBA_SOC_REG(ha,
599 (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
601 /* Reset per core IRQ ack register */
602 for (core = 0; core < 4; core++)
603 QLAFX00_SET_HBA_SOC_REG(ha,
604 (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
606 /* Set Fabric control and config to defaults */
607 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
608 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
610 /* Kick in Fabric units */
611 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
613 /* Kick in Core0 to start boot process */
614 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
616 spin_unlock_irqrestore(&ha->hardware_lock, flags);
618 /* Wait 10secs for soft-reset to complete. */
619 for (cnt = 10; cnt; cnt--) {
626 * qlafx00_soft_reset() - Soft Reset ISPFx00.
629 * Returns 0 on success.
632 qlafx00_soft_reset(scsi_qla_host_t *vha)
634 struct qla_hw_data *ha = vha->hw;
635 int rval = QLA_FUNCTION_FAILED;
637 if (unlikely(pci_channel_offline(ha->pdev) &&
638 ha->flags.pci_channel_io_perm_failure))
641 ha->isp_ops->disable_intrs(ha);
642 qlafx00_soc_cpu_reset(vha);
648 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
651 * Returns 0 on success.
654 qlafx00_chip_diag(scsi_qla_host_t *vha)
657 struct qla_hw_data *ha = vha->hw;
658 struct req_que *req = ha->req_q_map[0];
660 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
662 rval = qlafx00_mbx_reg_test(vha);
664 ql_log(ql_log_warn, vha, 0x1165,
665 "Failed mailbox send register test\n");
667 /* Flag a successful rval */
674 qlafx00_config_rings(struct scsi_qla_host *vha)
676 struct qla_hw_data *ha = vha->hw;
677 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
679 wrt_reg_dword(&reg->req_q_in, 0);
680 wrt_reg_dword(&reg->req_q_out, 0);
682 wrt_reg_dword(&reg->rsp_q_in, 0);
683 wrt_reg_dword(&reg->rsp_q_out, 0);
686 rd_reg_dword(&reg->rsp_q_out);
690 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
692 struct qla_hw_data *ha = vha->hw;
694 if (pci_is_pcie(ha->pdev))
695 strlcpy(str, "PCIe iSA", str_len);
700 qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
702 struct qla_hw_data *ha = vha->hw;
704 snprintf(str, size, "%s", ha->mr.fw_version);
709 qlafx00_enable_intrs(struct qla_hw_data *ha)
711 unsigned long flags = 0;
713 spin_lock_irqsave(&ha->hardware_lock, flags);
714 ha->interrupts_on = 1;
715 QLAFX00_ENABLE_ICNTRL_REG(ha);
716 spin_unlock_irqrestore(&ha->hardware_lock, flags);
720 qlafx00_disable_intrs(struct qla_hw_data *ha)
722 unsigned long flags = 0;
724 spin_lock_irqsave(&ha->hardware_lock, flags);
725 ha->interrupts_on = 0;
726 QLAFX00_DISABLE_ICNTRL_REG(ha);
727 spin_unlock_irqrestore(&ha->hardware_lock, flags);
731 qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
733 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
737 qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
739 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
743 qlafx00_loop_reset(scsi_qla_host_t *vha)
746 struct fc_port *fcport;
747 struct qla_hw_data *ha = vha->hw;
749 if (ql2xtargetreset) {
750 list_for_each_entry(fcport, &vha->vp_fcports, list) {
751 if (fcport->port_type != FCT_TARGET)
754 ret = ha->isp_ops->target_reset(fcport, 0, 0);
755 if (ret != QLA_SUCCESS) {
756 ql_dbg(ql_dbg_taskm, vha, 0x803d,
757 "Bus Reset failed: Reset=%d "
758 "d_id=%x.\n", ret, fcport->d_id.b24);
766 qlafx00_iospace_config(struct qla_hw_data *ha)
768 if (pci_request_selected_regions(ha->pdev, ha->bars,
769 QLA2XXX_DRIVER_NAME)) {
770 ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
771 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
773 goto iospace_error_exit;
776 /* Use MMIO operations for all accesses. */
777 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
778 ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
779 "Invalid pci I/O region size (%s).\n",
781 goto iospace_error_exit;
783 if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
784 ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
785 "Invalid PCI mem BAR0 region size (%s), aborting\n",
787 goto iospace_error_exit;
791 ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
793 ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
794 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
795 goto iospace_error_exit;
798 if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
799 ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
800 "region #2 not an MMIO resource (%s), aborting\n",
802 goto iospace_error_exit;
804 if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
805 ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
806 "Invalid PCI mem BAR2 region size (%s), aborting\n",
808 goto iospace_error_exit;
812 ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
814 ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
815 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
816 goto iospace_error_exit;
819 /* Determine queue resources */
820 ha->max_req_queues = ha->max_rsp_queues = 1;
822 ql_log_pci(ql_log_info, ha->pdev, 0x012c,
823 "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
824 ha->bars, ha->cregbase, ha->iobase);
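	/*
	 * Note: on ISPFx00, BAR0 is mapped as the SoC control-register window
	 * (cregbase) and BAR2 as the ISP register/queue window (iobase).
	 */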
833 qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
835 struct qla_hw_data *ha = vha->hw;
836 struct req_que *req = ha->req_q_map[0];
837 struct rsp_que *rsp = ha->rsp_q_map[0];
839 req->length_fx00 = req->length;
840 req->ring_fx00 = req->ring;
841 req->dma_fx00 = req->dma;
843 rsp->length_fx00 = rsp->length;
844 rsp->ring_fx00 = rsp->ring;
845 rsp->dma_fx00 = rsp->dma;
847 ql_dbg(ql_dbg_init, vha, 0x012d,
848 "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
849 "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
850 req->length_fx00, (u64)req->dma_fx00);
852 ql_dbg(ql_dbg_init, vha, 0x012e,
853 "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
854 "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
855 rsp->length_fx00, (u64)rsp->dma_fx00);
859 qlafx00_config_queues(struct scsi_qla_host *vha)
861 struct qla_hw_data *ha = vha->hw;
862 struct req_que *req = ha->req_q_map[0];
863 struct rsp_que *rsp = ha->rsp_q_map[0];
864 dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
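	/*
	 * On ISPFx00 the request/response rings live in the adapter's BAR2
	 * window: the ring pointers below are MMIO mappings off ha->iobase and
	 * the "dma" addresses are offsets into the BAR2 bus address rather
	 * than host coherent memory.
	 */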
866 req->length = ha->req_que_len;
867 req->ring = (void __force *)ha->iobase + ha->req_que_off;
868 req->dma = bar2_hdl + ha->req_que_off;
869 if ((!req->ring) || (req->length == 0)) {
870 ql_log_pci(ql_log_info, ha->pdev, 0x012f,
871 "Unable to allocate memory for req_ring\n");
872 return QLA_FUNCTION_FAILED;
875 ql_dbg(ql_dbg_init, vha, 0x0130,
876 "req: %p req_ring pointer %p req len 0x%x "
877 "req off 0x%x\n, req->dma: 0x%llx",
878 req, req->ring, req->length,
879 ha->req_que_off, (u64)req->dma);
881 rsp->length = ha->rsp_que_len;
882 rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
883 rsp->dma = bar2_hdl + ha->rsp_que_off;
884 if ((!rsp->ring) || (rsp->length == 0)) {
885 ql_log_pci(ql_log_info, ha->pdev, 0x0131,
886 "Unable to allocate memory for rsp_ring\n");
887 return QLA_FUNCTION_FAILED;
890 ql_dbg(ql_dbg_init, vha, 0x0132,
891 "rsp: %p rsp_ring pointer %p rsp len 0x%x "
892 "rsp off 0x%x, rsp->dma: 0x%llx\n",
893 rsp, rsp->ring, rsp->length,
894 ha->rsp_que_off, (u64)rsp->dma);
900 qlafx00_init_fw_ready(scsi_qla_host_t *vha)
904 uint16_t wait_time; /* Wait time */
905 struct qla_hw_data *ha = vha->hw;
906 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
907 uint32_t aenmbx, aenmbx7 = 0;
912 /* 30 seconds wait - Adjust if required */
915 pseudo_aen = rd_reg_dword(&reg->pseudoaen);
916 if (pseudo_aen == 1) {
917 aenmbx7 = rd_reg_dword(&reg->initval7);
918 ha->mbx_intr_code = MSW(aenmbx7);
919 ha->rqstq_intr_code = LSW(aenmbx7);
920 rval = qlafx00_driver_shutdown(vha, 10);
921 if (rval != QLA_SUCCESS)
922 qlafx00_soft_reset(vha);
925 /* wait time before firmware ready */
926 wtime = jiffies + (wait_time * HZ);
928 aenmbx = rd_reg_dword(&reg->aenmailbox0);
930 ql_dbg(ql_dbg_mbx, vha, 0x0133,
931 "aenmbx: 0x%x\n", aenmbx);
934 case MBA_FW_NOT_STARTED:
935 case MBA_FW_STARTING:
939 case MBA_REQ_TRANSFER_ERR:
940 case MBA_RSP_TRANSFER_ERR:
941 case MBA_FW_INIT_FAILURE:
942 qlafx00_soft_reset(vha);
945 case MBA_FW_RESTART_CMPLT:
946 /* Set the mbx and rqstq intr code */
947 aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
948 ha->mbx_intr_code = MSW(aenmbx7);
949 ha->rqstq_intr_code = LSW(aenmbx7);
950 ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
951 ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
952 ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
953 ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
954 wrt_reg_dword(&reg->aenmailbox0, 0);
955 rd_reg_dword_relaxed(&reg->aenmailbox0);
956 ql_dbg(ql_dbg_init, vha, 0x0134,
957 "f/w returned mbx_intr_code: 0x%x, "
958 "rqstq_intr_code: 0x%x\n",
959 ha->mbx_intr_code, ha->rqstq_intr_code);
960 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
966 if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
969 /* The fw is apparently not ready. In order to continue,
970 * we might need to issue Mbox cmd, but the problem is
971 * that the DoorBell vector values that come with the
972 * 8060 AEN are most likely gone by now (and thus no
973 * bell would be rung on the fw side when mbox cmd is
974 * issued). We have to therefore grab the 8060 AEN
975 * shadow regs (filled in by FW when the last 8060
976 * AEN was being posted).
977 * Do the following to determine what is needed in
978 * order to get the FW ready:
979 * 1. reload the 8060 AEN values from the shadow regs
980 * 2. clear int status to get rid of possible pending
982 * 3. issue Get FW State Mbox cmd to determine fw state
983 * Set the mbx and rqstq intr code from Shadow Regs
985 aenmbx7 = rd_reg_dword(&reg->initval7);
986 ha->mbx_intr_code = MSW(aenmbx7);
987 ha->rqstq_intr_code = LSW(aenmbx7);
988 ha->req_que_off = rd_reg_dword(&reg->initval1);
989 ha->rsp_que_off = rd_reg_dword(&reg->initval3);
990 ha->req_que_len = rd_reg_dword(&reg->initval5);
991 ha->rsp_que_len = rd_reg_dword(&reg->initval6);
992 ql_dbg(ql_dbg_init, vha, 0x0135,
993 "f/w returned mbx_intr_code: 0x%x, "
994 "rqstq_intr_code: 0x%x\n",
995 ha->mbx_intr_code, ha->rqstq_intr_code);
996 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
998 /* Get the FW state */
999 rval = qlafx00_get_firmware_state(vha, state);
1000 if (rval != QLA_SUCCESS) {
1001 /* Retry if timer has not expired */
1005 if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
1006 /* Firmware is waiting to be
1007 * initialized by driver
1014 /* Issue driver shutdown and wait until f/w recovers.
1015 * Driver should continue to poll until 8060 AEN is
1016 * received indicating firmware recovery.
1018 ql_dbg(ql_dbg_init, vha, 0x0136,
1019 "Sending Driver shutdown fw_state 0x%x\n",
1022 rval = qlafx00_driver_shutdown(vha, 10);
1023 if (rval != QLA_SUCCESS) {
1024 rval = QLA_FUNCTION_FAILED;
1029 wtime = jiffies + (wait_time * HZ);
1034 if (time_after_eq(jiffies, wtime)) {
1035 ql_dbg(ql_dbg_init, vha, 0x0137,
1036 "Init f/w failed: aen[7]: 0x%x\n",
1037 rd_reg_dword(&reg->aenmailbox7));
1038 rval = QLA_FUNCTION_FAILED;
1042 /* Delay for a while */
1048 ql_dbg(ql_dbg_init, vha, 0x0138,
1049 "%s **** FAILED ****.\n", __func__);
1051 ql_dbg(ql_dbg_init, vha, 0x0139,
1052 "%s **** SUCCESS ****.\n", __func__);
1058 * qlafx00_fw_ready() - Waits for firmware ready.
1061 * Returns 0 on success.
1064 qlafx00_fw_ready(scsi_qla_host_t *vha)
1067 unsigned long wtime;
1068 uint16_t wait_time; /* Wait time if loop is coming ready */
1075 /* wait time before firmware ready */
1076 wtime = jiffies + (wait_time * HZ);
1078 /* Wait for ISP to finish init */
1079 if (!vha->flags.init_done)
1080 ql_dbg(ql_dbg_init, vha, 0x013a,
1081 "Waiting for init to complete...\n");
1084 rval = qlafx00_get_firmware_state(vha, state);
1086 if (rval == QLA_SUCCESS) {
1087 if (state[0] == FSTATE_FX00_INITIALIZED) {
1088 ql_dbg(ql_dbg_init, vha, 0x013b,
1089 "fw_state=%x\n", state[0]);
1094 rval = QLA_FUNCTION_FAILED;
1096 if (time_after_eq(jiffies, wtime))
1099 /* Delay for a while */
1102 ql_dbg(ql_dbg_init, vha, 0x013c,
1103 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1108 ql_dbg(ql_dbg_init, vha, 0x013d,
1109 "Firmware ready **** FAILED ****.\n");
1111 ql_dbg(ql_dbg_init, vha, 0x013e,
1112 "Firmware ready **** SUCCESS ****.\n");
1118 qlafx00_find_all_targets(scsi_qla_host_t *vha,
1119 struct list_head *new_fcports)
1123 fc_port_t *fcport, *new_fcport;
1125 struct qla_hw_data *ha = vha->hw;
1129 if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
1130 return QLA_FUNCTION_FAILED;
1132 if ((atomic_read(&vha->loop_down_timer) ||
1133 STATE_TRANSITION(vha))) {
1134 atomic_set(&vha->loop_down_timer, 0);
1135 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1136 return QLA_FUNCTION_FAILED;
1139 ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
1140 "Listing Target bit map...\n");
1141 ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089,
1144 /* Allocate temporary rmtport for any new rmtports discovered. */
1145 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1146 if (new_fcport == NULL)
1147 return QLA_MEMORY_ALLOC_FAILED;
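	/*
	 * ha->gid_list holds the target bitmap returned by
	 * FXDISC_GET_TGT_NODE_LIST; walk every set bit and fetch the
	 * per-target node info for that tgt_id.
	 */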
1149 for_each_set_bit(tgt_id, (void *)ha->gid_list,
1150 QLAFX00_TGT_NODE_LIST_SIZE) {
1152 /* Send get target node info */
1153 new_fcport->tgt_id = tgt_id;
1154 rval = qlafx00_fx_disc(vha, new_fcport,
1155 FXDISC_GET_TGT_NODE_INFO);
1156 if (rval != QLA_SUCCESS) {
1157 ql_log(ql_log_warn, vha, 0x208a,
1158 "Target info scan failed -- assuming zero-entry "
1163 /* Locate matching device in database. */
1165 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1166 if (memcmp(new_fcport->port_name,
1167 fcport->port_name, WWN_SIZE))
1173 * If tgt_id is same and state FCS_ONLINE, nothing
1176 if (fcport->tgt_id == new_fcport->tgt_id &&
1177 atomic_read(&fcport->state) == FCS_ONLINE)
1181 * Tgt ID changed or device was marked to be updated.
1183 ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
1184 "TGT-ID Change(%s): Present tgt id: "
1186 "wwnn = %llx wwpn = %llx.\n",
1187 __func__, fcport->tgt_id,
1188 atomic_read(&fcport->state),
1189 (unsigned long long)wwn_to_u64(fcport->node_name),
1190 (unsigned long long)wwn_to_u64(fcport->port_name));
1192 ql_log(ql_log_info, vha, 0x208c,
1193 "TGT-ID Announce(%s): Discovered tgt "
1194 "id 0x%x wwnn = %llx "
1195 "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
1196 (unsigned long long)
1197 wwn_to_u64(new_fcport->node_name),
1198 (unsigned long long)
1199 wwn_to_u64(new_fcport->port_name));
1201 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1202 fcport->old_tgt_id = fcport->tgt_id;
1203 fcport->tgt_id = new_fcport->tgt_id;
1204 ql_log(ql_log_info, vha, 0x208d,
1205 "TGT-ID: New fcport Added: %p\n", fcport);
1206 qla2x00_update_fcport(vha, fcport);
1208 ql_log(ql_log_info, vha, 0x208e,
1209 " Existing TGT-ID %x did not get "
1210 " offline event from firmware.\n",
1211 fcport->old_tgt_id);
1212 qla2x00_mark_device_lost(vha, fcport, 0);
1213 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1214 qla2x00_free_fcport(new_fcport);
1223 /* If device was not in our fcports list, then add it. */
1224 list_add_tail(&new_fcport->list, new_fcports);
1226 /* Allocate a new replacement fcport. */
1227 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1228 if (new_fcport == NULL)
1229 return QLA_MEMORY_ALLOC_FAILED;
1232 qla2x00_free_fcport(new_fcport);
1237 * qlafx00_configure_all_targets
1238 * Setup target devices with node ID's.
1241 * ha = adapter block pointer.
1248 qlafx00_configure_all_targets(scsi_qla_host_t *vha)
1251 fc_port_t *fcport, *rmptemp;
1252 LIST_HEAD(new_fcports);
1254 rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1255 FXDISC_GET_TGT_NODE_LIST);
1256 if (rval != QLA_SUCCESS) {
1257 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1261 rval = qlafx00_find_all_targets(vha, &new_fcports);
1262 if (rval != QLA_SUCCESS) {
1263 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1268 * Delete all previous devices marked lost.
1270 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1271 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1274 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
1275 if (fcport->port_type != FCT_INITIATOR)
1276 qla2x00_mark_device_lost(vha, fcport, 0);
1281 * Add the new devices to our devices list.
1283 list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
1284 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1287 qla2x00_update_fcport(vha, fcport);
1288 list_move_tail(&fcport->list, &vha->vp_fcports);
1289 ql_log(ql_log_info, vha, 0x208f,
1290 "Attach new target id 0x%x wwnn = %llx "
1293 (unsigned long long)wwn_to_u64(fcport->node_name),
1294 (unsigned long long)wwn_to_u64(fcport->port_name));
1297 /* Free all new device structures not processed. */
1298 list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
1299 list_del(&fcport->list);
1300 qla2x00_free_fcport(fcport);
1307 * qlafx00_configure_devices
1308 * Updates Fibre Channel Device Database with what is actually on loop.
1311 * ha = adapter block pointer.
1316 * 2 = database was full and device was not configured.
1319 qlafx00_configure_devices(scsi_qla_host_t *vha)
1322 unsigned long flags;
1326 flags = vha->dpc_flags;
1328 ql_dbg(ql_dbg_disc, vha, 0x2090,
1329 "Configure devices -- dpc flags =0x%lx\n", flags);
1331 rval = qlafx00_configure_all_targets(vha);
1333 if (rval == QLA_SUCCESS) {
1334 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1335 rval = QLA_FUNCTION_FAILED;
1337 atomic_set(&vha->loop_state, LOOP_READY);
1338 ql_log(ql_log_info, vha, 0x2091,
1344 ql_dbg(ql_dbg_disc, vha, 0x2092,
1345 "%s *** FAILED ***.\n", __func__);
1347 ql_dbg(ql_dbg_disc, vha, 0x2093,
1348 "%s: exiting normally.\n", __func__);
1354 qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
1356 struct qla_hw_data *ha = vha->hw;
1359 vha->flags.online = 0;
1360 ha->mr.fw_hbt_en = 0;
1363 ha->flags.chip_reset_done = 0;
1364 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1365 vha->qla_stats.total_isp_aborts++;
1366 ql_log(ql_log_info, vha, 0x013f,
1367 "Performing ISP error recovery - ha = %p.\n", ha);
1368 ha->isp_ops->reset_chip(vha);
1371 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1372 atomic_set(&vha->loop_state, LOOP_DOWN);
1373 atomic_set(&vha->loop_down_timer,
1374 QLAFX00_LOOP_DOWN_TIME);
1376 if (!atomic_read(&vha->loop_down_timer))
1377 atomic_set(&vha->loop_down_timer,
1378 QLAFX00_LOOP_DOWN_TIME);
1381 /* Clear all async request states across all VPs. */
1382 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1384 if (atomic_read(&fcport->state) == FCS_ONLINE)
1385 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
1388 if (!ha->flags.eeh_busy) {
1390 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1392 /* Requeue all commands in outstanding command list. */
1393 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
1397 qla2x00_free_irqs(vha);
1399 set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
1401 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1403 /* Clear the Interrupts */
1404 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1406 ql_log(ql_log_info, vha, 0x0140,
1407 "%s Done done - ha=%p.\n", __func__, ha);
1411 * qlafx00_init_response_q_entries() - Initializes response queue entries.
1412 * @rsp: response queue
1414 * Beginning of request ring has initialization control block already built
1415 * by nvram config routine.
1417 * Returns 0 on success.
1420 qlafx00_init_response_q_entries(struct rsp_que *rsp)
1425 rsp->ring_ptr = rsp->ring;
1426 rsp->ring_index = 0;
1427 rsp->status_srb = NULL;
1428 pkt = rsp->ring_ptr;
1429 for (cnt = 0; cnt < rsp->length; cnt++) {
1430 pkt->signature = RESPONSE_PROCESSED;
1431 wrt_reg_dword((void __force __iomem *)&pkt->signature,
1432 RESPONSE_PROCESSED);
1438 qlafx00_rescan_isp(scsi_qla_host_t *vha)
1440 uint32_t status = QLA_FUNCTION_FAILED;
1441 struct qla_hw_data *ha = vha->hw;
1442 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1445 qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
1447 aenmbx7 = rd_reg_dword(&reg->aenmailbox7);
1448 ha->mbx_intr_code = MSW(aenmbx7);
1449 ha->rqstq_intr_code = LSW(aenmbx7);
1450 ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
1451 ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
1452 ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
1453 ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
1455 ql_dbg(ql_dbg_disc, vha, 0x2094,
1456 "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
1457 " Req que offset 0x%x Rsp que offset 0x%x\n",
1458 ha->mbx_intr_code, ha->rqstq_intr_code,
1459 ha->req_que_off, ha->rsp_que_len);
1461 /* Clear the Interrupts */
1462 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1464 status = qla2x00_init_rings(vha);
1466 vha->flags.online = 1;
1468 /* if no cable then assume it's good */
1469 if ((vha->device_flags & DFLG_NO_CABLE))
1471 /* Register system information */
1472 if (qlafx00_fx_disc(vha,
1473 &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
1474 ql_dbg(ql_dbg_disc, vha, 0x2095,
1475 "failed to register host info\n");
1477 scsi_unblock_requests(vha->host);
1482 qlafx00_timer_routine(scsi_qla_host_t *vha)
1484 struct qla_hw_data *ha = vha->hw;
1485 uint32_t fw_heart_beat;
1487 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1490 /* Check firmware health */
1491 if (ha->mr.fw_hbt_cnt)
1492 ha->mr.fw_hbt_cnt--;
1494 if ((!ha->flags.mr_reset_hdlr_active) &&
1495 (!test_bit(UNLOADING, &vha->dpc_flags)) &&
1496 (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
1497 (ha->mr.fw_hbt_en)) {
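		/*
		 * The firmware increments the fwheartbeat register on its own;
		 * if it fails to advance for QLAFX00_HEARTBEAT_MISS_CNT
		 * consecutive checks, schedule an ISP abort.
		 */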
1498 fw_heart_beat = rd_reg_dword(&reg->fwheartbeat);
1499 if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
1500 ha->mr.old_fw_hbt_cnt = fw_heart_beat;
1501 ha->mr.fw_hbt_miss_cnt = 0;
1503 ha->mr.fw_hbt_miss_cnt++;
1504 if (ha->mr.fw_hbt_miss_cnt ==
1505 QLAFX00_HEARTBEAT_MISS_CNT) {
1506 set_bit(ISP_ABORT_NEEDED,
1508 qla2xxx_wake_dpc(vha);
1509 ha->mr.fw_hbt_miss_cnt = 0;
1513 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
1516 if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
1517 /* Reset recovery to be performed in timer routine */
1518 aenmbx0 = rd_reg_dword(&reg->aenmailbox0);
1519 if (ha->mr.fw_reset_timer_exp) {
1520 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1521 qla2xxx_wake_dpc(vha);
1522 ha->mr.fw_reset_timer_exp = 0;
1523 } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
1524 /* Wake up DPC to rescan the targets */
1525 set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
1526 clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1527 qla2xxx_wake_dpc(vha);
1528 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1529 } else if ((aenmbx0 == MBA_FW_STARTING) &&
1530 (!ha->mr.fw_hbt_en)) {
1531 ha->mr.fw_hbt_en = 1;
1532 } else if (!ha->mr.fw_reset_timer_tick) {
1533 if (aenmbx0 == ha->mr.old_aenmbx0_state)
1534 ha->mr.fw_reset_timer_exp = 1;
1535 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1536 } else if (aenmbx0 == 0xFFFFFFFF) {
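			/*
			 * All-ones from the AEN mailbox suggests the PCIe
			 * register window has shifted; re-program the PEX0
			 * window base from BAR1 (assumed recovery path).
			 */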
1537 uint32_t data0, data1;
1539 data0 = QLAFX00_RD_REG(ha,
1540 QLAFX00_BAR1_BASE_ADDR_REG);
1541 data1 = QLAFX00_RD_REG(ha,
1542 QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
1544 data0 &= 0xffff0000;
1545 data1 &= 0x0000ffff;
1548 QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
1550 } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
1551 ha->mr.fw_reset_timer_tick =
1552 QLAFX00_MAX_RESET_INTERVAL;
1553 } else if (aenmbx0 == MBA_FW_RESET_FCT) {
1554 ha->mr.fw_reset_timer_tick =
1555 QLAFX00_MAX_RESET_INTERVAL;
1557 if (ha->mr.old_aenmbx0_state != aenmbx0) {
1558 ha->mr.old_aenmbx0_state = aenmbx0;
1559 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1561 ha->mr.fw_reset_timer_tick--;
1563 if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
1565 * Critical temperature recovery to be
1566 * performed in timer routine
1568 if (ha->mr.fw_critemp_timer_tick == 0) {
1569 tempc = QLAFX00_GET_TEMPERATURE(ha);
1570 ql_dbg(ql_dbg_timer, vha, 0x6012,
1571 "ISPFx00(%s): Critical temp timer, "
1572 "current SOC temperature: %d\n",
1574 if (tempc < ha->mr.critical_temperature) {
1575 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1576 clear_bit(FX00_CRITEMP_RECOVERY,
1578 qla2xxx_wake_dpc(vha);
1580 ha->mr.fw_critemp_timer_tick =
1581 QLAFX00_CRITEMP_INTERVAL;
1583 ha->mr.fw_critemp_timer_tick--;
1586 if (ha->mr.host_info_resend) {
1588 * Incomplete host info might be sent to firmware
1589 * during system boot - info should be resent
1591 if (ha->mr.hinfo_resend_timer_tick == 0) {
1592 ha->mr.host_info_resend = false;
1593 set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
1594 ha->mr.hinfo_resend_timer_tick =
1595 QLAFX00_HINFO_RESEND_INTERVAL;
1596 qla2xxx_wake_dpc(vha);
1598 ha->mr.hinfo_resend_timer_tick--;
1605 * qlafx00_reset_initialize
1606 * Re-initialize after an iSA device reset.
1609 * ha = adapter block pointer.
1615 qlafx00_reset_initialize(scsi_qla_host_t *vha)
1617 struct qla_hw_data *ha = vha->hw;
1619 if (vha->device_flags & DFLG_DEV_FAILED) {
1620 ql_dbg(ql_dbg_init, vha, 0x0142,
1621 "Device in failed state\n");
1625 ha->flags.mr_reset_hdlr_active = 1;
1627 if (vha->flags.online) {
1628 scsi_block_requests(vha->host);
1629 qlafx00_abort_isp_cleanup(vha, false);
1632 ql_log(ql_log_info, vha, 0x0143,
1633 "(%s): succeeded.\n", __func__);
1634 ha->flags.mr_reset_hdlr_active = 0;
1640 * Resets ISP and aborts all outstanding commands.
1643 * ha = adapter block pointer.
1649 qlafx00_abort_isp(scsi_qla_host_t *vha)
1651 struct qla_hw_data *ha = vha->hw;
1653 if (vha->flags.online) {
1654 if (unlikely(pci_channel_offline(ha->pdev) &&
1655 ha->flags.pci_channel_io_perm_failure)) {
1656 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1660 scsi_block_requests(vha->host);
1661 qlafx00_abort_isp_cleanup(vha, false);
1663 scsi_block_requests(vha->host);
1664 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1665 vha->qla_stats.total_isp_aborts++;
1666 ha->isp_ops->reset_chip(vha);
1667 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1668 /* Clear the Interrupts */
1669 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1672 ql_log(ql_log_info, vha, 0x0145,
1673 "(%s): succeeded.\n", __func__);
1678 static inline fc_port_t*
1679 qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
1683 /* Check for matching device in remote port list. */
1684 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1685 if (fcport->tgt_id == tgt_id) {
1686 ql_dbg(ql_dbg_async, vha, 0x5072,
1687 "Matching fcport(%p) found with TGT-ID: 0x%x "
1688 "and Remote TGT_ID: 0x%x\n",
1689 fcport, fcport->tgt_id, tgt_id);
1697 qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
1701 ql_log(ql_log_info, vha, 0x5073,
1702 "Detach TGT-ID: 0x%x\n", tgt_id);
1704 fcport = qlafx00_get_fcport(vha, tgt_id);
1708 qla2x00_mark_device_lost(vha, fcport, 0);
1714 qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
1716 uint32_t aen_code, aen_data;
1718 aen_code = FCH_EVT_VENDOR_UNIQUE;
1719 aen_data = evt->u.aenfx.evtcode;
1721 switch (evt->u.aenfx.evtcode) {
1722 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
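		/*
		 * mbx[1] selects a per-target update (0) or a global update
		 * (0xffff); mbx[2] == 1 reports the target(s) online and
		 * mbx[2] == 2 offline, per the handling below.
		 */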
1723 if (evt->u.aenfx.mbx[1] == 0) {
1724 if (evt->u.aenfx.mbx[2] == 1) {
1725 if (!vha->flags.fw_tgt_reported)
1726 vha->flags.fw_tgt_reported = 1;
1727 atomic_set(&vha->loop_down_timer, 0);
1728 atomic_set(&vha->loop_state, LOOP_UP);
1729 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1730 qla2xxx_wake_dpc(vha);
1731 } else if (evt->u.aenfx.mbx[2] == 2) {
1732 qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
1734 } else if (evt->u.aenfx.mbx[1] == 0xffff) {
1735 if (evt->u.aenfx.mbx[2] == 1) {
1736 if (!vha->flags.fw_tgt_reported)
1737 vha->flags.fw_tgt_reported = 1;
1738 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1739 } else if (evt->u.aenfx.mbx[2] == 2) {
1740 vha->device_flags |= DFLG_NO_CABLE;
1741 qla2x00_mark_all_devices_lost(vha);
1745 case QLAFX00_MBA_LINK_UP:
1746 aen_code = FCH_EVT_LINKUP;
1749 case QLAFX00_MBA_LINK_DOWN:
1750 aen_code = FCH_EVT_LINKDOWN;
1753 case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
1754 ql_log(ql_log_info, vha, 0x5082,
1755 "Process critical temperature event "
1757 evt->u.aenfx.evtcode);
1758 scsi_block_requests(vha->host);
1759 qlafx00_abort_isp_cleanup(vha, true);
1760 scsi_unblock_requests(vha->host);
1764 fc_host_post_event(vha->host, fc_get_event_number(),
1765 aen_code, aen_data);
1769 qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
1771 u64 port_name = 0, node_name = 0;
1773 port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
1774 node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
1776 fc_host_node_name(vha->host) = node_name;
1777 fc_host_port_name(vha->host) = port_name;
1778 if (!pinfo->port_type)
1779 vha->hw->current_topology = ISP_CFG_F;
1780 if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
1781 atomic_set(&vha->loop_state, LOOP_READY);
1782 else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
1783 atomic_set(&vha->loop_state, LOOP_DOWN);
1784 vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
1788 qla2x00_fxdisc_iocb_timeout(void *data)
1791 struct srb_iocb *lio = &sp->u.iocb_cmd;
1793 complete(&lio->u.fxiocb.fxiocb_comp);
1796 static void qla2x00_fxdisc_sp_done(srb_t *sp, int res)
1798 struct srb_iocb *lio = &sp->u.iocb_cmd;
1800 complete(&lio->u.fxiocb.fxiocb_comp);
1804 qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
1807 struct srb_iocb *fdisc;
1808 int rval = QLA_FUNCTION_FAILED;
1809 struct qla_hw_data *ha = vha->hw;
1810 struct host_system_info *phost_info;
1811 struct register_host_info *preg_hsi;
1812 struct new_utsname *p_sysid = NULL;
1814 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1818 sp->type = SRB_FXIOCB_DCMD;
1819 sp->name = "fxdisc";
1821 fdisc = &sp->u.iocb_cmd;
1822 fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
1823 qla2x00_init_timer(sp, FXDISC_TIMEOUT);
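	/*
	 * Each FXDISC sub-opcode below only sets up descriptor sizes and
	 * flags; the request/response DMA buffers themselves are allocated
	 * after the switch based on those flags.
	 */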
1826 case FXDISC_GET_CONFIG_INFO:
1827 fdisc->u.fxiocb.flags =
1828 SRB_FXDISC_RESP_DMA_VALID;
1829 fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
1831 case FXDISC_GET_PORT_INFO:
1832 fdisc->u.fxiocb.flags =
1833 SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1834 fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
1835 fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
1837 case FXDISC_GET_TGT_NODE_INFO:
1838 fdisc->u.fxiocb.flags =
1839 SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1840 fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
1841 fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
1843 case FXDISC_GET_TGT_NODE_LIST:
1844 fdisc->u.fxiocb.flags =
1845 SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1846 fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
1848 case FXDISC_REG_HOST_INFO:
1849 fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
1850 fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
1851 p_sysid = utsname();
1853 ql_log(ql_log_warn, vha, 0x303c,
1854 "Not able to get the system information\n");
1858 case FXDISC_ABORT_IOCTL:
1863 if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
1864 fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
1865 fdisc->u.fxiocb.req_len,
1866 &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
1867 if (!fdisc->u.fxiocb.req_addr)
1870 if (fx_type == FXDISC_REG_HOST_INFO) {
1871 preg_hsi = (struct register_host_info *)
1872 fdisc->u.fxiocb.req_addr;
1873 phost_info = &preg_hsi->hsi;
1874 memset(preg_hsi, 0, sizeof(struct register_host_info));
1875 phost_info->os_type = OS_TYPE_LINUX;
1876 strlcpy(phost_info->sysname, p_sysid->sysname,
1877 sizeof(phost_info->sysname));
1878 strlcpy(phost_info->nodename, p_sysid->nodename,
1879 sizeof(phost_info->nodename));
1880 if (!strcmp(phost_info->nodename, "(none)"))
1881 ha->mr.host_info_resend = true;
1882 strlcpy(phost_info->release, p_sysid->release,
1883 sizeof(phost_info->release));
1884 strlcpy(phost_info->version, p_sysid->version,
1885 sizeof(phost_info->version));
1886 strlcpy(phost_info->machine, p_sysid->machine,
1887 sizeof(phost_info->machine));
1888 strlcpy(phost_info->domainname, p_sysid->domainname,
1889 sizeof(phost_info->domainname));
1890 strlcpy(phost_info->hostdriver, QLA2XXX_VERSION,
1891 sizeof(phost_info->hostdriver));
1892 preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
1893 ql_dbg(ql_dbg_init, vha, 0x0149,
1894 "ISP%04X: Host registration with firmware\n",
1896 ql_dbg(ql_dbg_init, vha, 0x014a,
1897 "os_type = '%d', sysname = '%s', nodname = '%s'\n",
1898 phost_info->os_type,
1899 phost_info->sysname,
1900 phost_info->nodename);
1901 ql_dbg(ql_dbg_init, vha, 0x014b,
1902 "release = '%s', version = '%s'\n",
1903 phost_info->release,
1904 phost_info->version);
1905 ql_dbg(ql_dbg_init, vha, 0x014c,
1907 "domainname = '%s', hostdriver = '%s'\n",
1908 phost_info->machine,
1909 phost_info->domainname,
1910 phost_info->hostdriver);
1911 ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
1912 phost_info, sizeof(*phost_info));
1916 if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
1917 fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
1918 fdisc->u.fxiocb.rsp_len,
1919 &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
1920 if (!fdisc->u.fxiocb.rsp_addr)
1921 goto done_unmap_req;
1924 fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
1925 sp->done = qla2x00_fxdisc_sp_done;
1927 rval = qla2x00_start_sp(sp);
1928 if (rval != QLA_SUCCESS)
1929 goto done_unmap_dma;
1931 wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
1933 if (fx_type == FXDISC_GET_CONFIG_INFO) {
1934 struct config_info_data *pinfo =
1935 (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
1936 strlcpy(vha->hw->model_number, pinfo->model_num,
1937 ARRAY_SIZE(vha->hw->model_number));
1938 strlcpy(vha->hw->model_desc, pinfo->model_description,
1939 ARRAY_SIZE(vha->hw->model_desc));
1940 memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
1941 sizeof(vha->hw->mr.symbolic_name));
1942 memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
1943 sizeof(vha->hw->mr.serial_num));
1944 memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
1945 sizeof(vha->hw->mr.hw_version));
1946 memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
1947 sizeof(vha->hw->mr.fw_version));
1948 strim(vha->hw->mr.fw_version);
1949 memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
1950 sizeof(vha->hw->mr.uboot_version));
1951 memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
1952 sizeof(vha->hw->mr.fru_serial_num));
1953 vha->hw->mr.critical_temperature =
1954 (pinfo->nominal_temp_value) ?
1955 pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
1956 ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
1957 QLAFX00_EXTENDED_IO_EN_MASK) != 0;
1958 } else if (fx_type == FXDISC_GET_PORT_INFO) {
1959 struct port_info_data *pinfo =
1960 (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
1961 memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
1962 memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
1963 vha->d_id.b.domain = pinfo->port_id[0];
1964 vha->d_id.b.area = pinfo->port_id[1];
1965 vha->d_id.b.al_pa = pinfo->port_id[2];
1966 qlafx00_update_host_attr(vha, pinfo);
1967 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
1969 } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
1970 struct qlafx00_tgt_node_info *pinfo =
1971 (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
1972 memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
1973 memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
1974 fcport->port_type = FCT_TARGET;
1975 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
1977 } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
1978 struct qlafx00_tgt_node_info *pinfo =
1979 (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
1980 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
1982 memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
1983 } else if (fx_type == FXDISC_ABORT_IOCTL)
1984 fdisc->u.fxiocb.result =
1985 (fdisc->u.fxiocb.result ==
1986 cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
1987 cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
1989 rval = le32_to_cpu(fdisc->u.fxiocb.result);
1992 if (fdisc->u.fxiocb.rsp_addr)
1993 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
1994 fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
1997 if (fdisc->u.fxiocb.req_addr)
1998 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
1999 fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
2007 * qlafx00_initialize_adapter
2011 * ha = adapter block pointer.
2017 qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2020 struct qla_hw_data *ha = vha->hw;
2023 /* Clear adapter flags. */
2024 vha->flags.online = 0;
2025 ha->flags.chip_reset_done = 0;
2026 vha->flags.reset_active = 0;
2027 ha->flags.pci_channel_io_perm_failure = 0;
2028 ha->flags.eeh_busy = 0;
2029 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2030 atomic_set(&vha->loop_state, LOOP_DOWN);
2031 vha->device_flags = DFLG_NO_CABLE;
2033 vha->flags.management_server_logged_in = 0;
2034 ha->isp_abort_cnt = 0;
2035 ha->beacon_blink_led = 0;
2037 set_bit(0, ha->req_qid_map);
2038 set_bit(0, ha->rsp_qid_map);
2040 ql_dbg(ql_dbg_init, vha, 0x0147,
2041 "Configuring PCI space...\n");
2043 rval = ha->isp_ops->pci_config(vha);
2045 ql_log(ql_log_warn, vha, 0x0148,
2046 "Unable to configure PCI space.\n");
2050 rval = qlafx00_init_fw_ready(vha);
2051 if (rval != QLA_SUCCESS)
2054 qlafx00_save_queue_ptrs(vha);
2056 rval = qlafx00_config_queues(vha);
2057 if (rval != QLA_SUCCESS)
2061 * Allocate the array of outstanding commands
2062 * now that we know the firmware resources.
2064 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
2065 if (rval != QLA_SUCCESS)
2068 rval = qla2x00_init_rings(vha);
2069 ha->flags.chip_reset_done = 1;
2071 tempc = QLAFX00_GET_TEMPERATURE(ha);
2072 ql_dbg(ql_dbg_init, vha, 0x0152,
2073 "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
2080 qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
2083 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2084 int rval = QLA_FUNCTION_FAILED;
2087 if (qla2x00_reset_active(vha))
2088 ql_log(ql_log_warn, vha, 0x70ce,
2089 "ISP reset active.\n");
2090 else if (!vha->hw->flags.eeh_busy) {
2091 rval = qlafx00_get_firmware_state(vha, state);
2093 if (rval != QLA_SUCCESS)
2094 memset(state, -1, sizeof(state));
2100 qlafx00_get_host_speed(struct Scsi_Host *shost)
2102 struct qla_hw_data *ha = ((struct scsi_qla_host *)
2103 (shost_priv(shost)))->hw;
2104 u32 speed = FC_PORTSPEED_UNKNOWN;
2106 switch (ha->link_data_rate) {
2107 case QLAFX00_PORT_SPEED_2G:
2108 speed = FC_PORTSPEED_2GBIT;
2110 case QLAFX00_PORT_SPEED_4G:
2111 speed = FC_PORTSPEED_4GBIT;
2113 case QLAFX00_PORT_SPEED_8G:
2114 speed = FC_PORTSPEED_8GBIT;
2116 case QLAFX00_PORT_SPEED_10G:
2117 speed = FC_PORTSPEED_10GBIT;
2120 fc_host_speed(shost) = speed;
2123 /** QLAFX00 specific ISR implementation functions */
2126 qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2127 uint32_t sense_len, struct rsp_que *rsp, int res)
2129 struct scsi_qla_host *vha = sp->vha;
2130 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2131 uint32_t track_sense_len;
2133 SET_FW_SENSE_LEN(sp, sense_len);
2135 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2136 sense_len = SCSI_SENSE_BUFFERSIZE;
2138 SET_CMD_SENSE_LEN(sp, sense_len);
2139 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2140 track_sense_len = sense_len;
2142 if (sense_len > par_sense_len)
2143 sense_len = par_sense_len;
2145 memcpy(cp->sense_buffer, sense_data, sense_len);
2147 SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
2149 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2150 track_sense_len -= sense_len;
2151 SET_CMD_SENSE_LEN(sp, track_sense_len);
2153 ql_dbg(ql_dbg_io, vha, 0x304d,
2154 "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
2155 sense_len, par_sense_len, track_sense_len);
2156 if (GET_FW_SENSE_LEN(sp) > 0) {
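		/*
		 * More sense bytes remain in the firmware than fit in this
		 * IOCB; park the srb on rsp->status_srb so follow-on status
		 * continuation entries (handled elsewhere in the ISR path)
		 * can append the rest.
		 */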
2157 rsp->status_srb = sp;
2162 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
2163 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2164 sp->vha->host_no, cp->device->id, cp->device->lun,
2166 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
2167 cp->sense_buffer, sense_len);
2172 qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2173 struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
2174 __le16 sstatus, __le16 cpstatus)
2176 struct srb_iocb *tmf;
2178 tmf = &sp->u.iocb_cmd;
2179 if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
2180 (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
2181 cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
2182 tmf->u.tmf.comp_status = cpstatus;
2187 qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2188 struct abort_iocb_entry_fx00 *pkt)
2190 const char func[] = "ABT_IOCB";
2192 struct srb_iocb *abt;
2194 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2198 abt = &sp->u.iocb_cmd;
2199 abt->u.abt.comp_status = pkt->tgt_id_sts;
2204 qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2205 struct ioctl_iocb_entry_fx00 *pkt)
2207 const char func[] = "IOSB_IOCB";
2209 struct bsg_job *bsg_job;
2210 struct fc_bsg_reply *bsg_reply;
2211 struct srb_iocb *iocb_job;
2213 struct qla_mt_iocb_rsp_fx00 fstatus;
2214 uint8_t *fw_sts_ptr;
2216 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2220 if (sp->type == SRB_FXIOCB_DCMD) {
2221 iocb_job = &sp->u.iocb_cmd;
2222 iocb_job->u.fxiocb.seq_number = pkt->seq_no;
2223 iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
2224 iocb_job->u.fxiocb.result = pkt->status;
2225 if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
2226 iocb_job->u.fxiocb.req_data =
2229 bsg_job = sp->u.bsg_job;
2230 bsg_reply = bsg_job->reply;
2232 memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2234 fstatus.reserved_1 = pkt->reserved_0;
2235 fstatus.func_type = pkt->comp_func_num;
2236 fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2237 fstatus.ioctl_data = pkt->dataword_r;
2238 fstatus.adapid = pkt->adapid;
2239 fstatus.reserved_2 = pkt->dataword_r_extra;
2240 fstatus.res_count = pkt->residuallen;
2241 fstatus.status = pkt->status;
2242 fstatus.seq_number = pkt->seq_no;
2243 memcpy(fstatus.reserved_3,
2244 pkt->reserved_2, 20 * sizeof(uint8_t));
2246 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
2248 memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus));
2249 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
2250 sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
2252 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2253 sp->vha, 0x5080, pkt, sizeof(*pkt));
2255 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2257 fw_sts_ptr, sizeof(fstatus));
2259 res = bsg_reply->result = DID_OK << 16;
2260 bsg_reply->reply_payload_rcv_len =
2261 bsg_job->reply_payload.payload_len;
2267 * qlafx00_status_entry() - Process a Status IOCB entry.
2268 * @vha: SCSI driver HA context
2269 * @rsp: response queue
2270 * @pkt: Entry pointer
2273 qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2277 struct scsi_cmnd *cp;
2278 struct sts_entry_fx00 *sts;
2281 __le16 lscsi_status;
2283 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2285 uint8_t *rsp_info = NULL, *sense_data = NULL;
2286 struct qla_hw_data *ha = vha->hw;
2287 uint32_t hindex, handle;
2289 struct req_que *req;
2293 sts = (struct sts_entry_fx00 *) pkt;
2295 comp_status = sts->comp_status;
2296 scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
2297 hindex = sts->handle;
2298 handle = LSW(hindex);
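	/*
	 * The completion handle is assumed to pack the request-queue number
	 * in the upper 16 bits and the outstanding-command slot in the lower
	 * 16 bits.
	 */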
2301 req = ha->req_q_map[que];
2303 /* Validate handle. */
2304 if (handle < req->num_outstanding_cmds)
2305 sp = req->outstanding_cmds[handle];
2310 ql_dbg(ql_dbg_io, vha, 0x3034,
2311 "Invalid status handle (0x%x).\n", handle);
2313 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2314 qla2xxx_wake_dpc(vha);
2318 if (sp->type == SRB_TM_CMD) {
2319 req->outstanding_cmds[handle] = NULL;
2320 qlafx00_tm_iocb_entry(vha, req, pkt, sp,
2321 scsi_status, comp_status);
2325 /* Fast path completion. */
2326 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2327 qla2x00_process_completed_request(vha, req, handle);
2331 req->outstanding_cmds[handle] = NULL;
2332 cp = GET_CMD_SP(sp);
2334 ql_dbg(ql_dbg_io, vha, 0x3048,
2335 "Command already returned (0x%x/%p).\n",
2341 lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
2343 fcport = sp->fcport;
2345 sense_len = par_sense_len = rsp_info_len = resid_len =
2347 if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
2348 sense_len = sts->sense_len;
2349 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2350 | (uint16_t)SS_RESIDUAL_OVER)))
2351 resid_len = le32_to_cpu(sts->residual_len);
2352 if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
2353 fw_resid_len = le32_to_cpu(sts->residual_len);
2354 rsp_info = sense_data = sts->data;
2355 par_sense_len = sizeof(sts->data);
2357 /* Check for overrun. */
2358 if (comp_status == CS_COMPLETE &&
2359 scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
2360 comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
2363	 * Based on the host and SCSI status, generate the status code reported to the Linux SCSI midlayer.
2365 switch (le16_to_cpu(comp_status)) {
2368 if (scsi_status == 0) {
2372 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2373 | (uint16_t)SS_RESIDUAL_OVER))) {
2375 scsi_set_resid(cp, resid);
2377 if (!lscsi_status &&
2378 ((unsigned)(scsi_bufflen(cp) - resid) <
2380 ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
2381 "Mid-layer underflow "
2382 "detected (0x%x of 0x%x bytes).\n",
2383 resid, scsi_bufflen(cp));
2385 res = DID_ERROR << 16;
2389 res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2392 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2393 ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
2394 "QUEUE FULL detected.\n");
2398 if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2401 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2402 if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2405 qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2409 case CS_DATA_UNDERRUN:
2410 /* Use F/W calculated residual length. */
2411 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2412 resid = fw_resid_len;
2415 scsi_set_resid(cp, resid);
2416 if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
2417 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2418 && fw_resid_len != resid_len) {
2419 ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
2420 "Dropped frame(s) detected "
2421 "(0x%x of 0x%x bytes).\n",
2422 resid, scsi_bufflen(cp));
2424 res = DID_ERROR << 16 |
2425 le16_to_cpu(lscsi_status);
2426 goto check_scsi_status;
2429 if (!lscsi_status &&
2430 ((unsigned)(scsi_bufflen(cp) - resid) <
2432 ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
2433 "Mid-layer underflow "
2434 "detected (0x%x of 0x%x bytes, "
2435 "cp->underflow: 0x%x).\n",
2436 resid, scsi_bufflen(cp), cp->underflow);
2438 res = DID_ERROR << 16;
2441 } else if (lscsi_status !=
2442 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
2443 lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
2445	 * A SCSI status of TASK SET FULL or BUSY is treated as
2446	 * task-not-completed.
2449 ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
2450 "Dropped frame(s) detected (0x%x "
2451 "of 0x%x bytes).\n", resid,
2454 res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
2455 goto check_scsi_status;
2457 ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
2458 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2459 scsi_status, lscsi_status);
2462 res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2467	 * Check whether the SCSI status is non-zero; if so, report the SCSI status.
2470 if (lscsi_status != 0) {
2472 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2473 ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
2474 "QUEUE FULL detected.\n");
2479 cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2482 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2484 cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2487 qlafx00_handle_sense(sp, sense_data, par_sense_len,
2488 sense_len, rsp, res);
2492 case CS_PORT_LOGGED_OUT:
2493 case CS_PORT_CONFIG_CHG:
2496 case CS_PORT_UNAVAILABLE:
2501 * We are going to have the fc class block the rport
2502 * while we try to recover so instruct the mid layer
2503 * to requeue until the class decides how to handle this.
2505 res = DID_TRANSPORT_DISRUPTED << 16;
2507 ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
2508 "Port down status: port-state=0x%x.\n",
2509 atomic_read(&fcport->state));
2511 if (atomic_read(&fcport->state) == FCS_ONLINE)
2512 qla2x00_mark_device_lost(fcport->vha, fcport, 1);
2516 res = DID_RESET << 16;
2520 res = DID_ERROR << 16;
2525 ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
2526 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2527 "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
2528 "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
2529 "par_sense_len=0x%x, rsp_info_len=0x%x\n",
2530 comp_status, scsi_status, res, vha->host_no,
2531 cp->device->id, cp->device->lun, fcport->tgt_id,
2532 lscsi_status, cp->cmnd, scsi_bufflen(cp),
2533 rsp_info, resid_len, fw_resid_len, sense_len,
2534 par_sense_len, rsp_info_len);
2536 if (rsp->status_srb == NULL)
2543	 * qlafx00_status_cont_entry() - Process a Status Continuation entry.
2544 * @rsp: response queue
2545 * @pkt: Entry pointer
2547 * Extended sense data.
2550 qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2552 uint8_t sense_sz = 0;
2553 struct qla_hw_data *ha = rsp->hw;
2554 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2555 srb_t *sp = rsp->status_srb;
2556 struct scsi_cmnd *cp;
2561 ql_dbg(ql_dbg_io, vha, 0x3037,
2562 "no SP, sp = %p\n", sp);
2566 if (!GET_FW_SENSE_LEN(sp)) {
2567 ql_dbg(ql_dbg_io, vha, 0x304b,
2568 "no fw sense data, sp = %p\n", sp);
2571 cp = GET_CMD_SP(sp);
2573 ql_log(ql_log_warn, vha, 0x303b,
2574 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2576 rsp->status_srb = NULL;
2580 if (!GET_CMD_SENSE_LEN(sp)) {
2581 ql_dbg(ql_dbg_io, vha, 0x304c,
2582 "no sense data, sp = %p\n", sp);
2584 sense_len = GET_CMD_SENSE_LEN(sp);
2585 sense_ptr = GET_CMD_SENSE_PTR(sp);
2586 ql_dbg(ql_dbg_io, vha, 0x304f,
2587 "sp=%p sense_len=0x%x sense_ptr=%p.\n",
2588 sp, sense_len, sense_ptr);
2590 if (sense_len > sizeof(pkt->data))
2591 sense_sz = sizeof(pkt->data);
2593 sense_sz = sense_len;
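/*
 * At most one continuation entry's worth of sense data is copied per
 * pass; any remainder arrives in further Status Continuation entries.
 */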
2595 /* Move sense data. */
2596 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
2598 memcpy(sense_ptr, pkt->data, sense_sz);
2599 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
2600 sense_ptr, sense_sz);
2602 sense_len -= sense_sz;
2603 sense_ptr += sense_sz;
2605 SET_CMD_SENSE_PTR(sp, sense_ptr);
2606 SET_CMD_SENSE_LEN(sp, sense_len);
2608 sense_len = GET_FW_SENSE_LEN(sp);
2609 sense_len = (sense_len > sizeof(pkt->data)) ?
2610 (sense_len - sizeof(pkt->data)) : 0;
2611 SET_FW_SENSE_LEN(sp, sense_len);
2613 /* Place command on done queue. */
2614 if (sense_len == 0) {
2615 rsp->status_srb = NULL;
2616 sp->done(sp, cp->result);
2623 * qlafx00_multistatus_entry() - Process Multi response queue entries.
2624 * @vha: SCSI driver HA context
2625 * @rsp: response queue
2626 * @pkt: received packet
2629 qlafx00_multistatus_entry(struct scsi_qla_host *vha,
2630 struct rsp_que *rsp, void *pkt)
2633 struct multi_sts_entry_fx00 *stsmfx;
2634 struct qla_hw_data *ha = vha->hw;
2635 uint32_t handle, hindex, handle_count, i;
2637 struct req_que *req;
2640 stsmfx = (struct multi_sts_entry_fx00 *) pkt;
2642 handle_count = stsmfx->handle_count;
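/*
 * A multi-status entry carries up to MAX_HANDLE_COUNT fast-path
 * completion handles; each one is validated and completed below.
 */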
2644 if (handle_count > MAX_HANDLE_COUNT) {
2645 ql_dbg(ql_dbg_io, vha, 0x3035,
2646 "Invalid handle count (0x%x).\n", handle_count);
2647 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2648 qla2xxx_wake_dpc(vha);
2652 handle_ptr = &stsmfx->handles[0];
2654 for (i = 0; i < handle_count; i++) {
2655 hindex = le32_to_cpu(*handle_ptr);
2656 handle = LSW(hindex);
2658 req = ha->req_q_map[que];
2660 /* Validate handle. */
2661 if (handle < req->num_outstanding_cmds)
2662 sp = req->outstanding_cmds[handle];
2667 ql_dbg(ql_dbg_io, vha, 0x3044,
2668 "Invalid status handle (0x%x).\n", handle);
2669 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2670 qla2xxx_wake_dpc(vha);
2673 qla2x00_process_completed_request(vha, req, handle);
2679 * qlafx00_error_entry() - Process an error entry.
2680 * @vha: SCSI driver HA context
2681 * @rsp: response queue
2682 * @pkt: Entry pointer
2685 qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2686 struct sts_entry_fx00 *pkt)
2689 struct qla_hw_data *ha = vha->hw;
2690 const char func[] = "ERROR-IOCB";
2692 struct req_que *req = NULL;
2693 int res = DID_ERROR << 16;
2695 req = ha->req_q_map[que];
2697 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2703 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2704 qla2xxx_wake_dpc(vha);
2708 * qlafx00_process_response_queue() - Process response queue entries.
2709 * @vha: SCSI driver HA context
2710 * @rsp: response queue
2713 qlafx00_process_response_queue(struct scsi_qla_host *vha,
2714 struct rsp_que *rsp)
2716 struct sts_entry_fx00 *pkt;
2718 uint16_t lreq_q_in = 0;
2719 uint16_t lreq_q_out = 0;
2721 lreq_q_in = rd_reg_dword(rsp->rsp_q_in);
2722 lreq_q_out = rsp->ring_index;
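/*
 * Consume entries until the local out index catches up with the chip's
 * in pointer; each entry is copied out of the IO-mapped ring before it
 * is parsed.
 */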
2724 while (lreq_q_in != lreq_q_out) {
2725 lptr = rsp->ring_ptr;
2726 memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
2727 sizeof(rsp->rsp_pkt));
2728 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2732 if (rsp->ring_index == rsp->length) {
2734 rsp->ring_index = 0;
2735 rsp->ring_ptr = rsp->ring;
2740 if (pkt->entry_status != 0 &&
2741 pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
2742 ql_dbg(ql_dbg_async, vha, 0x507f,
2743 "type of error status in response: 0x%x\n",
2745 qlafx00_error_entry(vha, rsp,
2746 (struct sts_entry_fx00 *)pkt);
2750 switch (pkt->entry_type) {
2751 case STATUS_TYPE_FX00:
2752 qlafx00_status_entry(vha, rsp, pkt);
2755 case STATUS_CONT_TYPE_FX00:
2756 qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2759 case MULTI_STATUS_TYPE_FX00:
2760 qlafx00_multistatus_entry(vha, rsp, pkt);
2763 case ABORT_IOCB_TYPE_FX00:
2764 qlafx00_abort_iocb_entry(vha, rsp->req,
2765 (struct abort_iocb_entry_fx00 *)pkt);
2768 case IOCTL_IOSB_TYPE_FX00:
2769 qlafx00_ioctl_iosb_entry(vha, rsp->req,
2770 (struct ioctl_iocb_entry_fx00 *)pkt);
2773 /* Type Not Supported. */
2774 ql_dbg(ql_dbg_async, vha, 0x5081,
2775 "Received unknown response pkt type %x "
2776 "entry status=%x.\n",
2777 pkt->entry_type, pkt->entry_status);
2782 /* Adjust ring index */
2783 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
2787	 * qlafx00_async_event() - Process asynchronous events.
2788 * @vha: SCSI driver HA context
2791 qlafx00_async_event(scsi_qla_host_t *vha)
2793 struct qla_hw_data *ha = vha->hw;
2794 struct device_reg_fx00 __iomem *reg;
2797 reg = &ha->iobase->ispfx00;
2798 /* Setup to process RIO completion. */
2799 switch (ha->aenmb[0]) {
2800 case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
2801 ql_log(ql_log_warn, vha, 0x5079,
2802 "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
2803 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2806 case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
2807 ql_dbg(ql_dbg_async, vha, 0x5076,
2808 "Asynchronous FW shutdown requested.\n");
2809 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2810 qla2xxx_wake_dpc(vha);
2813 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
2814	ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
2815	ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
2816	ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
2817 ql_dbg(ql_dbg_async, vha, 0x5077,
2818 "Asynchronous port Update received "
2819 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
2820 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2824 case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
2825 ql_log(ql_log_info, vha, 0x5085,
2826 "Asynchronous over temperature event received "
2831 case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
2832 ql_log(ql_log_info, vha, 0x5086,
2833 "Asynchronous normal temperature event received "
2838 case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
2839 ql_log(ql_log_info, vha, 0x5083,
2840 "Asynchronous critical temperature event received "
2846	ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
2847	ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
2848	ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
2849	ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
2850	ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
2851	ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
2852	ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
2853 ql_dbg(ql_dbg_async, vha, 0x5078,
2854 "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
2855 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
2856 ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
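/* Every AEN is handed off to process context for further handling. */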
2859 qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2860 (uint32_t *)ha->aenmb, data_size);
2864	 * qlafx00_mbx_completion() - Process mailbox command completions.
2865 * @vha: SCSI driver HA context
2866	 * @mb0: value read from mailbox register 0 (first completion status word)
2869 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2872 __le32 __iomem *wptr;
2873 struct qla_hw_data *ha = vha->hw;
2874 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2877 ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
2879 /* Load return mailbox registers. */
2880 ha->flags.mbox_int = 1;
2881 ha->mailbox_out32[0] = mb0;
2882	wptr = &reg->mailbox17;
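/* Remaining 32-bit return mailboxes are read starting at mailbox17. */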
2884 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2885 ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
2891 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
2892 * @irq: interrupt number
2893 * @dev_id: SCSI driver HA context
2895 * Called by system whenever the host adapter generates an interrupt.
2897 * Returns handled flag.
2900 qlafx00_intr_handler(int irq, void *dev_id)
2902 scsi_qla_host_t *vha;
2903 struct qla_hw_data *ha;
2904 struct device_reg_fx00 __iomem *reg;
2909 struct rsp_que *rsp;
2910 unsigned long flags;
2911 uint32_t clr_intr = 0;
2912 uint32_t intr_stat = 0;
2914 rsp = (struct rsp_que *) dev_id;
2916 ql_log(ql_log_info, NULL, 0x507d,
2917 "%s: NULL response queue pointer.\n", __func__);
2922 reg = &ha->iobase->ispfx00;
2925 if (unlikely(pci_channel_offline(ha->pdev)))
2928 spin_lock_irqsave(&ha->hardware_lock, flags);
2929 vha = pci_get_drvdata(ha->pdev);
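/*
 * Bounded service loop: read the interrupt status, dispatch mailbox,
 * AEN and response-queue work, then clear only the bits handled.
 */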
2930 for (iter = 50; iter--; clr_intr = 0) {
2931 stat = QLAFX00_RD_INTR_REG(ha);
2932 if (qla2x00_check_reg32_for_disconnect(vha, stat))
2934 intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
2938 if (stat & QLAFX00_INTR_MB_CMPLT) {
2939	mb[0] = rd_reg_dword(&reg->mailbox16);
2940 qlafx00_mbx_completion(vha, mb[0]);
2941 status |= MBX_INTERRUPT;
2942 clr_intr |= QLAFX00_INTR_MB_CMPLT;
2944 if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
2945 ha->aenmb[0] = rd_reg_dword(®->aenmailbox0);
2946 qlafx00_async_event(vha);
2947 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
2949 if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
2950 qlafx00_process_response_queue(vha, rsp);
2951 clr_intr |= QLAFX00_INTR_RSP_CMPLT;
2954 QLAFX00_CLR_INTR_REG(ha, clr_intr);
2955 QLAFX00_RD_INTR_REG(ha);
2958 qla2x00_handle_mbx_completion(ha, status);
2959 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2964 /** QLAFX00 specific IOCB implementation functions */
2966 static inline cont_a64_entry_t *
2967 qlafx00_prep_cont_type1_iocb(struct req_que *req,
2968 cont_a64_entry_t *lcont_pkt)
2970 cont_a64_entry_t *cont_pkt;
2972 /* Adjust ring index. */
2974 if (req->ring_index == req->length) {
2975 req->ring_index = 0;
2976 req->ring_ptr = req->ring;
2981 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
2983 /* Load packet defaults. */
2984 lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
2990 qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
2991 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
2993 uint16_t avail_dsds;
2994 struct dsd64 *cur_dsd;
2995 scsi_qla_host_t *vha;
2996 struct scsi_cmnd *cmd;
2997 struct scatterlist *sg;
2999 struct req_que *req;
3000 cont_a64_entry_t lcont_pkt;
3001 cont_a64_entry_t *cont_pkt;
3006 cmd = GET_CMD_SP(sp);
3010	/* Update entry type to indicate an FX00 Command Type 7 IOCB */
3011 lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
3013 /* No data transfer */
3014 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
3015 lcmd_pkt->byte_count = cpu_to_le32(0);
3019 /* Set transfer direction */
3020 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
3021 lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
3022 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
3023 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
3024 lcmd_pkt->cntrl_flags = TMF_READ_DATA;
3025 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
3028	/* One DSD is available in the Command Type 7 IOCB */
3030 cur_dsd = &lcmd_pkt->dsd;
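/*
 * The first DSD lives in the command IOCB itself; once it is used up,
 * further DSDs go into Continuation Type 1 IOCBs that are built locally
 * and copied to the ring with memcpy_toio().
 */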
3032 /* Load data segments */
3033 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
3034 /* Allocate additional continuation packets? */
3035 if (avail_dsds == 0) {
3037 * Five DSDs are available in the Continuation
3040 memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
3042 qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
3043 cur_dsd = lcont_pkt.dsd;
3048 append_dsd64(&cur_dsd, sg);
3050 if (avail_dsds == 0 && cont == 1) {
3052 memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
3057 if (avail_dsds != 0 && cont == 1) {
3058 memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
3064 * qlafx00_start_scsi() - Send a SCSI command to the ISP
3065 * @sp: command to send to the ISP
3067 * Returns non-zero if a failure occurred, else zero.
3070 qlafx00_start_scsi(srb_t *sp)
3073 unsigned long flags;
3078 struct req_que *req = NULL;
3079 struct rsp_que *rsp = NULL;
3080 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3081 struct scsi_qla_host *vha = sp->vha;
3082 struct qla_hw_data *ha = vha->hw;
3083 struct cmd_type_7_fx00 *cmd_pkt;
3084 struct cmd_type_7_fx00 lcmd_pkt;
3085 struct scsi_lun llun;
3087 /* Setup device pointers. */
3088 rsp = ha->rsp_q_map[0];
3091 /* So we know we haven't pci_map'ed anything yet */
3094 /* Acquire ring specific lock */
3095 spin_lock_irqsave(&ha->hardware_lock, flags);
3097 handle = qla2xxx_get_next_handle(req);
3101 /* Map the sg table so we have an accurate count of sg entries needed */
3102 if (scsi_sg_count(cmd)) {
3103 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3104 scsi_sg_count(cmd), cmd->sc_data_direction);
3105 if (unlikely(!nseg))
3111 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
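/*
 * Make sure the request ring has room for this command: re-read the
 * out pointer and recompute the free entry count before committing.
 */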
3112 if (req->cnt < (req_cnt + 2)) {
3113 cnt = rd_reg_dword_relaxed(req->req_q_out);
3115 if (req->ring_index < cnt)
3116 req->cnt = cnt - req->ring_index;
3118 req->cnt = req->length -
3119 (req->ring_index - cnt);
3120 if (req->cnt < (req_cnt + 2))
3124 /* Build command packet. */
3125 req->current_outstanding_cmd = handle;
3126 req->outstanding_cmds[handle] = sp;
3127 sp->handle = handle;
3128 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3129 req->cnt -= req_cnt;
3131 cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
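/*
 * Build the command IOCB in a local copy first; it is written to the
 * IO-mapped ring in a single memcpy_toio() below.
 */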
3133 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
3135 lcmd_pkt.handle = make_handle(req->id, sp->handle);
3136 lcmd_pkt.reserved_0 = 0;
3137 lcmd_pkt.port_path_ctrl = 0;
3138 lcmd_pkt.reserved_1 = 0;
3139 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
3140 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
3142 int_to_scsilun(cmd->device->lun, &llun);
3143 host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
3144 sizeof(lcmd_pkt.lun));
3146 /* Load SCSI command packet. */
3147 host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
3148 lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3150 /* Build IOCB segments */
3151 qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
3153 /* Set total data segment count. */
3154 lcmd_pkt.entry_count = (uint8_t)req_cnt;
3156 /* Specify response queue number where completion should happen */
3157 lcmd_pkt.entry_status = (uint8_t) rsp->id;
3159 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
3160 cmd->cmnd, cmd->cmd_len);
3161 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
3162 &lcmd_pkt, sizeof(lcmd_pkt));
3164 memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
3167 /* Adjust ring index. */
3169 if (req->ring_index == req->length) {
3170 req->ring_index = 0;
3171 req->ring_ptr = req->ring;
3175 sp->flags |= SRB_DMA_VALID;
3177 /* Set chip new ring index. */
3178 wrt_reg_dword(req->req_q_in, req->ring_index);
3179 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
3181 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3186 scsi_dma_unmap(cmd);
3188 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3190 return QLA_FUNCTION_FAILED;
3194 qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
3196 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3197 scsi_qla_host_t *vha = sp->vha;
3198 struct req_que *req = vha->req;
3199 struct tsk_mgmt_entry_fx00 tm_iocb;
3200 struct scsi_lun llun;
3202 memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
3203 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
3204 tm_iocb.entry_count = 1;
3205 tm_iocb.handle = make_handle(req->id, sp->handle);
3206 tm_iocb.reserved_0 = 0;
3207 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
3208 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
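/* The LUN field is only filled in for a LUN reset; other TM functions leave it zero. */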
3209 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
3210 int_to_scsilun(fxio->u.tmf.lun, &llun);
3211 host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
3212 sizeof(struct scsi_lun));
3215 memcpy(ptm_iocb, &tm_iocb,
3216 sizeof(struct tsk_mgmt_entry_fx00));
3221 qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
3223 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3224 scsi_qla_host_t *vha = sp->vha;
3225 struct req_que *req = vha->req;
3226 struct abort_iocb_entry_fx00 abt_iocb;
3228 memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
3229 abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
3230 abt_iocb.entry_count = 1;
3231 abt_iocb.handle = make_handle(req->id, sp->handle);
3232 abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl);
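/* abort_handle identifies the outstanding command being aborted, encoded like the original command handle. */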
3233 abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
3234 abt_iocb.req_que_no = cpu_to_le16(req->id);
3236 memcpy(pabt_iocb, &abt_iocb,
3237 sizeof(struct abort_iocb_entry_fx00));
3242 qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3244 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3245 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
3246 struct bsg_job *bsg_job;
3247 struct fc_bsg_request *bsg_request;
3248 struct fxdisc_entry_fx00 fx_iocb;
3249 uint8_t entry_cnt = 1;
3251 memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
3252 fx_iocb.entry_type = FX00_IOCB_TYPE;
3253 fx_iocb.handle = sp->handle;
3254 fx_iocb.entry_count = entry_cnt;
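/*
 * Two request sources: driver-internal FX IOCBs (SRB_FXIOCB_DCMD) are
 * populated from the srb_iocb, while BSG pass-through requests are
 * built from the vendor command and its scatter/gather lists.
 */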
3256 if (sp->type == SRB_FXIOCB_DCMD) {
3258 sp->u.iocb_cmd.u.fxiocb.req_func_type;
3259 fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
3260 fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
3261 fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
3262 fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
3263 fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;
3265 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
3266 fx_iocb.req_dsdcnt = cpu_to_le16(1);
3267 fx_iocb.req_xfrcnt =
3268 cpu_to_le16(fxio->u.fxiocb.req_len);
3269 put_unaligned_le64(fxio->u.fxiocb.req_dma_handle,
3270 &fx_iocb.dseg_rq.address);
3271 fx_iocb.dseg_rq.length =
3272 cpu_to_le32(fxio->u.fxiocb.req_len);
3275 if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
3276 fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
3277 fx_iocb.rsp_xfrcnt =
3278 cpu_to_le16(fxio->u.fxiocb.rsp_len);
3279 put_unaligned_le64(fxio->u.fxiocb.rsp_dma_handle,
3280 &fx_iocb.dseg_rsp.address);
3281 fx_iocb.dseg_rsp.length =
3282 cpu_to_le32(fxio->u.fxiocb.rsp_len);
3285 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
3286 fx_iocb.dataword = fxio->u.fxiocb.req_data;
3288 fx_iocb.flags = fxio->u.fxiocb.flags;
3290 struct scatterlist *sg;
3292 bsg_job = sp->u.bsg_job;
3293 bsg_request = bsg_job->request;
3294 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
3295 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3297 fx_iocb.func_num = piocb_rqst->func_type;
3298 fx_iocb.adapid = piocb_rqst->adapid;
3299 fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
3300 fx_iocb.reserved_0 = piocb_rqst->reserved_0;
3301 fx_iocb.reserved_1 = piocb_rqst->reserved_1;
3302 fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
3303 fx_iocb.dataword = piocb_rqst->dataword;
3304 fx_iocb.req_xfrcnt = piocb_rqst->req_len;
3305 fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;
3307 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
3308 int avail_dsds, tot_dsds;
3309 cont_a64_entry_t lcont_pkt;
3310 cont_a64_entry_t *cont_pkt = NULL;
3311 struct dsd64 *cur_dsd;
3312 int index = 0, cont = 0;
3314 fx_iocb.req_dsdcnt =
3315 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3317 bsg_job->request_payload.sg_cnt;
3318 cur_dsd = &fx_iocb.dseg_rq;
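/*
 * Chain the request payload DSDs as in qlafx00_build_scsi_iocbs();
 * each continuation IOCB is accounted for in entry_cnt.
 */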
3320 for_each_sg(bsg_job->request_payload.sg_list, sg,
3322 /* Allocate additional continuation packets? */
3323 if (avail_dsds == 0) {
3325 * Five DSDs are available in the Cont.
3328 memset(&lcont_pkt, 0,
3329 REQUEST_ENTRY_SIZE);
3331 qlafx00_prep_cont_type1_iocb(
3332 sp->vha->req, &lcont_pkt);
3333 cur_dsd = lcont_pkt.dsd;
3339 append_dsd64(&cur_dsd, sg);
3342 if (avail_dsds == 0 && cont == 1) {
3345 (void __iomem *)cont_pkt,
3346 &lcont_pkt, REQUEST_ENTRY_SIZE);
3348 ql_dbg_user + ql_dbg_verbose,
3350 (uint8_t *)&lcont_pkt,
3351 REQUEST_ENTRY_SIZE);
3354 if (avail_dsds != 0 && cont == 1) {
3355 memcpy_toio((void __iomem *)cont_pkt,
3356 &lcont_pkt, REQUEST_ENTRY_SIZE);
3357 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3359 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3363 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
3364 int avail_dsds, tot_dsds;
3365 cont_a64_entry_t lcont_pkt;
3366 cont_a64_entry_t *cont_pkt = NULL;
3367 struct dsd64 *cur_dsd;
3368 int index = 0, cont = 0;
3370 fx_iocb.rsp_dsdcnt =
3371 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3372 tot_dsds = bsg_job->reply_payload.sg_cnt;
3373 cur_dsd = &fx_iocb.dseg_rsp;
3376 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3378 /* Allocate additional continuation packets? */
3379 if (avail_dsds == 0) {
3381 * Five DSDs are available in the Cont.
3384 memset(&lcont_pkt, 0,
3385 REQUEST_ENTRY_SIZE);
3387 qlafx00_prep_cont_type1_iocb(
3388 sp->vha->req, &lcont_pkt);
3389 cur_dsd = lcont_pkt.dsd;
3395 append_dsd64(&cur_dsd, sg);
3398 if (avail_dsds == 0 && cont == 1) {
3400 memcpy_toio((void __iomem *)cont_pkt,
3402 REQUEST_ENTRY_SIZE);
3404 ql_dbg_user + ql_dbg_verbose,
3406 (uint8_t *)&lcont_pkt,
3407 REQUEST_ENTRY_SIZE);
3410 if (avail_dsds != 0 && cont == 1) {
3411 memcpy_toio((void __iomem *)cont_pkt,
3412 &lcont_pkt, REQUEST_ENTRY_SIZE);
3413 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3415 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3419 if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
3420 fx_iocb.dataword = piocb_rqst->dataword;
3421 fx_iocb.flags = piocb_rqst->flags;
3422 fx_iocb.entry_count = entry_cnt;
3425 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3426 sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb));
3428 memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb));