// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
7 #include "qla_target.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
/*
 * Opcode -> short-name table used by mb_to_str() for trace output.
 * NOTE(review): source view is truncated here — the struct member
 * declarations and the initializer's closing brace are missing.
 */
12 static struct mb_cmd_name {
16 {MBC_GET_PORT_DATABASE, "GPDB"},
17 {MBC_GET_ID_LIST, "GIDList"},
18 {MBC_GET_LINK_PRIV_STATS, "Stats"},
19 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * mb_to_str() - map a mailbox opcode to its short debug name.
 * Linearly scans mb_str[]; the match/fallback logic is in lines not
 * visible in this view (NOTE(review): body truncated).
 */
22 static const char *mb_to_str(uint16_t cmd)
25 	struct mb_cmd_name *e;
27 	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Table of mailbox commands that are allowed to be issued while an
 * ISP abort / chip reset is pending (consulted by is_rom_cmd()).
 * NOTE(review): struct member declarations and some entries are
 * missing from this truncated view.
 */
35 static struct rom_cmd {
39 { MBC_EXECUTE_FIRMWARE },
40 { MBC_READ_RAM_WORD },
41 { MBC_MAILBOX_REGISTER_TEST },
42 { MBC_VERIFY_CHECKSUM },
43 { MBC_GET_FIRMWARE_VERSION },
44 { MBC_LOAD_RISC_RAM },
45 { MBC_DUMP_RISC_RAM },
46 { MBC_LOAD_RISC_RAM_EXTENDED },
47 { MBC_DUMP_RISC_RAM_EXTENDED },
48 { MBC_WRITE_RAM_WORD_EXTENDED },
49 { MBC_READ_RAM_EXTENDED },
50 { MBC_GET_RESOURCE_COUNTS },
51 { MBC_SET_FIRMWARE_OPTION },
52 { MBC_MID_INITIALIZE_FIRMWARE },
53 { MBC_GET_FIRMWARE_STATE },
54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 { MBC_GET_RETRY_COUNT },
56 { MBC_TRACE_CONTROL },
57 { MBC_INITIALIZE_MULTIQ },
58 { MBC_IOCB_COMMAND_A64 },
59 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_SET_RNID_PARAMS },
62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
/*
 * is_rom_cmd() - return non-zero if @cmd is in rom_cmds[], i.e. it may
 * be sent even while ISP abort is in progress.
 * NOTE(review): loop body/return truncated in this view.
 */
66 static int is_rom_cmd(uint16_t cmd)
71 	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
/*
81 * qla2x00_mailbox_command
82 * Issue mailbox command and waits for completion.
85 * ha = adapter block pointer.
86 * mcp = driver internal mbx struct pointer.
89 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
92 * 0 : QLA_SUCCESS = cmd performed success
93 * 1 : QLA_FUNCTION_FAILED (error encountered)
94 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 */
/*
 * NOTE(review): this view of the function is heavily truncated (many
 * original lines are missing, e.g. the return-type line, local
 * declarations, several closing braces and else-arms). Also, every
 * occurrence of "®" below is a mis-encoding of "&reg" — do not read it
 * as a literal character.
 */
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
103 unsigned long flags = 0;
105 uint8_t abort_active, eeh_delay;
107 uint16_t command = 0;
109 __le16 __iomem *optr;
112 unsigned long wait_time;
113 struct qla_hw_data *ha = vha->hw;
114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Fail fast on fatal PCI/device states: all of these bail out with
 * QLA_FUNCTION_TIMEOUT before touching any mailbox state. */
120 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
121 ql_log(ql_log_warn, vha, 0x1001,
122 "PCI channel failed permanently, exiting.\n");
123 return QLA_FUNCTION_TIMEOUT;
126 if (vha->device_flags & DFLG_DEV_FAILED) {
127 ql_log(ql_log_warn, vha, 0x1002,
128 "Device in failed state, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 /* if PCI error, then avoid mbx processing.*/
133 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 ql_log(ql_log_warn, vha, 0xd04e,
136 "PCI error, exiting.\n");
137 return QLA_FUNCTION_TIMEOUT;
141 io_lock_on = base_vha->flags.init_done;
/* Snapshot chip_reset so a concurrent chip reset can be detected later
 * by comparing against ha->chip_reset. */
144 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 chip_reset = ha->chip_reset;
147 if (ha->flags.pci_channel_io_perm_failure) {
148 ql_log(ql_log_warn, vha, 0x1003,
149 "Perm failure on EEH timeout MBX, exiting.\n");
150 return QLA_FUNCTION_TIMEOUT;
153 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 /* Setting Link-Down error */
155 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 ql_log(ql_log_warn, vha, 0x1004,
157 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 return QLA_FUNCTION_TIMEOUT;
161 /* check if ISP abort is active and return cmd with timeout */
162 if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
166 ql_log(ql_log_info, vha, 0x1005,
167 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
169 return QLA_FUNCTION_TIMEOUT;
/* num_pend_mbx_stage1/2/3 appear to be progress counters for commands
 * pending at each phase (serialization wait / issued / interrupt wait).
 * NOTE(review): assumed from the inc/dec pattern here — confirm. */
172 atomic_inc(&ha->num_pend_mbx_stage1);
174 * Wait for active mailbox commands to finish by waiting at most tov
175 * seconds. This is to serialize actual issuing of mailbox cmds during
176 * non ISP abort time.
178 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 /* Timeout occurred. Return error. */
180 ql_log(ql_log_warn, vha, 0xd035,
181 "Cmd access timeout, cmd=0x%x, Exiting.\n",
184 atomic_dec(&ha->num_pend_mbx_stage1);
185 return QLA_FUNCTION_TIMEOUT;
187 atomic_dec(&ha->num_pend_mbx_stage1);
188 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
189 ha->flags.eeh_busy) {
190 ql_log(ql_log_warn, vha, 0xd035,
191 "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
192 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
198 /* Save mailbox command for debug */
201 ql_dbg(ql_dbg_mbx, vha, 0x1006,
202 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
/* From here on, mailbox register access is done under hardware_lock;
 * mbox_busy guards against a second concurrent issuer. */
204 spin_lock_irqsave(&ha->hardware_lock, flags);
206 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
207 ha->flags.mbox_busy) {
209 spin_unlock_irqrestore(&ha->hardware_lock, flags);
212 ha->flags.mbox_busy = 1;
214 /* Load mailbox registers. */
216 optr = ®->isp82.mailbox_in[0];
217 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
218 optr = ®->isp24.mailbox0;
220 optr = MAILBOX_REG(ha, ®->isp, 0);
223 command = mcp->mb[0];
224 mboxes = mcp->out_mb;
226 ql_dbg(ql_dbg_mbx, vha, 0x1111,
227 "Mailbox registers (OUT):\n");
/* Write each mb[] word selected by the out_mb bitmap to the chip;
 * QLA2200 splits its mailbox registers into two banks at index 8. */
228 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
229 if (IS_QLA2200(ha) && cnt == 8)
230 optr = MAILBOX_REG(ha, ®->isp, 8);
231 if (mboxes & BIT_0) {
232 ql_dbg(ql_dbg_mbx, vha, 0x1112,
233 "mbox[%d]<-0x%04x\n", cnt, *iptr);
234 wrt_reg_word(optr, *iptr);
242 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
243 "I/O Address = %p.\n", optr);
245 /* Issue set host interrupt command to send cmd out. */
246 ha->flags.mbox_int = 0;
247 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
249 /* Unlock mbx registers and wait for interrupt */
250 ql_dbg(ql_dbg_mbx, vha, 0x100f,
251 "Going to unlock irq & waiting for interrupts. "
252 "jiffies=%lx.\n", jiffies);
254 /* Wait for mbx cmd completion until timeout */
255 atomic_inc(&ha->num_pend_mbx_stage2);
/* Interrupt-driven path: ring the host-interrupt doorbell, drop the
 * lock, and sleep on mbx_intr_comp until the ISR completes us. */
256 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
257 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
260 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
261 else if (IS_FWI2_CAPABLE(ha))
262 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
264 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
265 spin_unlock_irqrestore(&ha->hardware_lock, flags);
268 atomic_inc(&ha->num_pend_mbx_stage3);
269 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
271 if (chip_reset != ha->chip_reset) {
272 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
274 spin_lock_irqsave(&ha->hardware_lock, flags);
275 ha->flags.mbox_busy = 0;
276 spin_unlock_irqrestore(&ha->hardware_lock,
278 atomic_dec(&ha->num_pend_mbx_stage2);
279 atomic_dec(&ha->num_pend_mbx_stage3);
283 ql_dbg(ql_dbg_mbx, vha, 0x117a,
284 "cmd=%x Timeout.\n", command);
285 spin_lock_irqsave(&ha->hardware_lock, flags);
286 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
289 } else if (ha->flags.purge_mbox ||
290 chip_reset != ha->chip_reset) {
291 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
293 spin_lock_irqsave(&ha->hardware_lock, flags);
294 ha->flags.mbox_busy = 0;
295 spin_unlock_irqrestore(&ha->hardware_lock, flags);
296 atomic_dec(&ha->num_pend_mbx_stage2);
297 atomic_dec(&ha->num_pend_mbx_stage3);
301 atomic_dec(&ha->num_pend_mbx_stage3);
303 if (time_after(jiffies, wait_time + 5 * HZ))
304 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
305 command, jiffies_to_msecs(jiffies - wait_time));
/* Polling path (abort active, init not done, or chip can't use the
 * mailbox interrupt): busy-poll the response queue until mbox_int. */
307 ql_dbg(ql_dbg_mbx, vha, 0x1011,
308 "Cmd=%x Polling Mode.\n", command);
310 if (IS_P3P_TYPE(ha)) {
311 if (rd_reg_dword(®->isp82.hint) &
312 HINT_MBX_INT_PENDING) {
313 ha->flags.mbox_busy = 0;
314 spin_unlock_irqrestore(&ha->hardware_lock,
316 atomic_dec(&ha->num_pend_mbx_stage2);
317 ql_dbg(ql_dbg_mbx, vha, 0x1012,
318 "Pending mailbox timeout, exiting.\n");
320 rval = QLA_FUNCTION_TIMEOUT;
323 wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING);
324 } else if (IS_FWI2_CAPABLE(ha))
325 wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT);
327 wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT);
328 spin_unlock_irqrestore(&ha->hardware_lock, flags);
330 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
331 while (!ha->flags.mbox_int) {
332 if (ha->flags.purge_mbox ||
333 chip_reset != ha->chip_reset) {
334 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
336 spin_lock_irqsave(&ha->hardware_lock, flags);
337 ha->flags.mbox_busy = 0;
338 spin_unlock_irqrestore(&ha->hardware_lock,
340 atomic_dec(&ha->num_pend_mbx_stage2);
345 if (time_after(jiffies, wait_time))
348 /* Check for pending interrupts. */
349 qla2x00_poll(ha->rsp_q_map[0]);
351 if (!ha->flags.mbox_int &&
353 command == MBC_LOAD_RISC_RAM_EXTENDED))
356 ql_dbg(ql_dbg_mbx, vha, 0x1013,
358 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
360 atomic_dec(&ha->num_pend_mbx_stage2);
362 /* Check whether we timed out */
/* Completion path: mbox_int set by ISR/poll means the firmware
 * answered; copy status and return registers back into mcp->mb[]. */
363 if (ha->flags.mbox_int) {
366 ql_dbg(ql_dbg_mbx, vha, 0x1014,
367 "Cmd=%x completed.\n", command);
369 /* Got interrupt. Clear the flag. */
370 ha->flags.mbox_int = 0;
371 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
373 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
374 spin_lock_irqsave(&ha->hardware_lock, flags);
375 ha->flags.mbox_busy = 0;
376 spin_unlock_irqrestore(&ha->hardware_lock, flags);
378 /* Setting Link-Down error */
379 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
381 rval = QLA_FUNCTION_FAILED;
382 ql_log(ql_log_warn, vha, 0xd048,
383 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
387 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
388 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
389 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
390 MBS_COMMAND_COMPLETE);
391 rval = QLA_FUNCTION_FAILED;
394 /* Load return mailbox registers. */
396 iptr = (uint16_t *)&ha->mailbox_out[0];
399 ql_dbg(ql_dbg_mbx, vha, 0x1113,
400 "Mailbox registers (IN):\n");
401 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
402 if (mboxes & BIT_0) {
404 ql_dbg(ql_dbg_mbx, vha, 0x1114,
405 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout path (else-arm truncated in this view): dump chip state,
 * optionally capture a firmware dump, and flag QLA_FUNCTION_TIMEOUT. */
415 uint32_t ictrl, host_status, hccr;
418 if (IS_FWI2_CAPABLE(ha)) {
419 mb[0] = rd_reg_word(®->isp24.mailbox0);
420 mb[1] = rd_reg_word(®->isp24.mailbox1);
421 mb[2] = rd_reg_word(®->isp24.mailbox2);
422 mb[3] = rd_reg_word(®->isp24.mailbox3);
423 mb[7] = rd_reg_word(®->isp24.mailbox7);
424 ictrl = rd_reg_dword(®->isp24.ictrl);
425 host_status = rd_reg_dword(®->isp24.host_status);
426 hccr = rd_reg_dword(®->isp24.hccr);
428 ql_log(ql_log_warn, vha, 0xd04c,
429 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
430 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
431 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
432 mb[7], host_status, hccr);
436 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
437 ictrl = rd_reg_word(®->isp.ictrl);
438 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
439 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
440 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
443 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
445 /* Capture FW dump only, if PCI device active */
446 if (!pci_channel_offline(vha->hw->pdev)) {
447 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
448 if (w == 0xffff || ictrl == 0xffffffff ||
449 (chip_reset != ha->chip_reset)) {
450 /* This is special case if there is unload
451 * of driver happening and if PCI device go
452 * into bad state due to PCI error condition
453 * then only PCI ERR flag would be set.
454 * we will do premature exit for above case.
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457 ha->flags.mbox_busy = 0;
458 spin_unlock_irqrestore(&ha->hardware_lock,
460 rval = QLA_FUNCTION_TIMEOUT;
464 /* Attempt to capture firmware dump for further
465 * anallysis of the current formware state. we do not
466 * need to do this if we are intentionally generating
469 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
470 qla2xxx_dump_fw(vha);
471 rval = QLA_FUNCTION_TIMEOUT;
474 spin_lock_irqsave(&ha->hardware_lock, flags);
475 ha->flags.mbox_busy = 0;
476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
481 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
482 ql_dbg(ql_dbg_mbx, vha, 0x101a,
483 "Checking for additional resp interrupt.\n");
485 /* polling mode for non isp_abort commands. */
486 qla2x00_poll(ha->rsp_q_map[0]);
/* Timeout recovery: either schedule an ISP abort for the DPC thread,
 * or, when already running on the DPC thread, call abort_isp directly. */
489 if (rval == QLA_FUNCTION_TIMEOUT &&
490 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
491 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
492 ha->flags.eeh_busy) {
493 /* not in dpc. schedule it for dpc to take over. */
494 ql_dbg(ql_dbg_mbx, vha, 0x101b,
495 "Timeout, schedule isp_abort_needed.\n");
497 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
498 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
499 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
500 if (IS_QLA82XX(ha)) {
501 ql_dbg(ql_dbg_mbx, vha, 0x112a,
502 "disabling pause transmit on port "
505 QLA82XX_CRB_NIU + 0x98,
506 CRB_NIU_XG_PAUSE_CTL_P0|
507 CRB_NIU_XG_PAUSE_CTL_P1);
509 ql_log(ql_log_info, base_vha, 0x101c,
510 "Mailbox cmd timeout occurred, cmd=0x%x, "
511 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
512 "abort.\n", command, mcp->mb[0],
515 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
516 qla2xxx_wake_dpc(vha);
518 } else if (current == ha->dpc_thread) {
519 /* call abort directly since we are in the DPC thread */
520 ql_dbg(ql_dbg_mbx, vha, 0x101d,
521 "Timeout, calling abort_isp.\n");
523 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
524 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
525 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
526 if (IS_QLA82XX(ha)) {
527 ql_dbg(ql_dbg_mbx, vha, 0x112b,
528 "disabling pause transmit on port "
531 QLA82XX_CRB_NIU + 0x98,
532 CRB_NIU_XG_PAUSE_CTL_P0|
533 CRB_NIU_XG_PAUSE_CTL_P1);
535 ql_log(ql_log_info, base_vha, 0x101e,
536 "Mailbox cmd timeout occurred, cmd=0x%x, "
537 "mb[0]=0x%x. Scheduling ISP abort ",
538 command, mcp->mb[0]);
540 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
541 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
542 /* Allow next mbx cmd to come in. */
543 complete(&ha->mbx_cmd_comp);
544 if (ha->isp_ops->abort_isp(vha) &&
545 !ha->flags.eeh_busy) {
546 /* Failed. retry later. */
547 set_bit(ISP_ABORT_NEEDED,
550 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
551 ql_dbg(ql_dbg_mbx, vha, 0x101f,
552 "Finished abort_isp.\n");
559 /* Allow next mbx cmd to come in. */
560 complete(&ha->mbx_cmd_comp);
563 if (rval == QLA_ABORTED) {
564 ql_log(ql_log_info, vha, 0xd035,
565 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
568 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
569 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
570 dev_name(&ha->pdev->dev), 0x1020+0x800,
574 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
575 if (mboxes & BIT_0) {
576 printk(" mb[%u]=%x", i, mcp->mb[i]);
579 pr_warn(" cmd=%x ****\n", command);
581 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
582 ql_dbg(ql_dbg_mbx, vha, 0x1198,
583 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
584 rd_reg_dword(®->isp24.host_status),
585 rd_reg_dword(®->isp24.ictrl),
586 rd_reg_dword(®->isp24.istatus));
588 ql_dbg(ql_dbg_mbx, vha, 0x1206,
589 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
590 rd_reg_word(®->isp.ctrl_status),
591 rd_reg_word(®->isp.ictrl),
592 rd_reg_word(®->isp.istatus));
595 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/* If the caller hit an EEH/PCI error, stall here until the PCIe link
 * reset progresses so the caller does not unmap DMA mid-recovery. */
599 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
601 * The caller of this mailbox encounter pci error.
602 * Hold the thread until PCIE link reset complete to make
603 * sure caller does not unmap dma while recovery is
/*
 * qla2x00_load_ram() - load a firmware segment from host memory
 * (@req_dma) into RISC RAM at @risc_addr via LOAD_RISC_RAM[_EXTENDED].
 * The extended opcode is used when the RISC address needs more than
 * 16 bits or the chip is FWI2-capable.
 * NOTE(review): declarations ('int rval;', 'mbx_cmd_t mc;'), some
 * else-arms and the closing brace are missing from this view.
 */
613 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
614 uint32_t risc_code_size)
617 struct qla_hw_data *ha = vha->hw;
619 mbx_cmd_t *mcp = &mc;
621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
622 "Entered %s.\n", __func__);
624 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
625 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
626 mcp->mb[8] = MSW(risc_addr);
627 mcp->out_mb = MBX_8|MBX_0;
629 mcp->mb[0] = MBC_LOAD_RISC_RAM;
/* DMA address is split across mb[2,3,6,7]; 64-bit high word via MSD. */
632 mcp->mb[1] = LSW(risc_addr);
633 mcp->mb[2] = MSW(req_dma);
634 mcp->mb[3] = LSW(req_dma);
635 mcp->mb[6] = MSW(MSD(req_dma));
636 mcp->mb[7] = LSW(MSD(req_dma));
637 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
638 if (IS_FWI2_CAPABLE(ha)) {
639 mcp->mb[4] = MSW(risc_code_size);
640 mcp->mb[5] = LSW(risc_code_size);
641 mcp->out_mb |= MBX_5|MBX_4;
643 mcp->mb[4] = LSW(risc_code_size);
644 mcp->out_mb |= MBX_4;
647 mcp->in_mb = MBX_1|MBX_0;
648 mcp->tov = MBX_TOV_SECONDS;
650 rval = qla2x00_mailbox_command(vha, mcp);
652 if (rval != QLA_SUCCESS) {
653 ql_dbg(ql_dbg_mbx, vha, 0x1023,
654 "Failed=%x mb[0]=%x mb[1]=%x.\n",
655 rval, mcp->mb[0], mcp->mb[1]);
658 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
659 "Done %s.\n", __func__);
665 #define NVME_ENABLE_FLAG BIT_3
/*
669 * Start adapter firmware.
672 * ha = adapter block pointer.
673 * TARGET_QUEUE_LOCK must be released.
674 * ADAPTER_STATE_LOCK must be released.
677 * qla2x00 local function return status code.
 */
/* NOTE(review): local declarations, several braces/else-arms and a
 * 'retry' initialization are missing from this truncated view. */
683 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
686 struct qla_hw_data *ha = vha->hw;
688 mbx_cmd_t *mcp = &mc;
690 #define EXE_FW_FORCE_SEMAPHORE BIT_7
693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
694 "Entered %s.\n", __func__);
697 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
700 if (IS_FWI2_CAPABLE(ha)) {
701 mcp->mb[1] = MSW(risc_addr);
702 mcp->mb[2] = LSW(risc_addr);
/* Optional feature bits folded into mb[4]/mb[11]: long-range optics
 * distance, FC-NVMe enable, minimum link speed from NVRAM, extended
 * logins and exchange offload. */
708 if (ha->flags.lr_detected) {
710 if (IS_BPM_RANGE_CAPABLE(ha))
712 ha->lr_distance << LR_DIST_FW_POS;
715 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
716 mcp->mb[4] |= NVME_ENABLE_FLAG;
718 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
719 struct nvram_81xx *nv = ha->nvram;
720 /* set minimum speed if specified in nvram */
721 if (nv->min_supported_speed >= 2 &&
722 nv->min_supported_speed <= 5) {
724 mcp->mb[11] |= nv->min_supported_speed & 0xF;
725 mcp->out_mb |= MBX_11;
727 vha->min_supported_speed =
728 nv->min_supported_speed;
732 if (ha->flags.exlogins_enabled)
733 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
735 if (ha->flags.exchoffld_enabled)
736 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
739 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
741 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
742 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
744 mcp->mb[1] = LSW(risc_addr);
745 mcp->out_mb |= MBX_1;
746 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
748 mcp->out_mb |= MBX_2;
752 mcp->tov = MBX_TOV_SECONDS;
754 rval = qla2x00_mailbox_command(vha, mcp);
756 if (rval != QLA_SUCCESS) {
/* QLA28xx semaphore-stuck case (mb[1]==0x27): retry once with the
 * force-semaphore bit set before giving up. */
757 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
758 mcp->mb[1] == 0x27 && retry) {
761 ql_dbg(ql_dbg_async, vha, 0x1026,
762 "Exe FW: force semaphore.\n");
766 ql_dbg(ql_dbg_mbx, vha, 0x1026,
767 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
772 if (!IS_FWI2_CAPABLE(ha))
/* On success, record firmware-reported capability/speed data. */
775 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
776 ql_dbg(ql_dbg_mbx, vha, 0x119a,
777 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
778 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
779 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
780 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
781 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
782 ha->max_supported_speed == 0 ? "16Gps" :
783 ha->max_supported_speed == 1 ? "32Gps" :
784 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
785 if (vha->min_supported_speed) {
786 ha->min_supported_speed = mcp->mb[5] &
787 (BIT_0 | BIT_1 | BIT_2);
788 ql_dbg(ql_dbg_mbx, vha, 0x119c,
789 "min_supported_speed=%s.\n",
790 ha->min_supported_speed == 6 ? "64Gps" :
791 ha->min_supported_speed == 5 ? "32Gps" :
792 ha->min_supported_speed == 4 ? "16Gps" :
793 ha->min_supported_speed == 3 ? "8Gps" :
794 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
800 "Done %s.\n", __func__);
/*
806 * qla_get_exlogin_status
807 * Get extended login status
808 * uses the memory offload control/status Mailbox
811 * ha: adapter state pointer.
812 * fwopt: firmware options
815 * qla2x00 local function status
 */
/* On success, returns the required buffer size in *buf_sz (mb[4]) and
 * the extended-login count in *ex_logins_cnt (mb[10]).
 * NOTE(review): declarations and closing brace truncated in this view. */
820 #define FETCH_XLOGINS_STAT 0x8
822 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
823 uint16_t *ex_logins_cnt)
827 mbx_cmd_t *mcp = &mc;
829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
830 "Entered %s\n", __func__);
832 memset(mcp->mb, 0 , sizeof(mcp->mb));
833 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
834 mcp->mb[1] = FETCH_XLOGINS_STAT;
835 mcp->out_mb = MBX_1|MBX_0;
836 mcp->in_mb = MBX_10|MBX_4|MBX_0;
837 mcp->tov = MBX_TOV_SECONDS;
840 rval = qla2x00_mailbox_command(vha, mcp);
841 if (rval != QLA_SUCCESS) {
842 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
844 *buf_sz = mcp->mb[4];
845 *ex_logins_cnt = mcp->mb[10];
847 ql_log(ql_log_info, vha, 0x1190,
848 "buffer size 0x%x, exchange login count=%d\n",
849 mcp->mb[4], mcp->mb[10]);
851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
852 "Done %s.\n", __func__);
/*
859 * qla_set_exlogin_mem_cfg
860 * set extended login memory configuration
861 * Mbx needs to be issues before init_cb is set
864 * ha: adapter state pointer.
865 * buffer: buffer pointer
866 * phys_addr: physical address of buffer
867 * size: size of buffer
868 * TARGET_QUEUE_LOCK must be released
869 * ADAPTER_STATE_LOCK must be release
872 * qla2x00 local funxtion status code.
 */
/* Programs the firmware with the host buffer (@phys_addr,
 * ha->exlogin_size) to use for extended-login offload.
 * NOTE(review): declarations and closing brace truncated in this view. */
877 #define CONFIG_XLOGINS_MEM 0x9
879 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
883 mbx_cmd_t *mcp = &mc;
884 struct qla_hw_data *ha = vha->hw;
886 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
887 "Entered %s.\n", __func__);
889 memset(mcp->mb, 0 , sizeof(mcp->mb));
890 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
891 mcp->mb[1] = CONFIG_XLOGINS_MEM;
/* 64-bit DMA address split across mb[2,3,6,7]; size in mb[8,9]. */
892 mcp->mb[2] = MSW(phys_addr);
893 mcp->mb[3] = LSW(phys_addr);
894 mcp->mb[6] = MSW(MSD(phys_addr));
895 mcp->mb[7] = LSW(MSD(phys_addr));
896 mcp->mb[8] = MSW(ha->exlogin_size);
897 mcp->mb[9] = LSW(ha->exlogin_size);
898 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
899 mcp->in_mb = MBX_11|MBX_0;
900 mcp->tov = MBX_TOV_SECONDS;
902 rval = qla2x00_mailbox_command(vha, mcp);
903 if (rval != QLA_SUCCESS) {
904 ql_dbg(ql_dbg_mbx, vha, 0x111b,
905 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
906 rval, mcp->mb[0], mcp->mb[11]);
908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
909 "Done %s.\n", __func__);
/*
916 * qla_get_exchoffld_status
917 * Get exchange offload status
918 * uses the memory offload control/status Mailbox
921 * ha: adapter state pointer.
922 * fwopt: firmware options
925 * qla2x00 local function status
 */
/* Mirror of qla_get_exlogin_status() for exchange offload: returns the
 * required buffer size in *buf_sz (mb[4]) and the offloadable exchange
 * count in *ex_logins_cnt (mb[10]).
 * NOTE(review): declarations and closing brace truncated in this view. */
930 #define FETCH_XCHOFFLD_STAT 0x2
932 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
933 uint16_t *ex_logins_cnt)
937 mbx_cmd_t *mcp = &mc;
939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
940 "Entered %s\n", __func__);
942 memset(mcp->mb, 0 , sizeof(mcp->mb));
943 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
944 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
945 mcp->out_mb = MBX_1|MBX_0;
946 mcp->in_mb = MBX_10|MBX_4|MBX_0;
947 mcp->tov = MBX_TOV_SECONDS;
950 rval = qla2x00_mailbox_command(vha, mcp);
951 if (rval != QLA_SUCCESS) {
952 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
954 *buf_sz = mcp->mb[4];
955 *ex_logins_cnt = mcp->mb[10];
957 ql_log(ql_log_info, vha, 0x118e,
958 "buffer size 0x%x, exchange offload count=%d\n",
959 mcp->mb[4], mcp->mb[10]);
961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
962 "Done %s.\n", __func__);
/*
969 * qla_set_exchoffld_mem_cfg
970 * Set exchange offload memory configuration
971 * Mbx needs to be issues before init_cb is set
974 * ha: adapter state pointer.
975 * buffer: buffer pointer
976 * phys_addr: physical address of buffer
977 * size: size of buffer
978 * TARGET_QUEUE_LOCK must be released
979 * ADAPTER_STATE_LOCK must be release
982 * qla2x00 local funxtion status code.
 */
/* Programs the firmware with the host buffer (ha->exchoffld_buf_dma,
 * ha->exchoffld_size) to use for exchange offload.
 * NOTE(review): declarations and closing brace truncated in this view. */
987 #define CONFIG_XCHOFFLD_MEM 0x3
989 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
993 mbx_cmd_t *mcp = &mc;
994 struct qla_hw_data *ha = vha->hw;
996 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
997 "Entered %s.\n", __func__);
999 memset(mcp->mb, 0 , sizeof(mcp->mb));
1000 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
1001 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
/* 64-bit DMA address split across mb[2,3,6,7]; size in mb[8,9]. */
1002 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1003 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1004 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1005 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1006 mcp->mb[8] = MSW(ha->exchoffld_size);
1007 mcp->mb[9] = LSW(ha->exchoffld_size);
1008 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1009 mcp->in_mb = MBX_11|MBX_0;
1010 mcp->tov = MBX_TOV_SECONDS;
1012 rval = qla2x00_mailbox_command(vha, mcp);
1013 if (rval != QLA_SUCCESS) {
1015 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1017 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1018 "Done %s.\n", __func__);
/*
1025 * qla2x00_get_fw_version
1026 * Get firmware version.
1029 * ha: adapter state pointer.
1030 * major: pointer for major number.
1031 * minor: pointer for minor number.
1032 * subminor: pointer for subminor number.
1035 * qla2x00 local function return status code.
 */
/* Issues MBC_GET_FIRMWARE_VERSION and caches the returned version,
 * attribute, MPI/PHY/serdes/PEP and memory-layout data on ha/vha.
 * NOTE(review): declarations, several braces and intermediate lines
 * are missing from this truncated view. */
1041 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1045 mbx_cmd_t *mcp = &mc;
1046 struct qla_hw_data *ha = vha->hw;
1048 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1049 "Entered %s.\n", __func__);
1051 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1052 mcp->out_mb = MBX_0;
/* in_mb grows with chip generation: more return registers are valid
 * on 81xx/83xx, FWI2 and 27xx/28xx parts. */
1053 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1054 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1055 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1056 if (IS_FWI2_CAPABLE(ha))
1057 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1058 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1060 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1061 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1064 mcp->tov = MBX_TOV_SECONDS;
1065 rval = qla2x00_mailbox_command(vha, mcp);
1066 if (rval != QLA_SUCCESS)
1069 /* Return mailbox data. */
1070 ha->fw_major_version = mcp->mb[1];
1071 ha->fw_minor_version = mcp->mb[2];
1072 ha->fw_subminor_version = mcp->mb[3];
1073 ha->fw_attributes = mcp->mb[6];
1074 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1075 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1077 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1079 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1080 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1081 ha->mpi_version[1] = mcp->mb[11] >> 8;
1082 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1083 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1084 ha->phy_version[0] = mcp->mb[8] & 0xff;
1085 ha->phy_version[1] = mcp->mb[9] >> 8;
1086 ha->phy_version[2] = mcp->mb[9] & 0xff;
1089 if (IS_FWI2_CAPABLE(ha)) {
1090 ha->fw_attributes_h = mcp->mb[15];
1091 ha->fw_attributes_ext[0] = mcp->mb[16];
1092 ha->fw_attributes_ext[1] = mcp->mb[17];
1093 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1094 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1095 __func__, mcp->mb[15], mcp->mb[6]);
1096 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1097 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1098 __func__, mcp->mb[17], mcp->mb[16]);
1100 if (ha->fw_attributes_h & 0x4)
1101 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1102 "%s: Firmware supports Extended Login 0x%x\n",
1103 __func__, ha->fw_attributes_h);
1105 if (ha->fw_attributes_h & 0x8)
1106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1107 "%s: Firmware supports Exchange Offload 0x%x\n",
1108 __func__, ha->fw_attributes_h);
1111 * FW supports nvme and driver load parameter requested nvme.
1112 * BIT 26 of fw_attributes indicates NVMe support.
1114 if ((ha->fw_attributes_h &
1115 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1117 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1118 vha->flags.nvme_first_burst = 1;
1120 vha->flags.nvme_enabled = 1;
1121 ql_log(ql_log_info, vha, 0xd302,
1122 "%s: FC-NVMe is Enabled (0x%x)\n",
1123 __func__, ha->fw_attributes_h);
1126 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1127 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1128 ql_log(ql_log_info, vha, 0xd302,
1129 "Firmware supports NVMe2 0x%x\n",
1130 ha->fw_attributes_ext[0]);
1131 vha->flags.nvme2_enabled = 1;
1135 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1136 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1137 ha->serdes_version[1] = mcp->mb[8] >> 8;
1138 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1139 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1140 ha->mpi_version[1] = mcp->mb[11] >> 8;
1141 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1142 ha->pep_version[0] = mcp->mb[13] & 0xff;
1143 ha->pep_version[1] = mcp->mb[14] >> 8;
1144 ha->pep_version[2] = mcp->mb[14] & 0xff;
1145 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1146 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1147 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1148 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1149 if (IS_QLA28XX(ha)) {
1150 if (mcp->mb[16] & BIT_10)
1151 ha->flags.secure_fw = 1;
1153 ql_log(ql_log_info, vha, 0xffff,
1154 "Secure Flash Update in FW: %s\n",
1155 (ha->flags.secure_fw) ? "Supported" :
/* SCM and NVMe2/PI-control feature bits propagate into the special
 * feature control block (sf_init_cb->flags). */
1159 if (ha->flags.scm_supported_a &&
1160 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1161 ha->flags.scm_supported_f = 1;
1162 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1164 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1165 (ha->flags.scm_supported_f) ? "Supported" :
1168 if (vha->flags.nvme2_enabled) {
1169 /* set BIT_15 of special feature control block for SLER */
1170 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1171 /* set BIT_14 of special feature control block for PI CTRL*/
1172 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1177 if (rval != QLA_SUCCESS) {
1179 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1183 "Done %s.\n", __func__);
/*
1189 * qla2x00_get_fw_options
1190 * Set firmware options.
1193 * ha = adapter block pointer.
1194 * fwopt = pointer for firmware options.
1197 * qla2x00 local function return status code.
 */
/* Despite the legacy comment above, this GETs the current firmware
 * options (MBC_GET_FIRMWARE_OPTION) into fwopts[0..3].
 * NOTE(review): declarations and closing brace truncated in this view. */
1203 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1207 mbx_cmd_t *mcp = &mc;
1209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1210 "Entered %s.\n", __func__);
1212 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1213 mcp->out_mb = MBX_0;
1214 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1215 mcp->tov = MBX_TOV_SECONDS;
1217 rval = qla2x00_mailbox_command(vha, mcp);
1219 if (rval != QLA_SUCCESS) {
1221 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1223 fwopts[0] = mcp->mb[0];
1224 fwopts[1] = mcp->mb[1];
1225 fwopts[2] = mcp->mb[2];
1226 fwopts[3] = mcp->mb[3];
1228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1229 "Done %s.\n", __func__);
/*
1237 * qla2x00_set_fw_options
1238 * Set firmware options.
1241 * ha = adapter block pointer.
1242 * fwopt = pointer for firmware options.
1245 * qla2x00 local function return status code.
 */
/* Writes fwopts[1..3] (plus mb[10..12] depending on chip family) via
 * MBC_SET_FIRMWARE_OPTION; fwopts[0] receives the completion status.
 * NOTE(review): declarations, else-arm brace and closing brace are
 * truncated in this view. */
1251 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1255 mbx_cmd_t *mcp = &mc;
1257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1258 "Entered %s.\n", __func__);
1260 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1261 mcp->mb[1] = fwopts[1];
1262 mcp->mb[2] = fwopts[2];
1263 mcp->mb[3] = fwopts[3];
1264 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1266 if (IS_FWI2_CAPABLE(vha->hw)) {
1267 mcp->in_mb |= MBX_1;
1268 mcp->mb[10] = fwopts[10];
1269 mcp->out_mb |= MBX_10;
1271 mcp->mb[10] = fwopts[10];
1272 mcp->mb[11] = fwopts[11];
1273 mcp->mb[12] = 0; /* Undocumented, but used */
1274 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1276 mcp->tov = MBX_TOV_SECONDS;
1278 rval = qla2x00_mailbox_command(vha, mcp);
1280 fwopts[0] = mcp->mb[0];
1282 if (rval != QLA_SUCCESS) {
1284 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1285 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1289 "Done %s.\n", __func__);
/*
1296 * qla2x00_mbx_reg_test
1297 * Mailbox register wrap test.
1300 * ha = adapter block pointer.
1301 * TARGET_QUEUE_LOCK must be released.
1302 * ADAPTER_STATE_LOCK must be released.
1305 * qla2x00 local function return status code.
 */
/* Writes a known pattern through mb[1..7] and fails the test unless the
 * firmware echoes every word back unchanged.
 * NOTE(review): declarations and closing brace truncated in this view. */
1311 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1315 mbx_cmd_t *mcp = &mc;
1317 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1318 "Entered %s.\n", __func__);
1320 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1321 mcp->mb[1] = 0xAAAA;
1322 mcp->mb[2] = 0x5555;
1323 mcp->mb[3] = 0xAA55;
1324 mcp->mb[4] = 0x55AA;
1325 mcp->mb[5] = 0xA5A5;
1326 mcp->mb[6] = 0x5A5A;
1327 mcp->mb[7] = 0x2525;
1328 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1329 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1330 mcp->tov = MBX_TOV_SECONDS;
1332 rval = qla2x00_mailbox_command(vha, mcp);
1334 if (rval == QLA_SUCCESS) {
1335 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1336 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1337 rval = QLA_FUNCTION_FAILED;
1338 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1339 mcp->mb[7] != 0x2525)
1340 rval = QLA_FUNCTION_FAILED;
1343 if (rval != QLA_SUCCESS) {
1345 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1349 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1350 "Done %s.\n", __func__);
1357 * qla2x00_verify_checksum
1358 * Verify firmware checksum.
1361 * ha = adapter block pointer.
1362 * TARGET_QUEUE_LOCK must be released.
1363 * ADAPTER_STATE_LOCK must be released.
1366 * qla2x00 local function return status code.
1372 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1376 mbx_cmd_t *mcp = &mc;
1378 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1379 "Entered %s.\n", __func__);
1381 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1382 mcp->out_mb = MBX_0;
1384 if (IS_FWI2_CAPABLE(vha->hw)) {
1385 mcp->mb[1] = MSW(risc_addr);
1386 mcp->mb[2] = LSW(risc_addr);
1387 mcp->out_mb |= MBX_2|MBX_1;
1388 mcp->in_mb |= MBX_2|MBX_1;
1390 mcp->mb[1] = LSW(risc_addr);
1391 mcp->out_mb |= MBX_1;
1392 mcp->in_mb |= MBX_1;
1395 mcp->tov = MBX_TOV_SECONDS;
1397 rval = qla2x00_mailbox_command(vha, mcp);
1399 if (rval != QLA_SUCCESS) {
1400 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1401 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1402 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1404 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1405 "Done %s.\n", __func__);
1412 * qla2x00_issue_iocb
1413 * Issue IOCB using mailbox command
1416 * ha = adapter state pointer.
1417 * buffer = buffer pointer.
1418 * phys_addr = physical address of buffer.
1419 * size = size of buffer.
1420 * TARGET_QUEUE_LOCK must be released.
1421 * ADAPTER_STATE_LOCK must be released.
1424 * qla2x00 local function return status code.
1430 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1431 dma_addr_t phys_addr, size_t size, uint32_t tov)
1435 mbx_cmd_t *mcp = &mc;
1437 if (!vha->hw->flags.fw_started)
1438 return QLA_INVALID_COMMAND;
1440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1441 "Entered %s.\n", __func__);
1443 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1445 mcp->mb[2] = MSW(LSD(phys_addr));
1446 mcp->mb[3] = LSW(LSD(phys_addr));
1447 mcp->mb[6] = MSW(MSD(phys_addr));
1448 mcp->mb[7] = LSW(MSD(phys_addr));
1449 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1450 mcp->in_mb = MBX_1|MBX_0;
1453 rval = qla2x00_mailbox_command(vha, mcp);
1455 if (rval != QLA_SUCCESS) {
1457 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1459 sts_entry_t *sts_entry = buffer;
1461 /* Mask reserved bits. */
1462 sts_entry->entry_status &=
1463 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1464 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1465 "Done %s (status=%x).\n", __func__,
1466 sts_entry->entry_status);
1473 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1476 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1481 * qla2x00_abort_command
1482 * Abort command aborts a specified IOCB.
1485 * ha = adapter block pointer.
1486 * sp = SB structure pointer.
1489 * qla2x00 local function return status code.
1495 qla2x00_abort_command(srb_t *sp)
1497 unsigned long flags = 0;
1499 uint32_t handle = 0;
1501 mbx_cmd_t *mcp = &mc;
1502 fc_port_t *fcport = sp->fcport;
1503 scsi_qla_host_t *vha = fcport->vha;
1504 struct qla_hw_data *ha = vha->hw;
1505 struct req_que *req;
1506 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1509 "Entered %s.\n", __func__);
1512 req = sp->qpair->req;
1516 spin_lock_irqsave(&ha->hardware_lock, flags);
1517 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1518 if (req->outstanding_cmds[handle] == sp)
1521 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1523 if (handle == req->num_outstanding_cmds) {
1524 /* command not found */
1525 return QLA_FUNCTION_FAILED;
1528 mcp->mb[0] = MBC_ABORT_COMMAND;
1529 if (HAS_EXTENDED_IDS(ha))
1530 mcp->mb[1] = fcport->loop_id;
1532 mcp->mb[1] = fcport->loop_id << 8;
1533 mcp->mb[2] = (uint16_t)handle;
1534 mcp->mb[3] = (uint16_t)(handle >> 16);
1535 mcp->mb[6] = (uint16_t)cmd->device->lun;
1536 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1538 mcp->tov = MBX_TOV_SECONDS;
1540 rval = qla2x00_mailbox_command(vha, mcp);
1542 if (rval != QLA_SUCCESS) {
1543 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1546 "Done %s.\n", __func__);
1553 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1557 mbx_cmd_t *mcp = &mc;
1558 scsi_qla_host_t *vha;
1562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1563 "Entered %s.\n", __func__);
1565 mcp->mb[0] = MBC_ABORT_TARGET;
1566 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1567 if (HAS_EXTENDED_IDS(vha->hw)) {
1568 mcp->mb[1] = fcport->loop_id;
1570 mcp->out_mb |= MBX_10;
1572 mcp->mb[1] = fcport->loop_id << 8;
1574 mcp->mb[2] = vha->hw->loop_reset_delay;
1575 mcp->mb[9] = vha->vp_idx;
1578 mcp->tov = MBX_TOV_SECONDS;
1580 rval = qla2x00_mailbox_command(vha, mcp);
1581 if (rval != QLA_SUCCESS) {
1582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1583 "Failed=%x.\n", rval);
1586 /* Issue marker IOCB. */
1587 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1589 if (rval2 != QLA_SUCCESS) {
1590 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1591 "Failed to issue marker IOCB (%x).\n", rval2);
1593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1594 "Done %s.\n", __func__);
1601 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1605 mbx_cmd_t *mcp = &mc;
1606 scsi_qla_host_t *vha;
1610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1611 "Entered %s.\n", __func__);
1613 mcp->mb[0] = MBC_LUN_RESET;
1614 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1615 if (HAS_EXTENDED_IDS(vha->hw))
1616 mcp->mb[1] = fcport->loop_id;
1618 mcp->mb[1] = fcport->loop_id << 8;
1619 mcp->mb[2] = (u32)l;
1621 mcp->mb[9] = vha->vp_idx;
1624 mcp->tov = MBX_TOV_SECONDS;
1626 rval = qla2x00_mailbox_command(vha, mcp);
1627 if (rval != QLA_SUCCESS) {
1628 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1631 /* Issue marker IOCB. */
1632 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1634 if (rval2 != QLA_SUCCESS) {
1635 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1636 "Failed to issue marker IOCB (%x).\n", rval2);
1638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1639 "Done %s.\n", __func__);
1646 * qla2x00_get_adapter_id
1647 * Get adapter ID and topology.
1650 * ha = adapter block pointer.
1651 * id = pointer for loop ID.
1652 * al_pa = pointer for AL_PA.
1653 * area = pointer for area.
1654 * domain = pointer for domain.
1655 * top = pointer for topology.
1656 * TARGET_QUEUE_LOCK must be released.
1657 * ADAPTER_STATE_LOCK must be released.
1660 * qla2x00 local function return status code.
1666 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1667 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1671 mbx_cmd_t *mcp = &mc;
1673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1674 "Entered %s.\n", __func__);
1676 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1677 mcp->mb[9] = vha->vp_idx;
1678 mcp->out_mb = MBX_9|MBX_0;
1679 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1680 if (IS_CNA_CAPABLE(vha->hw))
1681 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1682 if (IS_FWI2_CAPABLE(vha->hw))
1683 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1684 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1685 mcp->in_mb |= MBX_15;
1686 mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
1689 mcp->tov = MBX_TOV_SECONDS;
1691 rval = qla2x00_mailbox_command(vha, mcp);
1692 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1693 rval = QLA_COMMAND_ERROR;
1694 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1695 rval = QLA_INVALID_COMMAND;
1699 *al_pa = LSB(mcp->mb[2]);
1700 *area = MSB(mcp->mb[2]);
1701 *domain = LSB(mcp->mb[3]);
1703 *sw_cap = mcp->mb[7];
1705 if (rval != QLA_SUCCESS) {
1707 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1709 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1710 "Done %s.\n", __func__);
1712 if (IS_CNA_CAPABLE(vha->hw)) {
1713 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1714 vha->fcoe_fcf_idx = mcp->mb[10];
1715 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1716 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1717 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1718 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1719 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1720 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1722 /* If FA-WWN supported */
1723 if (IS_FAWWN_CAPABLE(vha->hw)) {
1724 if (mcp->mb[7] & BIT_14) {
1725 vha->port_name[0] = MSB(mcp->mb[16]);
1726 vha->port_name[1] = LSB(mcp->mb[16]);
1727 vha->port_name[2] = MSB(mcp->mb[17]);
1728 vha->port_name[3] = LSB(mcp->mb[17]);
1729 vha->port_name[4] = MSB(mcp->mb[18]);
1730 vha->port_name[5] = LSB(mcp->mb[18]);
1731 vha->port_name[6] = MSB(mcp->mb[19]);
1732 vha->port_name[7] = LSB(mcp->mb[19]);
1733 fc_host_port_name(vha->host) =
1734 wwn_to_u64(vha->port_name);
1735 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1736 "FA-WWN acquired %016llx\n",
1737 wwn_to_u64(vha->port_name));
1741 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1742 vha->bbcr = mcp->mb[15];
1743 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1744 ql_log(ql_log_info, vha, 0x11a4,
1745 "SCM: EDC ELS completed, flags 0x%x\n",
1748 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1749 vha->hw->flags.scm_enabled = 1;
1750 vha->scm_fabric_connection_flags |=
1751 SCM_FLAG_RDF_COMPLETED;
1752 ql_log(ql_log_info, vha, 0x11a5,
1753 "SCM: RDF ELS completed, flags 0x%x\n",
1763 * qla2x00_get_retry_cnt
1764 * Get current firmware login retry count and delay.
1767 * ha = adapter block pointer.
1768 * retry_cnt = pointer to login retry count.
1769 * tov = pointer to login timeout value.
1772 * qla2x00 local function return status code.
1778 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1784 mbx_cmd_t *mcp = &mc;
1786 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1787 "Entered %s.\n", __func__);
1789 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1790 mcp->out_mb = MBX_0;
1791 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1792 mcp->tov = MBX_TOV_SECONDS;
1794 rval = qla2x00_mailbox_command(vha, mcp);
1796 if (rval != QLA_SUCCESS) {
1798 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1799 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1801 /* Convert returned data and check our values. */
1802 *r_a_tov = mcp->mb[3] / 2;
1803 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1804 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1805 /* Update to the larger values */
1806 *retry_cnt = (uint8_t)mcp->mb[1];
1810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1811 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1818 * qla2x00_init_firmware
1819 * Initialize adapter firmware.
1822 * ha = adapter block pointer.
1823 * dptr = Initialization control block pointer.
1824 * size = size of initialization control block.
1825 * TARGET_QUEUE_LOCK must be released.
1826 * ADAPTER_STATE_LOCK must be released.
1829 * qla2x00 local function return status code.
1835 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1839 mbx_cmd_t *mcp = &mc;
1840 struct qla_hw_data *ha = vha->hw;
1842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1843 "Entered %s.\n", __func__);
1845 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1846 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1847 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1849 if (ha->flags.npiv_supported)
1850 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1852 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1855 mcp->mb[2] = MSW(ha->init_cb_dma);
1856 mcp->mb[3] = LSW(ha->init_cb_dma);
1857 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1858 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1859 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1860 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1862 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1863 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1864 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1865 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1866 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1867 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1870 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1871 mcp->mb[1] |= BIT_1;
1872 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1873 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1874 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1875 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1876 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1877 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1880 /* 1 and 2 should normally be captured. */
1881 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1882 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1883 /* mb3 is additional info about the installed SFP. */
1884 mcp->in_mb |= MBX_3;
1885 mcp->buf_size = size;
1886 mcp->flags = MBX_DMA_OUT;
1887 mcp->tov = MBX_TOV_SECONDS;
1888 rval = qla2x00_mailbox_command(vha, mcp);
1890 if (rval != QLA_SUCCESS) {
1892 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1893 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1894 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1896 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1897 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1898 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1900 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1901 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1902 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1903 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1906 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1907 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1908 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1909 "Invalid SFP/Validation Failed\n");
1911 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1912 "Done %s.\n", __func__);
1920 * qla2x00_get_port_database
1921 * Issue normal/enhanced get port database mailbox command
1922 * and copy device name as necessary.
1925 * ha = adapter state pointer.
1926 * dev = structure pointer.
1927 * opt = enhanced cmd option byte.
1930 * qla2x00 local function return status code.
1936 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1940 mbx_cmd_t *mcp = &mc;
1941 port_database_t *pd;
1942 struct port_database_24xx *pd24;
1944 struct qla_hw_data *ha = vha->hw;
1946 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1947 "Entered %s.\n", __func__);
1950 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1952 ql_log(ql_log_warn, vha, 0x1050,
1953 "Failed to allocate port database structure.\n");
1955 return QLA_MEMORY_ALLOC_FAILED;
1958 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1959 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1960 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1961 mcp->mb[2] = MSW(pd_dma);
1962 mcp->mb[3] = LSW(pd_dma);
1963 mcp->mb[6] = MSW(MSD(pd_dma));
1964 mcp->mb[7] = LSW(MSD(pd_dma));
1965 mcp->mb[9] = vha->vp_idx;
1966 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1968 if (IS_FWI2_CAPABLE(ha)) {
1969 mcp->mb[1] = fcport->loop_id;
1971 mcp->out_mb |= MBX_10|MBX_1;
1972 mcp->in_mb |= MBX_1;
1973 } else if (HAS_EXTENDED_IDS(ha)) {
1974 mcp->mb[1] = fcport->loop_id;
1976 mcp->out_mb |= MBX_10|MBX_1;
1978 mcp->mb[1] = fcport->loop_id << 8 | opt;
1979 mcp->out_mb |= MBX_1;
1981 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1982 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1983 mcp->flags = MBX_DMA_IN;
1984 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1985 rval = qla2x00_mailbox_command(vha, mcp);
1986 if (rval != QLA_SUCCESS)
1989 if (IS_FWI2_CAPABLE(ha)) {
1991 u8 current_login_state, last_login_state;
1993 pd24 = (struct port_database_24xx *) pd;
1995 /* Check for logged in state. */
1996 if (NVME_TARGET(ha, fcport)) {
1997 current_login_state = pd24->current_login_state >> 4;
1998 last_login_state = pd24->last_login_state >> 4;
2000 current_login_state = pd24->current_login_state & 0xf;
2001 last_login_state = pd24->last_login_state & 0xf;
2003 fcport->current_login_state = pd24->current_login_state;
2004 fcport->last_login_state = pd24->last_login_state;
2006 /* Check for logged in state. */
2007 if (current_login_state != PDS_PRLI_COMPLETE &&
2008 last_login_state != PDS_PRLI_COMPLETE) {
2009 ql_dbg(ql_dbg_mbx, vha, 0x119a,
2010 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
2011 current_login_state, last_login_state,
2013 rval = QLA_FUNCTION_FAILED;
2019 if (fcport->loop_id == FC_NO_LOOP_ID ||
2020 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2021 memcmp(fcport->port_name, pd24->port_name, 8))) {
2022 /* We lost the device mid way. */
2023 rval = QLA_NOT_LOGGED_IN;
2027 /* Names are little-endian. */
2028 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
2029 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2031 /* Get port_id of device. */
2032 fcport->d_id.b.domain = pd24->port_id[0];
2033 fcport->d_id.b.area = pd24->port_id[1];
2034 fcport->d_id.b.al_pa = pd24->port_id[2];
2035 fcport->d_id.b.rsvd_1 = 0;
2037 /* If not target must be initiator or unknown type. */
2038 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2039 fcport->port_type = FCT_INITIATOR;
2041 fcport->port_type = FCT_TARGET;
2043 /* Passback COS information. */
2044 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2045 FC_COS_CLASS2 : FC_COS_CLASS3;
2047 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2048 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2052 /* Check for logged in state. */
2053 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2054 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2055 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2056 "Unable to verify login-state (%x/%x) - "
2057 "portid=%02x%02x%02x.\n", pd->master_state,
2058 pd->slave_state, fcport->d_id.b.domain,
2059 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2060 rval = QLA_FUNCTION_FAILED;
2064 if (fcport->loop_id == FC_NO_LOOP_ID ||
2065 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2066 memcmp(fcport->port_name, pd->port_name, 8))) {
2067 /* We lost the device mid way. */
2068 rval = QLA_NOT_LOGGED_IN;
2072 /* Names are little-endian. */
2073 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2074 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2076 /* Get port_id of device. */
2077 fcport->d_id.b.domain = pd->port_id[0];
2078 fcport->d_id.b.area = pd->port_id[3];
2079 fcport->d_id.b.al_pa = pd->port_id[2];
2080 fcport->d_id.b.rsvd_1 = 0;
2082 /* If not target must be initiator or unknown type. */
2083 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2084 fcport->port_type = FCT_INITIATOR;
2086 fcport->port_type = FCT_TARGET;
2088 /* Passback COS information. */
2089 fcport->supported_classes = (pd->options & BIT_4) ?
2090 FC_COS_CLASS2 : FC_COS_CLASS3;
2094 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2097 if (rval != QLA_SUCCESS) {
2098 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2099 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2100 mcp->mb[0], mcp->mb[1]);
2102 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2103 "Done %s.\n", __func__);
2110 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2111 struct port_database_24xx *pdb)
2114 mbx_cmd_t *mcp = &mc;
2118 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2119 "Entered %s.\n", __func__);
2121 memset(pdb, 0, sizeof(*pdb));
2123 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2124 sizeof(*pdb), DMA_FROM_DEVICE);
2126 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2127 return QLA_MEMORY_ALLOC_FAILED;
2130 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2131 mcp->mb[1] = nport_handle;
2132 mcp->mb[2] = MSW(LSD(pdb_dma));
2133 mcp->mb[3] = LSW(LSD(pdb_dma));
2134 mcp->mb[6] = MSW(MSD(pdb_dma));
2135 mcp->mb[7] = LSW(MSD(pdb_dma));
2138 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2139 mcp->in_mb = MBX_1|MBX_0;
2140 mcp->buf_size = sizeof(*pdb);
2141 mcp->flags = MBX_DMA_IN;
2142 mcp->tov = vha->hw->login_timeout * 2;
2143 rval = qla2x00_mailbox_command(vha, mcp);
2145 if (rval != QLA_SUCCESS) {
2146 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2147 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2148 rval, mcp->mb[0], mcp->mb[1]);
2150 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2151 "Done %s.\n", __func__);
2154 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2155 sizeof(*pdb), DMA_FROM_DEVICE);
2161 * qla2x00_get_firmware_state
2162 * Get adapter firmware state.
2165 * ha = adapter block pointer.
2166 * dptr = pointer for firmware state.
2167 * TARGET_QUEUE_LOCK must be released.
2168 * ADAPTER_STATE_LOCK must be released.
2171 * qla2x00 local function return status code.
2177 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2181 mbx_cmd_t *mcp = &mc;
2182 struct qla_hw_data *ha = vha->hw;
2184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2185 "Entered %s.\n", __func__);
2187 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2188 mcp->out_mb = MBX_0;
2189 if (IS_FWI2_CAPABLE(vha->hw))
2190 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2192 mcp->in_mb = MBX_1|MBX_0;
2193 mcp->tov = MBX_TOV_SECONDS;
2195 rval = qla2x00_mailbox_command(vha, mcp);
2197 /* Return firmware states. */
2198 states[0] = mcp->mb[1];
2199 if (IS_FWI2_CAPABLE(vha->hw)) {
2200 states[1] = mcp->mb[2];
2201 states[2] = mcp->mb[3]; /* SFP info */
2202 states[3] = mcp->mb[4];
2203 states[4] = mcp->mb[5];
2204 states[5] = mcp->mb[6]; /* DPORT status */
2207 if (rval != QLA_SUCCESS) {
2209 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2211 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2212 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2213 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2214 "Invalid SFP/Validation Failed\n");
2216 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2217 "Done %s.\n", __func__);
2224 * qla2x00_get_port_name
2225 * Issue get port name mailbox command.
2226 * Returned name is in big endian format.
2229 * ha = adapter block pointer.
2230 * loop_id = loop ID of device.
2231 * name = pointer for name.
2232 * TARGET_QUEUE_LOCK must be released.
2233 * ADAPTER_STATE_LOCK must be released.
2236 * qla2x00 local function return status code.
2242 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2247 mbx_cmd_t *mcp = &mc;
2249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2250 "Entered %s.\n", __func__);
2252 mcp->mb[0] = MBC_GET_PORT_NAME;
2253 mcp->mb[9] = vha->vp_idx;
2254 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2255 if (HAS_EXTENDED_IDS(vha->hw)) {
2256 mcp->mb[1] = loop_id;
2258 mcp->out_mb |= MBX_10;
2260 mcp->mb[1] = loop_id << 8 | opt;
2263 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2264 mcp->tov = MBX_TOV_SECONDS;
2266 rval = qla2x00_mailbox_command(vha, mcp);
2268 if (rval != QLA_SUCCESS) {
2270 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2273 /* This function returns name in big endian. */
2274 name[0] = MSB(mcp->mb[2]);
2275 name[1] = LSB(mcp->mb[2]);
2276 name[2] = MSB(mcp->mb[3]);
2277 name[3] = LSB(mcp->mb[3]);
2278 name[4] = MSB(mcp->mb[6]);
2279 name[5] = LSB(mcp->mb[6]);
2280 name[6] = MSB(mcp->mb[7]);
2281 name[7] = LSB(mcp->mb[7]);
2284 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2285 "Done %s.\n", __func__);
2292 * qla24xx_link_initialization
2293 * Issue link initialization mailbox command.
2296 * ha = adapter block pointer.
2297 * TARGET_QUEUE_LOCK must be released.
2298 * ADAPTER_STATE_LOCK must be released.
2301 * qla2x00 local function return status code.
2307 qla24xx_link_initialize(scsi_qla_host_t *vha)
2311 mbx_cmd_t *mcp = &mc;
2313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2314 "Entered %s.\n", __func__);
2316 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2317 return QLA_FUNCTION_FAILED;
2319 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2321 if (vha->hw->operating_mode == LOOP)
2322 mcp->mb[1] |= BIT_6;
2324 mcp->mb[1] |= BIT_5;
2327 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2329 mcp->tov = MBX_TOV_SECONDS;
2331 rval = qla2x00_mailbox_command(vha, mcp);
2333 if (rval != QLA_SUCCESS) {
2334 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2337 "Done %s.\n", __func__);
2345 * Issue LIP reset mailbox command.
2348 * ha = adapter block pointer.
2349 * TARGET_QUEUE_LOCK must be released.
2350 * ADAPTER_STATE_LOCK must be released.
2353 * qla2x00 local function return status code.
2359 qla2x00_lip_reset(scsi_qla_host_t *vha)
2363 mbx_cmd_t *mcp = &mc;
2365 ql_dbg(ql_dbg_disc, vha, 0x105a,
2366 "Entered %s.\n", __func__);
2368 if (IS_CNA_CAPABLE(vha->hw)) {
2369 /* Logout across all FCFs. */
2370 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2373 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2374 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2375 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2378 mcp->mb[3] = vha->hw->loop_reset_delay;
2379 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2381 mcp->mb[0] = MBC_LIP_RESET;
2382 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2383 if (HAS_EXTENDED_IDS(vha->hw)) {
2384 mcp->mb[1] = 0x00ff;
2386 mcp->out_mb |= MBX_10;
2388 mcp->mb[1] = 0xff00;
2390 mcp->mb[2] = vha->hw->loop_reset_delay;
2394 mcp->tov = MBX_TOV_SECONDS;
2396 rval = qla2x00_mailbox_command(vha, mcp);
2398 if (rval != QLA_SUCCESS) {
2400 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2403 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2404 "Done %s.\n", __func__);
2415 * ha = adapter block pointer.
2416 * sns = pointer for command.
2417 * cmd_size = command size.
2418 * buf_size = response/command size.
2419 * TARGET_QUEUE_LOCK must be released.
2420 * ADAPTER_STATE_LOCK must be released.
2423 * qla2x00 local function return status code.
2429 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2430 uint16_t cmd_size, size_t buf_size)
2434 mbx_cmd_t *mcp = &mc;
2436 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2437 "Entered %s.\n", __func__);
2439 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2440 "Retry cnt=%d ratov=%d total tov=%d.\n",
2441 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2443 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2444 mcp->mb[1] = cmd_size;
2445 mcp->mb[2] = MSW(sns_phys_address);
2446 mcp->mb[3] = LSW(sns_phys_address);
2447 mcp->mb[6] = MSW(MSD(sns_phys_address));
2448 mcp->mb[7] = LSW(MSD(sns_phys_address));
2449 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2450 mcp->in_mb = MBX_0|MBX_1;
2451 mcp->buf_size = buf_size;
2452 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2453 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2454 rval = qla2x00_mailbox_command(vha, mcp);
2456 if (rval != QLA_SUCCESS) {
2458 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2459 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2460 rval, mcp->mb[0], mcp->mb[1]);
2463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2464 "Done %s.\n", __func__);
2471 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2472 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2476 struct logio_entry_24xx *lg;
2479 struct qla_hw_data *ha = vha->hw;
2480 struct req_que *req;
2482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2483 "Entered %s.\n", __func__);
2485 if (vha->vp_idx && vha->qpair)
2486 req = vha->qpair->req;
2488 req = ha->req_q_map[0];
2490 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2492 ql_log(ql_log_warn, vha, 0x1062,
2493 "Failed to allocate login IOCB.\n");
2494 return QLA_MEMORY_ALLOC_FAILED;
2497 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2498 lg->entry_count = 1;
2499 lg->handle = make_handle(req->id, lg->handle);
2500 lg->nport_handle = cpu_to_le16(loop_id);
2501 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2503 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2505 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2506 lg->port_id[0] = al_pa;
2507 lg->port_id[1] = area;
2508 lg->port_id[2] = domain;
2509 lg->vp_index = vha->vp_idx;
2510 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2511 (ha->r_a_tov / 10 * 2) + 2);
2512 if (rval != QLA_SUCCESS) {
2513 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2514 "Failed to issue login IOCB (%x).\n", rval);
2515 } else if (lg->entry_status != 0) {
2516 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2517 "Failed to complete IOCB -- error status (%x).\n",
2519 rval = QLA_FUNCTION_FAILED;
2520 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2521 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2522 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2524 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2525 "Failed to complete IOCB -- completion status (%x) "
2526 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2530 case LSC_SCODE_PORTID_USED:
2531 mb[0] = MBS_PORT_ID_USED;
2532 mb[1] = LSW(iop[1]);
2534 case LSC_SCODE_NPORT_USED:
2535 mb[0] = MBS_LOOP_ID_USED;
2537 case LSC_SCODE_NOLINK:
2538 case LSC_SCODE_NOIOCB:
2539 case LSC_SCODE_NOXCB:
2540 case LSC_SCODE_CMD_FAILED:
2541 case LSC_SCODE_NOFABRIC:
2542 case LSC_SCODE_FW_NOT_READY:
2543 case LSC_SCODE_NOT_LOGGED_IN:
2544 case LSC_SCODE_NOPCB:
2545 case LSC_SCODE_ELS_REJECT:
2546 case LSC_SCODE_CMD_PARAM_ERR:
2547 case LSC_SCODE_NONPORT:
2548 case LSC_SCODE_LOGGED_IN:
2549 case LSC_SCODE_NOFLOGI_ACC:
2551 mb[0] = MBS_COMMAND_ERROR;
2555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2556 "Done %s.\n", __func__);
2558 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2560 mb[0] = MBS_COMMAND_COMPLETE;
2562 if (iop[0] & BIT_4) {
2568 /* Passback COS information. */
2570 if (lg->io_parameter[7] || lg->io_parameter[8])
2571 mb[10] |= BIT_0; /* Class 2. */
2572 if (lg->io_parameter[9] || lg->io_parameter[10])
2573 mb[10] |= BIT_1; /* Class 3. */
2574 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2575 mb[10] |= BIT_7; /* Confirmed Completion
2580 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
/*
 * qla2x00_login_fabric -- issue the MBC_LOGIN_FABRIC_PORT mailbox command
 * to log a fabric port (identified by loop_id / domain.area.al_pa) into
 * the firmware's port database.
 *
 * NOTE(review): this listing is truncated -- the comment-block delimiters,
 * the local declarations (rval, mc), the else branch of the extended-IDs
 * test and several closing braces are missing relative to a complete
 * qla_mbx.c.  Code lines below are kept byte-identical.
 */
2586 * qla2x00_login_fabric
2587 * Issue login fabric port mailbox command.
2590 * ha = adapter block pointer.
2591 * loop_id = device loop ID.
2592 * domain = device domain.
2593 * area = device area.
2594 * al_pa = device AL_PA.
2595 * status = pointer for return status.
2596 * opt = command options.
2597 * TARGET_QUEUE_LOCK must be released.
2598 * ADAPTER_STATE_LOCK must be released.
2601 * qla2x00 local function return status code.
2607 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2608 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2612 mbx_cmd_t *mcp = &mc;
2613 struct qla_hw_data *ha = vha->hw;
2615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2616 "Entered %s.\n", __func__);
2618 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2619 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Extended-ID firmware passes loop_id in mb[1] whole; legacy packs
 * loop_id in the high byte of mb[1] and the options in the low byte. */
2620 if (HAS_EXTENDED_IDS(ha)) {
2621 mcp->mb[1] = loop_id;
2623 mcp->out_mb |= MBX_10;
2625 mcp->mb[1] = (loop_id << 8) | opt;
2627 mcp->mb[2] = domain;
2628 mcp->mb[3] = area << 8 | al_pa;
2630 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
/* Timeout scaled from the per-adapter login timeout (2.5x). */
2631 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2633 rval = qla2x00_mailbox_command(vha, mcp);
2635 /* Return mailbox statuses. */
2642 /* COS retrieved from Get-Port-Database mailbox command. */
2646 if (rval != QLA_SUCCESS) {
2647 /* RLU tmp code: need to change main mailbox_command function to
2648 * return ok even when the mailbox completion value is not
2649 * SUCCESS. The caller needs to be responsible to interpret
2650 * the return values of this mailbox command if we're not
2651 * to change too much of the existing code.
/* mb[0] values 0x4001..0x4006 are login-specific completion codes the
 * caller interprets itself (see the note above); presumably treated as
 * success here -- the action line is missing from this listing. */
2653 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2654 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2655 mcp->mb[0] == 0x4006)
2659 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2660 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2661 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2665 "Done %s.\n", __func__);
/*
 * qla2x00_login_local_device -- issue the MBC_LOGIN_LOOP_PORT mailbox
 * command for a local-loop port; on FWI2-capable (24xx+) adapters it
 * delegates to qla24xx_login_fabric() instead.
 *
 * NOTE(review): truncated listing -- local declarations (rval, mc),
 * else line and closing braces missing; code kept byte-identical.
 */
2672 * qla2x00_login_local_device
2673 * Issue login loop port mailbox command.
2676 * ha = adapter block pointer.
2677 * loop_id = device loop ID.
2678 * opt = command options.
2681 * Return status code.
2688 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2689 uint16_t *mb_ret, uint8_t opt)
2693 mbx_cmd_t *mcp = &mc;
2694 struct qla_hw_data *ha = vha->hw;
2696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2697 "Entered %s.\n", __func__);
/* FWI2 firmware has no loop-port login mailbox; use the IOCB path. */
2699 if (IS_FWI2_CAPABLE(ha))
2700 return qla24xx_login_fabric(vha, fcport->loop_id,
2701 fcport->d_id.b.domain, fcport->d_id.b.area,
2702 fcport->d_id.b.al_pa, mb_ret, opt);
2704 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2705 if (HAS_EXTENDED_IDS(ha))
2706 mcp->mb[1] = fcport->loop_id;
2708 mcp->mb[1] = fcport->loop_id << 8;
2710 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2711 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2712 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2714 rval = qla2x00_mailbox_command(vha, mcp);
2716 /* Return mailbox statuses. */
2717 if (mb_ret != NULL) {
2718 mb_ret[0] = mcp->mb[0];
2719 mb_ret[1] = mcp->mb[1];
2720 mb_ret[6] = mcp->mb[6];
2721 mb_ret[7] = mcp->mb[7];
2724 if (rval != QLA_SUCCESS) {
2725 /* AV tmp code: need to change main mailbox_command function to
2726 * return ok even when the mailbox completion value is not
2727 * SUCCESS. The caller needs to be responsible to interpret
2728 * the return values of this mailbox command if we're not
2729 * to change too much of the existing code.
2731 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2734 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2735 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2736 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2739 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2740 "Done %s.\n", __func__);
/*
 * qla24xx_fabric_logout -- log a fabric port out on ISP24xx+ hardware by
 * building a LOGINOUT_PORT_IOCB (explicit LOGO) in a DMA-pool buffer and
 * issuing it via qla2x00_issue_iocb_timeout(); checks both entry_status
 * and comp_status of the returned IOCB.
 *
 * NOTE(review): truncated listing -- rval/lg_dma declarations, the
 * NULL-alloc test line, the control_flags assignment target and closing
 * braces are missing; code kept byte-identical.
 */
2747 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2748 uint8_t area, uint8_t al_pa)
2751 struct logio_entry_24xx *lg;
2753 struct qla_hw_data *ha = vha->hw;
2754 struct req_que *req;
2756 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2757 "Entered %s.\n", __func__);
2759 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2761 ql_log(ql_log_warn, vha, 0x106e,
2762 "Failed to allocate logout IOCB.\n");
2763 return QLA_MEMORY_ALLOC_FAILED;
2767 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2768 lg->entry_count = 1;
2769 lg->handle = make_handle(req->id, lg->handle);
2770 lg->nport_handle = cpu_to_le16(loop_id);
/* Explicit + implicit LOGO flags (assignment target dropped from listing). */
2772 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
/* port_id is little-endian on the wire: al_pa, area, domain. */
2774 lg->port_id[0] = al_pa;
2775 lg->port_id[1] = area;
2776 lg->port_id[2] = domain;
2777 lg->vp_index = vha->vp_idx;
/* Timeout derived from resource-allocation timeout (R_A_TOV, in 100ms
 * units) doubled, plus 2s slack. */
2778 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2779 (ha->r_a_tov / 10 * 2) + 2);
2780 if (rval != QLA_SUCCESS) {
2781 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2782 "Failed to issue logout IOCB (%x).\n", rval);
2783 } else if (lg->entry_status != 0) {
2784 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2785 "Failed to complete IOCB -- error status (%x).\n",
2787 rval = QLA_FUNCTION_FAILED;
2788 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2789 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2790 "Failed to complete IOCB -- completion status (%x) "
2791 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2792 le32_to_cpu(lg->io_parameter[0]),
2793 le32_to_cpu(lg->io_parameter[1]));
2796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2797 "Done %s.\n", __func__);
2800 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
/*
 * qla2x00_fabric_logout -- issue the MBC_LOGOUT_FABRIC_PORT mailbox
 * command (legacy, pre-24xx path) for the given loop_id.
 *
 * NOTE(review): truncated listing -- comment delimiters, rval/mc
 * declarations, else line and closing braces missing; code kept
 * byte-identical.  domain/area/al_pa parameters are accepted but not
 * referenced in the visible lines.
 */
2806 * qla2x00_fabric_logout
2807 * Issue logout fabric port mailbox command.
2810 * ha = adapter block pointer.
2811 * loop_id = device loop ID.
2812 * TARGET_QUEUE_LOCK must be released.
2813 * ADAPTER_STATE_LOCK must be released.
2816 * qla2x00 local function return status code.
2822 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2823 uint8_t area, uint8_t al_pa)
2827 mbx_cmd_t *mcp = &mc;
2829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2830 "Entered %s.\n", __func__);
2832 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2833 mcp->out_mb = MBX_1|MBX_0;
/* loop_id placement depends on extended-ID support (see login above). */
2834 if (HAS_EXTENDED_IDS(vha->hw)) {
2835 mcp->mb[1] = loop_id;
2837 mcp->out_mb |= MBX_10;
2839 mcp->mb[1] = loop_id << 8;
2842 mcp->in_mb = MBX_1|MBX_0;
2843 mcp->tov = MBX_TOV_SECONDS;
2845 rval = qla2x00_mailbox_command(vha, mcp);
2847 if (rval != QLA_SUCCESS) {
2849 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2850 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2854 "Done %s.\n", __func__);
/*
 * qla2x00_full_login_lip -- issue the MBC_LIP_FULL_LOGIN mailbox command
 * to force a loop initialization (LIP) with full relogin; on FWI2-capable
 * adapters mb[1] carries BIT_4.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, mb[2]/mb[3]
 * setup lines and closing braces missing; code kept byte-identical.
 */
2861 * qla2x00_full_login_lip
2862 * Issue full login LIP mailbox command.
2865 * ha = adapter block pointer.
2866 * TARGET_QUEUE_LOCK must be released.
2867 * ADAPTER_STATE_LOCK must be released.
2870 * qla2x00 local function return status code.
2876 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2880 mbx_cmd_t *mcp = &mc;
2882 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2883 "Entered %s.\n", __func__);
2885 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2886 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2889 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2891 mcp->tov = MBX_TOV_SECONDS;
2893 rval = qla2x00_mailbox_command(vha, mcp);
2895 if (rval != QLA_SUCCESS) {
2897 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2900 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2901 "Done %s.\n", __func__);
/*
 * qla2x00_get_id_list -- issue MBC_GET_ID_LIST to fetch the firmware's
 * port-ID list into the caller-supplied DMA buffer; the entry count is
 * returned through *entries (from mb[1]).  DMA address register layout
 * differs between FWI2-capable and legacy adapters.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the *entries
 * parameter line and closing braces missing; code kept byte-identical.
 */
2908 * qla2x00_get_id_list
2911 * ha = adapter block pointer.
2914 * qla2x00 local function return status code.
2920 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2925 mbx_cmd_t *mcp = &mc;
2927 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2928 "Entered %s.\n", __func__);
2930 if (id_list == NULL)
2931 return QLA_FUNCTION_FAILED;
2933 mcp->mb[0] = MBC_GET_ID_LIST;
2934 mcp->out_mb = MBX_0;
2935 if (IS_FWI2_CAPABLE(vha->hw)) {
/* FWI2: 64-bit DMA address split across mb[2,3,6,7], vp index in mb[9]. */
2936 mcp->mb[2] = MSW(id_list_dma);
2937 mcp->mb[3] = LSW(id_list_dma);
2938 mcp->mb[6] = MSW(MSD(id_list_dma));
2939 mcp->mb[7] = LSW(MSD(id_list_dma));
2941 mcp->mb[9] = vha->vp_idx;
2942 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
/* Legacy: address split across mb[1,2,3,6]. */
2944 mcp->mb[1] = MSW(id_list_dma);
2945 mcp->mb[2] = LSW(id_list_dma);
2946 mcp->mb[3] = MSW(MSD(id_list_dma));
2947 mcp->mb[6] = LSW(MSD(id_list_dma));
2948 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2950 mcp->in_mb = MBX_1|MBX_0;
2951 mcp->tov = MBX_TOV_SECONDS;
2953 rval = qla2x00_mailbox_command(vha, mcp);
2955 if (rval != QLA_SUCCESS) {
2957 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2959 *entries = mcp->mb[1];
2960 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2961 "Done %s.\n", __func__);
/*
 * qla2x00_get_resource_cnts -- issue MBC_GET_RESOURCE_COUNTS and cache
 * the returned firmware resource counters (exchange/IOCB counts, NPIV
 * vport limit, FCF count on 81xx/83xx/27xx/28xx) in qla_hw_data.
 *
 * NOTE(review): truncated listing -- rval/mc declarations and closing
 * braces missing; code kept byte-identical.
 */
2968 * qla2x00_get_resource_cnts
2969 * Get current firmware resource counts.
2972 * ha = adapter block pointer.
2975 * qla2x00 local function return status code.
2981 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2983 struct qla_hw_data *ha = vha->hw;
2986 mbx_cmd_t *mcp = &mc;
2988 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2989 "Entered %s.\n", __func__);
2991 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2992 mcp->out_mb = MBX_0;
2993 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* mb[12] (max FCF count) only valid on these ISP generations. */
2994 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2995 IS_QLA27XX(ha) || IS_QLA28XX(ha))
2996 mcp->in_mb |= MBX_12;
2997 mcp->tov = MBX_TOV_SECONDS;
2999 rval = qla2x00_mailbox_command(vha, mcp);
3001 if (rval != QLA_SUCCESS) {
3003 ql_dbg(ql_dbg_mbx, vha, 0x107d,
3004 "Failed mb[0]=%x.\n", mcp->mb[0]);
3006 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
3007 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
3008 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
3009 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
3010 mcp->mb[11], mcp->mb[12]);
/* Cache counters for later resource-limit decisions. */
3012 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3013 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3014 ha->cur_fw_xcb_count = mcp->mb[3];
3015 ha->orig_fw_xcb_count = mcp->mb[6];
3016 ha->cur_fw_iocb_count = mcp->mb[7];
3017 ha->orig_fw_iocb_count = mcp->mb[10];
3018 if (ha->flags.npiv_supported)
3019 ha->max_npiv_vports = mcp->mb[11];
3020 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3022 ha->fw_max_fcf_count = mcp->mb[12];
/*
 * qla2x00_get_fcal_position_map -- issue MBC_GET_FC_AL_POSITION_MAP to
 * read the loop (LILP) position map into a DMA-pool bounce buffer; if
 * pos_map is non-NULL the FCAL_MAP_SIZE map is copied out to it.
 *
 * NOTE(review): truncated listing -- rval/mc/pmap declarations, the
 * NULL-alloc test, the dump-buffer argument line and closing braces
 * missing; code kept byte-identical.
 */
3029 * qla2x00_get_fcal_position_map
3030 * Get FCAL (LILP) position map using mailbox command
3033 * ha = adapter state pointer.
3034 * pos_map = buffer pointer (can be NULL).
3037 * qla2x00 local function return status code.
3043 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
3047 mbx_cmd_t *mcp = &mc;
3049 dma_addr_t pmap_dma;
3050 struct qla_hw_data *ha = vha->hw;
3052 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3053 "Entered %s.\n", __func__);
3055 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3057 ql_log(ql_log_warn, vha, 0x1080,
3058 "Memory alloc failed.\n");
3059 return QLA_MEMORY_ALLOC_FAILED;
3062 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3063 mcp->mb[2] = MSW(pmap_dma);
3064 mcp->mb[3] = LSW(pmap_dma);
3065 mcp->mb[6] = MSW(MSD(pmap_dma));
3066 mcp->mb[7] = LSW(MSD(pmap_dma));
3067 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3068 mcp->in_mb = MBX_1|MBX_0;
3069 mcp->buf_size = FCAL_MAP_SIZE;
/* MBX_DMA_IN: mailbox layer knows data flows device -> host buffer. */
3070 mcp->flags = MBX_DMA_IN;
3071 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3072 rval = qla2x00_mailbox_command(vha, mcp);
3074 if (rval == QLA_SUCCESS) {
3075 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3076 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3077 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3078 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3082 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3084 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3086 if (rval != QLA_SUCCESS) {
3087 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3089 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3090 "Done %s.\n", __func__);
/*
 * qla2x00_get_link_status -- issue MBC_GET_LINK_STATUS to DMA the link
 * statistics for loop_id into the caller-supplied buffer, then
 * byte-swap the le32 firmware data in place (up to link_up_cnt only).
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the in_mb
 * initialization, mb[4]/mb[10] setup, the le32_to_cpu swap statement in
 * the loop body, else lines and closing braces missing; code kept
 * byte-identical.
 */
3097 * qla2x00_get_link_status
3100 * ha = adapter block pointer.
3101 * loop_id = device loop ID.
3102 * ret_buf = pointer to link status return buffer.
3106 * BIT_0 = mem alloc error.
3107 * BIT_1 = mailbox error.
3110 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3111 struct link_statistics *stats, dma_addr_t stats_dma)
3115 mbx_cmd_t *mcp = &mc;
/* Only the words up to link_up_cnt are endian-swapped below. */
3116 uint32_t *iter = (uint32_t *)stats;
3117 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3118 struct qla_hw_data *ha = vha->hw;
3120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3121 "Entered %s.\n", __func__);
3123 mcp->mb[0] = MBC_GET_LINK_STATUS;
3124 mcp->mb[2] = MSW(LSD(stats_dma));
3125 mcp->mb[3] = LSW(LSD(stats_dma));
3126 mcp->mb[6] = MSW(MSD(stats_dma));
3127 mcp->mb[7] = LSW(MSD(stats_dma));
3128 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* loop_id placement varies by firmware generation / extended IDs. */
3130 if (IS_FWI2_CAPABLE(ha)) {
3131 mcp->mb[1] = loop_id;
3134 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3135 mcp->in_mb |= MBX_1;
3136 } else if (HAS_EXTENDED_IDS(ha)) {
3137 mcp->mb[1] = loop_id;
3139 mcp->out_mb |= MBX_10|MBX_1;
3141 mcp->mb[1] = loop_id << 8;
3142 mcp->out_mb |= MBX_1;
3144 mcp->tov = MBX_TOV_SECONDS;
3145 mcp->flags = IOCTL_CMD;
3146 rval = qla2x00_mailbox_command(vha, mcp);
3148 if (rval == QLA_SUCCESS) {
3149 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3150 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3151 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3152 rval = QLA_FUNCTION_FAILED;
3154 /* Re-endianize - firmware data is le32. */
3155 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3156 "Done %s.\n", __func__);
3157 for ( ; dwords--; iter++)
3162 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * qla24xx_get_isp_stats -- fetch private link statistics on ISP24xx+
 * via MBC_GET_LINK_PRIV_STATS, dispatched through qla24xx_send_mb_cmd()
 * (queued mailbox path); the whole stats structure is then le32-swapped
 * in place.
 *
 * NOTE(review): truncated listing -- rval declaration, DMA out_mb/in_mb
 * setup, the swap statement in the loop body, else lines and closing
 * braces missing; code kept byte-identical.  Note mcp aliases mc, so
 * the mcp->mb[] checks read the same registers mc.mb[] filled.
 */
3169 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3170 dma_addr_t stats_dma, uint16_t options)
3174 mbx_cmd_t *mcp = &mc;
3175 uint32_t *iter = (uint32_t *)stats;
3176 ushort dwords = sizeof(*stats)/sizeof(*iter);
3178 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3179 "Entered %s.\n", __func__);
3181 memset(&mc, 0, sizeof(mc));
3182 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3183 mc.mb[2] = MSW(LSD(stats_dma));
3184 mc.mb[3] = LSW(LSD(stats_dma));
3185 mc.mb[6] = MSW(MSD(stats_dma));
3186 mc.mb[7] = LSW(MSD(stats_dma));
3188 mc.mb[9] = vha->vp_idx;
3189 mc.mb[10] = options;
3191 rval = qla24xx_send_mb_cmd(vha, &mc);
3193 if (rval == QLA_SUCCESS) {
3194 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3195 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3196 "Failed mb[0]=%x.\n", mcp->mb[0]);
3197 rval = QLA_FUNCTION_FAILED;
3199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3200 "Done %s.\n", __func__);
3201 /* Re-endianize - firmware data is le32. */
3202 for ( ; dwords--; iter++)
3207 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * qla24xx_abort_command -- abort an outstanding srb on ISP24xx+ by
 * locating its handle in the request queue's outstanding_cmds[] (under
 * the qpair lock) and issuing an ABORT_IOCB for it.  Delegates to the
 * async path when ql2xasynctmfenable is set.  On IOCB completion the
 * firmware reports status through the nport_handle field (0 == success).
 *
 * NOTE(review): truncated listing -- rval/handle/abt_dma declarations,
 * the "if (vha->flags.qpairs_available ...)" guard around the req
 * override, the NULL-alloc test, else lines and closing braces missing;
 * code kept byte-identical.
 */
3214 qla24xx_abort_command(srb_t *sp)
3217 unsigned long flags = 0;
3219 struct abort_entry_24xx *abt;
3222 fc_port_t *fcport = sp->fcport;
3223 struct scsi_qla_host *vha = fcport->vha;
3224 struct qla_hw_data *ha = vha->hw;
3225 struct req_que *req = vha->req;
3226 struct qla_qpair *qpair = sp->qpair;
3228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3229 "Entered %s.\n", __func__);
/* Prefer the command's own qpair request queue when present. */
3232 req = sp->qpair->req;
3234 return QLA_FUNCTION_FAILED;
3236 if (ql2xasynctmfenable)
3237 return qla24xx_async_abort_command(sp);
/* Find the outstanding handle for sp under the qpair lock. */
3239 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3240 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3241 if (req->outstanding_cmds[handle] == sp)
3244 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3245 if (handle == req->num_outstanding_cmds) {
3246 /* Command not found. */
3247 return QLA_FUNCTION_FAILED;
3250 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3252 ql_log(ql_log_warn, vha, 0x108d,
3253 "Failed to allocate abort IOCB.\n");
3254 return QLA_MEMORY_ALLOC_FAILED;
3257 abt->entry_type = ABORT_IOCB_TYPE;
3258 abt->entry_count = 1;
3259 abt->handle = make_handle(req->id, abt->handle);
3260 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3261 abt->handle_to_abort = make_handle(req->id, handle);
3262 abt->port_id[0] = fcport->d_id.b.al_pa;
3263 abt->port_id[1] = fcport->d_id.b.area;
3264 abt->port_id[2] = fcport->d_id.b.domain;
3265 abt->vp_index = fcport->vha->vp_idx;
3267 abt->req_que_no = cpu_to_le16(req->id);
3268 /* Need to pass original sp */
3269 qla_nvme_abort_set_option(abt, sp);
3271 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3272 if (rval != QLA_SUCCESS) {
3273 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3274 "Failed to issue IOCB (%x).\n", rval);
3275 } else if (abt->entry_status != 0) {
3276 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3277 "Failed to complete IOCB -- error status (%x).\n",
3279 rval = QLA_FUNCTION_FAILED;
/* Firmware writes the completion status back into nport_handle. */
3280 } else if (abt->nport_handle != cpu_to_le16(0)) {
3281 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3282 "Failed to complete IOCB -- completion status (%x).\n",
3283 le16_to_cpu(abt->nport_handle));
3284 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3285 rval = QLA_FUNCTION_PARAMETER_ERROR;
3287 rval = QLA_FUNCTION_FAILED;
3289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3290 "Done %s.\n", __func__);
3292 if (rval == QLA_SUCCESS)
3293 qla_nvme_abort_process_comp_status(abt, sp);
3295 qla_wait_nvme_release_cmd_kref(sp);
3297 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * tsk_mgmt_cmd -- IOCB buffer overlaying a task-management request
 * (tsk) with its status response (sts) in the same DMA memory.
 * NOTE(review): listing truncated -- the union wrapper and closing
 * braces are missing here; code kept byte-identical.
 */
3302 struct tsk_mgmt_cmd {
3304 struct tsk_mgmt_entry tsk;
3305 struct sts_entry_24xx sts;
/*
 * __qla24xx_issue_tmf -- build and issue a task-management IOCB
 * (TCF_TARGET_RESET or TCF_LUN_RESET) for fcport, validate the status
 * IOCB written back into the same buffer, then issue a marker IOCB to
 * resynchronize the firmware.
 *
 * @name:   "Target"/"Lun" label used in error messages.
 * @type:   TCF_* control flag selecting the TM function.
 * @l:      LUN (used only for TCF_LUN_RESET).
 * @tag:    caller-supplied tag; not referenced in the visible lines.
 *
 * NOTE(review): truncated listing -- rval/rval2/tsk_dma declarations,
 * the vha/ha/req/qpair initialization, the sts aliasing assignment, the
 * NULL-alloc test and several closing braces are missing; code kept
 * byte-identical.
 */
3310 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3311 uint64_t l, int tag)
3314 struct tsk_mgmt_cmd *tsk;
3315 struct sts_entry_24xx *sts;
3317 scsi_qla_host_t *vha;
3318 struct qla_hw_data *ha;
3319 struct req_que *req;
3320 struct qla_qpair *qpair;
3326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3327 "Entered %s.\n", __func__);
3329 if (vha->vp_idx && vha->qpair) {
3335 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3337 ql_log(ql_log_warn, vha, 0x1093,
3338 "Failed to allocate task management IOCB.\n");
3339 return QLA_MEMORY_ALLOC_FAILED;
3342 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3343 tsk->p.tsk.entry_count = 1;
3344 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3345 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout: 2x R_A_TOV (r_a_tov held in 100ms units). */
3346 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3347 tsk->p.tsk.control_flags = cpu_to_le32(type);
3348 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3349 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3350 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3351 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3352 if (type == TCF_LUN_RESET) {
3353 int_to_scsilun(l, &tsk->p.tsk.lun);
/* LUN field must be in FCP (big-endian) byte order. */
3354 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3355 sizeof(tsk->p.tsk.lun));
3359 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3360 if (rval != QLA_SUCCESS) {
3361 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3362 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3363 } else if (sts->entry_status != 0) {
3364 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3365 "Failed to complete IOCB -- error status (%x).\n",
3367 rval = QLA_FUNCTION_FAILED;
3368 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3369 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3370 "Failed to complete IOCB -- completion status (%x).\n",
3371 le16_to_cpu(sts->comp_status));
3372 rval = QLA_FUNCTION_FAILED;
3373 } else if (le16_to_cpu(sts->scsi_status) &
3374 SS_RESPONSE_INFO_LEN_VALID) {
/* FCP response info byte 3 carries the TM response code. */
3375 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3376 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3377 "Ignoring inconsistent data length -- not enough "
3378 "response info (%d).\n",
3379 le32_to_cpu(sts->rsp_data_len));
3380 } else if (sts->data[3]) {
3381 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3382 "Failed to complete IOCB -- response (%x).\n",
3384 rval = QLA_FUNCTION_FAILED;
3388 /* Issue marker IOCB. */
3389 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3390 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3391 if (rval2 != QLA_SUCCESS) {
3392 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3393 "Failed to issue marker IOCB (%x).\n", rval2);
3395 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3396 "Done %s.\n", __func__);
3399 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target -- target-reset entry point: async TM when
 * enabled on FWI2-capable hardware, otherwise synchronous TM IOCB.
 */
3405 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3407 struct qla_hw_data *ha = fcport->vha->hw;
3409 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3410 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3412 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset -- LUN-reset entry point, mirroring
 * qla24xx_abort_target() but with TCF_LUN_RESET.
 */
3416 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3418 struct qla_hw_data *ha = fcport->vha->hw;
3420 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3421 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3423 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error -- issue MBC_GEN_SYSTEM_ERROR to deliberately
 * trigger a firmware system error (diagnostic); only supported on
 * ISP23xx and FWI2-capable adapters.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, in_mb/tov
 * setup and closing braces missing; code kept byte-identical.
 */
3427 qla2x00_system_error(scsi_qla_host_t *vha)
3431 mbx_cmd_t *mcp = &mc;
3432 struct qla_hw_data *ha = vha->hw;
3434 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3435 return QLA_FUNCTION_FAILED;
3437 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3438 "Entered %s.\n", __func__);
3440 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3441 mcp->out_mb = MBX_0;
3445 rval = qla2x00_mailbox_command(vha, mcp);
3447 if (rval != QLA_SUCCESS) {
3448 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3450 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3451 "Done %s.\n", __func__);
/*
 * qla2x00_write_serdes_word -- issue MBC_WRITE_SERDES to write one word
 * to a SerDes register; only 25xx/2031/27xx/28xx support this command.
 * ISP2031 takes only the low byte of data in mb[2].
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the addr
 * assignment (mb[1]), the non-2031 data path, in_mb setup and closing
 * braces missing; code kept byte-identical.
 */
3458 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3462 mbx_cmd_t *mcp = &mc;
3464 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3465 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3466 return QLA_FUNCTION_FAILED;
3468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3469 "Entered %s.\n", __func__);
3471 mcp->mb[0] = MBC_WRITE_SERDES;
3473 if (IS_QLA2031(vha->hw))
3474 mcp->mb[2] = data & 0xff;
3479 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3481 mcp->tov = MBX_TOV_SECONDS;
3483 rval = qla2x00_mailbox_command(vha, mcp);
3485 if (rval != QLA_SUCCESS) {
3486 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3487 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3489 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3490 "Done %s.\n", __func__);
/*
 * qla2x00_read_serdes_word -- issue MBC_READ_SERDES to read one word
 * from a SerDes register into *data (mb[1]); ISP2031 returns only the
 * low byte.  Same hardware gating as the write variant.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the addr
 * assignment, the non-2031 *data path and closing braces missing; code
 * kept byte-identical.
 */
3497 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3501 mbx_cmd_t *mcp = &mc;
3503 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3504 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3505 return QLA_FUNCTION_FAILED;
3507 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3508 "Entered %s.\n", __func__);
3510 mcp->mb[0] = MBC_READ_SERDES;
3513 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3514 mcp->in_mb = MBX_1|MBX_0;
3515 mcp->tov = MBX_TOV_SECONDS;
3517 rval = qla2x00_mailbox_command(vha, mcp);
3519 if (IS_QLA2031(vha->hw))
3520 *data = mcp->mb[1] & 0xff;
3524 if (rval != QLA_SUCCESS) {
3525 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3526 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3529 "Done %s.\n", __func__);
/*
 * qla8044_write_serdes_word -- ISP8044-only: write a 32-bit value to an
 * Ethernet SerDes register via MBC_SET_GET_ETH_SERDES_REG with the
 * HCS_WRITE_SERDES sub-opcode; 32-bit addr/data are split into
 * LSW/MSW mailbox pairs.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, in_mb setup
 * and closing braces missing; code kept byte-identical.
 */
3536 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3540 mbx_cmd_t *mcp = &mc;
3542 if (!IS_QLA8044(vha->hw))
3543 return QLA_FUNCTION_FAILED;
3545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3546 "Entered %s.\n", __func__);
3548 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3549 mcp->mb[1] = HCS_WRITE_SERDES;
3550 mcp->mb[3] = LSW(addr);
3551 mcp->mb[4] = MSW(addr);
3552 mcp->mb[5] = LSW(data);
3553 mcp->mb[6] = MSW(data);
3554 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3556 mcp->tov = MBX_TOV_SECONDS;
3558 rval = qla2x00_mailbox_command(vha, mcp);
3560 if (rval != QLA_SUCCESS) {
3561 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3562 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3565 "Done %s.\n", __func__);
/*
 * qla8044_read_serdes_word -- ISP8044-only: read a 32-bit Ethernet
 * SerDes register (HCS_READ_SERDES sub-opcode); result reassembled
 * from mb[2] (high word) and mb[1] (low word) into *data.
 *
 * NOTE(review): truncated listing -- rval/mc declarations and closing
 * braces missing; code kept byte-identical.  Note *data is written
 * before the rval check, as visible below.
 */
3572 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3576 mbx_cmd_t *mcp = &mc;
3578 if (!IS_QLA8044(vha->hw))
3579 return QLA_FUNCTION_FAILED;
3581 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3582 "Entered %s.\n", __func__);
3584 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3585 mcp->mb[1] = HCS_READ_SERDES;
3586 mcp->mb[3] = LSW(addr);
3587 mcp->mb[4] = MSW(addr);
3588 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3589 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3590 mcp->tov = MBX_TOV_SECONDS;
3592 rval = qla2x00_mailbox_command(vha, mcp);
3594 *data = mcp->mb[2] << 16 | mcp->mb[1];
3596 if (rval != QLA_SUCCESS) {
3597 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3598 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3600 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3601 "Done %s.\n", __func__);
/*
 * qla2x00_set_serdes_params -- issue MBC_SERDES_PARAMS with per-rate
 * (1G/2G/4G) serial-link emphasis options; BIT_15 marks each value as
 * an override.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the mb[1]
 * sub-command assignment, in_mb setup and closing braces missing; code
 * kept byte-identical.
 */
3608 * qla2x00_set_serdes_params() -
3610 * @sw_em_1g: serial link options
3611 * @sw_em_2g: serial link options
3612 * @sw_em_4g: serial link options
3617 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3618 uint16_t sw_em_2g, uint16_t sw_em_4g)
3622 mbx_cmd_t *mcp = &mc;
3624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3625 "Entered %s.\n", __func__);
3627 mcp->mb[0] = MBC_SERDES_PARAMS;
3629 mcp->mb[2] = sw_em_1g | BIT_15;
3630 mcp->mb[3] = sw_em_2g | BIT_15;
3631 mcp->mb[4] = sw_em_4g | BIT_15;
3632 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3634 mcp->tov = MBX_TOV_SECONDS;
3636 rval = qla2x00_mailbox_command(vha, mcp);
3638 if (rval != QLA_SUCCESS) {
3640 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3641 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3644 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3645 "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware -- issue MBC_STOP_FIRMWARE (FWI2-capable only).
 * An MBS_INVALID_COMMAND completion is mapped to QLA_INVALID_COMMAND so
 * callers can distinguish "firmware does not support this" from other
 * failures.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, mb[1]/in_mb/
 * tov setup and closing braces missing; code kept byte-identical.
 */
3652 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3656 mbx_cmd_t *mcp = &mc;
3658 if (!IS_FWI2_CAPABLE(vha->hw))
3659 return QLA_FUNCTION_FAILED;
3661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3662 "Entered %s.\n", __func__);
3664 mcp->mb[0] = MBC_STOP_FIRMWARE;
3666 mcp->out_mb = MBX_1|MBX_0;
3670 rval = qla2x00_mailbox_command(vha, mcp);
3672 if (rval != QLA_SUCCESS) {
3673 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3674 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3675 rval = QLA_INVALID_COMMAND;
3677 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3678 "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace -- enable Extended Firmware Trace (EFT) via
 * MBC_TRACE_CONTROL/TC_EFT_ENABLE, pointing the firmware at the
 * caller-allocated DMA buffer of `buffers` blocks; AENs are disabled
 * via mb[7].  FWI2-capable hardware only, and skipped when the PCI
 * channel is offline (EEH).
 *
 * NOTE(review): truncated listing -- rval/mc declarations and closing
 * braces missing; code kept byte-identical.
 */
3685 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3690 mbx_cmd_t *mcp = &mc;
3692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3693 "Entered %s.\n", __func__);
3695 if (!IS_FWI2_CAPABLE(vha->hw))
3696 return QLA_FUNCTION_FAILED;
3698 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3699 return QLA_FUNCTION_FAILED;
3701 mcp->mb[0] = MBC_TRACE_CONTROL;
3702 mcp->mb[1] = TC_EFT_ENABLE;
3703 mcp->mb[2] = LSW(eft_dma);
3704 mcp->mb[3] = MSW(eft_dma);
3705 mcp->mb[4] = LSW(MSD(eft_dma));
3706 mcp->mb[5] = MSW(MSD(eft_dma));
3707 mcp->mb[6] = buffers;
3708 mcp->mb[7] = TC_AEN_DISABLE;
3709 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3710 mcp->in_mb = MBX_1|MBX_0;
3711 mcp->tov = MBX_TOV_SECONDS;
3713 rval = qla2x00_mailbox_command(vha, mcp);
3714 if (rval != QLA_SUCCESS) {
3715 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3716 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3717 rval, mcp->mb[0], mcp->mb[1]);
3719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3720 "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace -- disable Extended Firmware Trace via
 * MBC_TRACE_CONTROL/TC_EFT_DISABLE.  Same FWI2/EEH gating as enable.
 *
 * NOTE(review): truncated listing -- rval/mc declarations and closing
 * braces missing; code kept byte-identical.
 */
3727 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3731 mbx_cmd_t *mcp = &mc;
3733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3734 "Entered %s.\n", __func__);
3736 if (!IS_FWI2_CAPABLE(vha->hw))
3737 return QLA_FUNCTION_FAILED;
3739 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3740 return QLA_FUNCTION_FAILED;
3742 mcp->mb[0] = MBC_TRACE_CONTROL;
3743 mcp->mb[1] = TC_EFT_DISABLE;
3744 mcp->out_mb = MBX_1|MBX_0;
3745 mcp->in_mb = MBX_1|MBX_0;
3746 mcp->tov = MBX_TOV_SECONDS;
3748 rval = qla2x00_mailbox_command(vha, mcp);
3749 if (rval != QLA_SUCCESS) {
3750 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3751 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3752 rval, mcp->mb[0], mcp->mb[1]);
3754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3755 "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace -- enable Fibre Channel Event (FCE) tracing
 * via MBC_TRACE_CONTROL/TC_FCE_ENABLE with default RX/TX sizes; on
 * success the first 8 result registers are copied into mb[].  Only on
 * 25xx/81xx/83xx/27xx/28xx, and skipped when the PCI channel is
 * offline.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the mb[8]
 * setup, out_mb continuation line, the *dwords result assignment and
 * closing braces missing; code kept byte-identical.
 */
3762 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3763 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3767 mbx_cmd_t *mcp = &mc;
3769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3770 "Entered %s.\n", __func__);
3772 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3773 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3774 !IS_QLA28XX(vha->hw))
3775 return QLA_FUNCTION_FAILED;
3777 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3778 return QLA_FUNCTION_FAILED;
3780 mcp->mb[0] = MBC_TRACE_CONTROL;
3781 mcp->mb[1] = TC_FCE_ENABLE;
3782 mcp->mb[2] = LSW(fce_dma);
3783 mcp->mb[3] = MSW(fce_dma);
3784 mcp->mb[4] = LSW(MSD(fce_dma));
3785 mcp->mb[5] = MSW(MSD(fce_dma));
3786 mcp->mb[6] = buffers;
3787 mcp->mb[7] = TC_AEN_DISABLE;
3789 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3790 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3791 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3793 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3794 mcp->tov = MBX_TOV_SECONDS;
3796 rval = qla2x00_mailbox_command(vha, mcp);
3797 if (rval != QLA_SUCCESS) {
3798 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3799 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3800 rval, mcp->mb[0], mcp->mb[1]);
3802 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3803 "Done %s.\n", __func__);
/* Copy back mb[0..7] to the caller's result array. */
3806 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace -- disable FCE tracing via
 * MBC_TRACE_CONTROL/TC_FCE_DISABLE (with TC_FCE_DISABLE_TRACE in
 * mb[2]); on success the 64-bit write pointer (mb[5..2]) and read
 * pointer (mb[9..6]) are reassembled into *wr and *rd.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, in_mb
 * continuation line, the NULL checks presumably guarding *wr/*rd, and
 * closing braces missing; code kept byte-identical.
 */
3815 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3819 mbx_cmd_t *mcp = &mc;
3821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3822 "Entered %s.\n", __func__);
3824 if (!IS_FWI2_CAPABLE(vha->hw))
3825 return QLA_FUNCTION_FAILED;
3827 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3828 return QLA_FUNCTION_FAILED;
3830 mcp->mb[0] = MBC_TRACE_CONTROL;
3831 mcp->mb[1] = TC_FCE_DISABLE;
3832 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3833 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3834 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3836 mcp->tov = MBX_TOV_SECONDS;
3838 rval = qla2x00_mailbox_command(vha, mcp);
3839 if (rval != QLA_SUCCESS) {
3840 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3841 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3842 rval, mcp->mb[0], mcp->mb[1]);
3844 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3845 "Done %s.\n", __func__);
/* Reassemble 64-bit trace write/read pointers from 16-bit registers. */
3848 *wr = (uint64_t) mcp->mb[5] << 48 |
3849 (uint64_t) mcp->mb[4] << 32 |
3850 (uint64_t) mcp->mb[3] << 16 |
3851 (uint64_t) mcp->mb[2];
3853 *rd = (uint64_t) mcp->mb[9] << 48 |
3854 (uint64_t) mcp->mb[8] << 32 |
3855 (uint64_t) mcp->mb[7] << 16 |
3856 (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed -- query the iIDMA port speed for loop_id via
 * MBC_PORT_PARAMS (mb[2]=mb[3]=0 means "read"); the current speed comes
 * back in mb[3] and is stored in *port_speed.
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the mb_ret
 * copy-out lines under "Return mailbox statuses", and closing braces
 * missing; code kept byte-identical.
 */
3863 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3864 uint16_t *port_speed, uint16_t *mb)
3868 mbx_cmd_t *mcp = &mc;
3870 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3871 "Entered %s.\n", __func__);
3873 if (!IS_IIDMA_CAPABLE(vha->hw))
3874 return QLA_FUNCTION_FAILED;
3876 mcp->mb[0] = MBC_PORT_PARAMS;
3877 mcp->mb[1] = loop_id;
/* mb[2]/mb[3] zero: request current parameters rather than setting. */
3878 mcp->mb[2] = mcp->mb[3] = 0;
3879 mcp->mb[9] = vha->vp_idx;
3880 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3881 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3882 mcp->tov = MBX_TOV_SECONDS;
3884 rval = qla2x00_mailbox_command(vha, mcp);
3886 /* Return mailbox statuses. */
3893 if (rval != QLA_SUCCESS) {
3894 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3896 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3897 "Done %s.\n", __func__);
3899 *port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed -- set the iIDMA port speed for loop_id via
 * MBC_PORT_PARAMS; the requested speed goes in mb[3] (low 6 bits).
 *
 * NOTE(review): truncated listing -- rval/mc declarations, the mb[2]
 * mode assignment, the mb_ret copy-out lines and closing braces
 * missing; code kept byte-identical.
 */
3906 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3907 uint16_t port_speed, uint16_t *mb)
3911 mbx_cmd_t *mcp = &mc;
3913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3914 "Entered %s.\n", __func__);
3916 if (!IS_IIDMA_CAPABLE(vha->hw))
3917 return QLA_FUNCTION_FAILED;
3919 mcp->mb[0] = MBC_PORT_PARAMS;
3920 mcp->mb[1] = loop_id;
3922 mcp->mb[3] = port_speed & 0x3F;
3923 mcp->mb[9] = vha->vp_idx;
3924 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3925 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3926 mcp->tov = MBX_TOV_SECONDS;
3928 rval = qla2x00_mailbox_command(vha, mcp);
3930 /* Return mailbox statuses. */
3937 if (rval != QLA_SUCCESS) {
3938 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3939 "Failed=%x.\n", rval);
3941 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3942 "Done %s.\n", __func__);
3949 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3950 struct vp_rpt_id_entry_24xx *rptid_entry)
3952 struct qla_hw_data *ha = vha->hw;
3953 scsi_qla_host_t *vp = NULL;
3954 unsigned long flags;
3957 struct fc_port *fcport;
3959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3960 "Entered %s.\n", __func__);
3962 if (rptid_entry->entry_status != 0)
3965 id.b.domain = rptid_entry->port_id[2];
3966 id.b.area = rptid_entry->port_id[1];
3967 id.b.al_pa = rptid_entry->port_id[0];
3969 ha->flags.n2n_ae = 0;
3971 if (rptid_entry->format == 0) {
3973 ql_dbg(ql_dbg_async, vha, 0x10b7,
3974 "Format 0 : Number of VPs setup %d, number of "
3975 "VPs acquired %d.\n", rptid_entry->vp_setup,
3976 rptid_entry->vp_acquired);
3977 ql_dbg(ql_dbg_async, vha, 0x10b8,
3978 "Primary port id %02x%02x%02x.\n",
3979 rptid_entry->port_id[2], rptid_entry->port_id[1],
3980 rptid_entry->port_id[0]);
3981 ha->current_topology = ISP_CFG_NL;
3982 qlt_update_host_map(vha, id);
3984 } else if (rptid_entry->format == 1) {
3986 ql_dbg(ql_dbg_async, vha, 0x10b9,
3987 "Format 1: VP[%d] enabled - status %d - with "
3988 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3989 rptid_entry->vp_status,
3990 rptid_entry->port_id[2], rptid_entry->port_id[1],
3991 rptid_entry->port_id[0]);
3992 ql_dbg(ql_dbg_async, vha, 0x5075,
3993 "Format 1: Remote WWPN %8phC.\n",
3994 rptid_entry->u.f1.port_name);
3996 ql_dbg(ql_dbg_async, vha, 0x5075,
3997 "Format 1: WWPN %8phC.\n",
4000 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
4002 ha->current_topology = ISP_CFG_N;
4003 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4004 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4005 fcport->scan_state = QLA_FCPORT_SCAN;
4006 fcport->n2n_flag = 0;
4009 if (wwn_to_u64(vha->port_name) >
4010 wwn_to_u64(rptid_entry->u.f1.port_name)) {
4012 vha->d_id.b.al_pa = 1;
4013 ha->flags.n2n_bigger = 1;
4016 ql_dbg(ql_dbg_async, vha, 0x5075,
4017 "Format 1: assign local id %x remote id %x\n",
4018 vha->d_id.b24, id.b24);
4020 ql_dbg(ql_dbg_async, vha, 0x5075,
4021 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
4022 rptid_entry->u.f1.port_name);
4023 ha->flags.n2n_bigger = 0;
4026 fcport = qla2x00_find_fcport_by_wwpn(vha,
4027 rptid_entry->u.f1.port_name, 1);
4028 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4032 fcport->plogi_nack_done_deadline = jiffies + HZ;
4033 fcport->dm_login_expire = jiffies +
4034 QLA_N2N_WAIT_TIME * HZ;
4035 fcport->scan_state = QLA_FCPORT_FOUND;
4036 fcport->n2n_flag = 1;
4037 fcport->keep_nport_handle = 1;
4039 if (wwn_to_u64(vha->port_name) >
4040 wwn_to_u64(fcport->port_name)) {
4044 switch (fcport->disc_state) {
4046 set_bit(RELOGIN_NEEDED,
4049 case DSC_DELETE_PEND:
4052 qlt_schedule_sess_for_deletion(fcport);
4056 qla24xx_post_newsess_work(vha, &id,
4057 rptid_entry->u.f1.port_name,
4058 rptid_entry->u.f1.node_name,
4063 /* if our portname is higher then initiate N2N login */
4065 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4068 ha->current_topology = ISP_CFG_FL;
4071 ha->current_topology = ISP_CFG_F;
4077 ha->flags.gpsc_supported = 1;
4078 ha->current_topology = ISP_CFG_F;
4079 /* buffer to buffer credit flag */
4080 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4082 if (rptid_entry->vp_idx == 0) {
4083 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4084 /* FA-WWN is only for physical port */
4085 if (qla_ini_mode_enabled(vha) &&
4086 ha->flags.fawwpn_enabled &&
4087 (rptid_entry->u.f1.flags &
4089 memcpy(vha->port_name,
4090 rptid_entry->u.f1.port_name,
4094 qlt_update_host_map(vha, id);
4097 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4098 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4100 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4101 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4102 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4103 "Could not acquire ID for VP[%d].\n",
4104 rptid_entry->vp_idx);
4109 spin_lock_irqsave(&ha->vport_slock, flags);
4110 list_for_each_entry(vp, &ha->vp_list, list) {
4111 if (rptid_entry->vp_idx == vp->vp_idx) {
4116 spin_unlock_irqrestore(&ha->vport_slock, flags);
4121 qlt_update_host_map(vp, id);
4124 * Cannot configure here as we are still sitting on the
4125 * response queue. Handle it in dpc context.
4127 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4128 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4129 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4131 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4132 qla2xxx_wake_dpc(vha);
4133 } else if (rptid_entry->format == 2) {
4134 ql_dbg(ql_dbg_async, vha, 0x505f,
4135 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4136 rptid_entry->port_id[2], rptid_entry->port_id[1],
4137 rptid_entry->port_id[0]);
4139 ql_dbg(ql_dbg_async, vha, 0x5075,
4140 "N2N: Remote WWPN %8phC.\n",
4141 rptid_entry->u.f2.port_name);
4143 /* N2N. direct connect */
4144 ha->current_topology = ISP_CFG_N;
4145 ha->flags.rida_fmt2 = 1;
4146 vha->d_id.b.domain = rptid_entry->port_id[2];
4147 vha->d_id.b.area = rptid_entry->port_id[1];
4148 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4150 ha->flags.n2n_ae = 1;
4151 spin_lock_irqsave(&ha->vport_slock, flags);
4152 qlt_update_vp_map(vha, SET_AL_PA);
4153 spin_unlock_irqrestore(&ha->vport_slock, flags);
4155 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4156 fcport->scan_state = QLA_FCPORT_SCAN;
4157 fcport->n2n_flag = 0;
4160 fcport = qla2x00_find_fcport_by_wwpn(vha,
4161 rptid_entry->u.f2.port_name, 1);
4164 fcport->login_retry = vha->hw->login_retry_count;
4165 fcport->plogi_nack_done_deadline = jiffies + HZ;
4166 fcport->scan_state = QLA_FCPORT_FOUND;
4167 fcport->keep_nport_handle = 1;
4168 fcport->n2n_flag = 1;
4169 fcport->d_id.b.domain =
4170 rptid_entry->u.f2.remote_nport_id[2];
4171 fcport->d_id.b.area =
4172 rptid_entry->u.f2.remote_nport_id[1];
4173 fcport->d_id.b.al_pa =
4174 rptid_entry->u.f2.remote_nport_id[0];
4180 * qla24xx_modify_vp_config
4181 * Change VP configuration for vha
4184 * vha = adapter block pointer.
4187 * qla2xxx local function return status code.
4193 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4196 struct vp_config_entry_24xx *vpmod;
4197 dma_addr_t vpmod_dma;
4198 struct qla_hw_data *ha = vha->hw;
4199 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4201 /* This can be called by the parent */
4203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4204 "Entered %s.\n", __func__);
/* IOCB comes from the shared DMA pool; zeroed by dma_pool_zalloc(). */
4206 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4208 ql_log(ql_log_warn, vha, 0x10bc,
4209 "Failed to allocate modify VP IOCB.\n");
4210 return QLA_MEMORY_ALLOC_FAILED;
/* Build the Modify-VP request: one VP entry, enable with BIT_3|4|5 options. */
4213 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4214 vpmod->entry_count = 1;
4215 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4216 vpmod->vp_count = 1;
4217 vpmod->vp_index1 = vha->vp_idx;
4218 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
/* Let target mode adjust the request before the names are copied in. */
4220 qlt_modify_vp_config(vha, vpmod);
4222 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4223 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4224 vpmod->entry_count = 1;
/* IOCB is always issued via the physical (base) port. */
4226 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4227 if (rval != QLA_SUCCESS) {
4228 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4229 "Failed to issue VP config IOCB (%x).\n", rval);
4230 } else if (vpmod->comp_status != 0) {
4231 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4232 "Failed to complete IOCB -- error status (%x).\n",
4233 vpmod->comp_status);
4234 rval = QLA_FUNCTION_FAILED;
/* NOTE(review): if CS_COMPLETE == 0 this branch is unreachable -- the
 * preceding "comp_status != 0" test already caught every non-zero
 * status.  Confirm against the CS_COMPLETE definition. */
4235 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4236 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4237 "Failed to complete IOCB -- completion status (%x).\n",
4238 le16_to_cpu(vpmod->comp_status));
4239 rval = QLA_FUNCTION_FAILED;
4242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4243 "Done %s.\n", __func__);
4244 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4246 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4252 * qla2x00_send_change_request
4253 * Receive or disable RSCN request from fabric controller
4256 * ha = adapter block pointer
4257 * format = registration format:
4259 * 1 - Fabric detected registration
4260 * 2 - N_port detected registration
4261 * 3 - Full registration
4262 * FF - clear registration
4263 * vp_idx = Virtual port index
4266 * qla2x00 local function return status code.
4273 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4278 mbx_cmd_t *mcp = &mc;
4280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4281 "Entered %s.\n", __func__);
/* SCR mailbox command: mb[1] = registration format, mb[9] = VP index. */
4283 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4284 mcp->mb[1] = format;
4285 mcp->mb[9] = vp_idx;
4286 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4287 mcp->in_mb = MBX_0|MBX_1;
4288 mcp->tov = MBX_TOV_SECONDS;
4290 rval = qla2x00_mailbox_command(vha, mcp);
/* Firmware may accept the mailbox yet fail the command in mb[0]. */
4292 if (rval == QLA_SUCCESS) {
4293 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * Dump RISC RAM starting at 'addr' into the DMA buffer 'req_dma'.
 * Uses the extended dump command when the address needs more than 16
 * bits or the adapter is FWI2-capable; otherwise the legacy command.
 */
4303 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4308 mbx_cmd_t *mcp = &mc;
4310 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4311 "Entered %s.\n", __func__);
4313 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4314 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4315 mcp->mb[8] = MSW(addr);
4317 mcp->out_mb = MBX_10|MBX_8|MBX_0;
4319 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4320 mcp->out_mb = MBX_0;
/* Common part: low address word and 64-bit DMA destination. */
4322 mcp->mb[1] = LSW(addr);
4323 mcp->mb[2] = MSW(req_dma);
4324 mcp->mb[3] = LSW(req_dma);
4325 mcp->mb[6] = MSW(MSD(req_dma));
4326 mcp->mb[7] = LSW(MSD(req_dma));
4327 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
/* FWI2 parts take a 32-bit size split across mb[4]/mb[5]. */
4328 if (IS_FWI2_CAPABLE(vha->hw)) {
4329 mcp->mb[4] = MSW(size);
4330 mcp->mb[5] = LSW(size);
4331 mcp->out_mb |= MBX_5|MBX_4;
4333 mcp->mb[4] = LSW(size);
4334 mcp->out_mb |= MBX_4;
4338 mcp->tov = MBX_TOV_SECONDS;
4340 rval = qla2x00_mailbox_command(vha, mcp);
4342 if (rval != QLA_SUCCESS) {
4343 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4344 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4347 "Done %s.\n", __func__);
4352 /* 84XX Support **************************************************************/
/* CS84xx Verify-Chip: the request and response IOCBs occupy the same
 * DMA buffer (firmware overwrites the request with the response). */
4354 struct cs84xx_mgmt_cmd {
4356 struct verify_chip_entry_84xx req;
4357 struct verify_chip_rsp_84xx rsp;
/*
 * Issue the ISP84xx Verify Chip IOCB, optionally pushing a firmware
 * update.  On a failed update the command is retried once with
 * VCO_DONT_UPDATE_FW set.  status[0]/status[1] return the completion
 * status and (on chip failure) the failure code.
 */
4362 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4365 struct cs84xx_mgmt_cmd *mn;
4368 unsigned long flags;
4369 struct qla_hw_data *ha = vha->hw;
4371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4372 "Entered %s.\n", __func__);
4374 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4376 return QLA_MEMORY_ALLOC_FAILED;
/* Force an update when a new CS84xx firmware image is pending. */
4380 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4381 /* Diagnostic firmware? */
4382 /* options |= MENLO_DIAG_FW; */
4383 /* We update the firmware with only one data sequence. */
4384 options |= VCO_END_OF_DATA;
4388 memset(mn, 0, sizeof(*mn));
4389 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4390 mn->p.req.entry_count = 1;
4391 mn->p.req.options = cpu_to_le16(options);
4393 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4394 "Dump of Verify Request.\n");
4395 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
/* Verify/update can be slow: allow a 120s IOCB timeout. */
4398 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4399 if (rval != QLA_SUCCESS) {
4400 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4401 "Failed to issue verify IOCB (%x).\n", rval);
4405 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4406 "Dump of Verify Response.\n");
4407 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4410 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4411 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4412 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4413 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4414 "cs=%x fc=%x.\n", status[0], status[1]);
4416 if (status[0] != CS_COMPLETE) {
4417 rval = QLA_FUNCTION_FAILED;
/* Update failed: retry the verify without the firmware update. */
4418 if (!(options & VCO_DONT_UPDATE_FW)) {
4419 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4420 "Firmware update failed. Retrying "
4421 "without update firmware.\n");
4422 options |= VCO_DONT_UPDATE_FW;
4423 options &= ~VCO_FORCE_UPDATE;
4427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4428 "Firmware updated to %x.\n",
4429 le32_to_cpu(mn->p.rsp.fw_ver))
4431 /* NOTE: we only update OP firmware. */
4432 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4433 ha->cs84xx->op_fw_version =
4434 le32_to_cpu(mn->p.rsp.fw_ver);
4435 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4441 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4443 if (rval != QLA_SUCCESS) {
4444 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4445 "Failed=%x.\n", rval);
4447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4448 "Done %s.\n", __func__);
/*
 * Initialize a multiqueue request queue with the firmware
 * (MBC_INITIALIZE_MULTIQ), programming its DMA base, length, id,
 * options and queue-pointer registers.
 */
4455 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4458 unsigned long flags;
4460 mbx_cmd_t *mcp = &mc;
4461 struct qla_hw_data *ha = vha->hw;
/* Nothing to program if the firmware has not been started. */
4463 if (!ha->flags.fw_started)
4466 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4467 "Entered %s.\n", __func__);
/* BIT_13: shadow registers (queue pointers mirrored in host memory). */
4469 if (IS_SHADOW_REG_CAPABLE(ha))
4470 req->options |= BIT_13;
4472 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4473 mcp->mb[1] = req->options;
4474 mcp->mb[2] = MSW(LSD(req->dma));
4475 mcp->mb[3] = LSW(LSD(req->dma));
4476 mcp->mb[6] = MSW(MSD(req->dma));
4477 mcp->mb[7] = LSW(MSD(req->dma));
4478 mcp->mb[5] = req->length;
4480 mcp->mb[10] = req->rsp->id;
4481 mcp->mb[12] = req->qos;
4482 mcp->mb[11] = req->vp_idx;
4483 mcp->mb[13] = req->rid;
4484 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4487 mcp->mb[4] = req->id;
4488 /* que in ptr index */
4490 /* que out ptr index */
4491 mcp->mb[9] = *req->out_ptr = 0;
4492 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4493 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4495 mcp->flags = MBX_DMA_OUT;
4496 mcp->tov = MBX_TOV_SECONDS * 2;
4498 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4500 mcp->in_mb |= MBX_1;
4501 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4502 mcp->out_mb |= MBX_15;
4503 /* debug q create issue in SR-IOV */
4504 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers before handing the queue to FW
 * (skip when BIT_0 indicates the queue is already initialized). */
4507 spin_lock_irqsave(&ha->hardware_lock, flags);
4508 if (!(req->options & BIT_0)) {
4509 wrt_reg_dword(req->req_q_in, 0);
4510 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4511 wrt_reg_dword(req->req_q_out, 0);
4513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4515 rval = qla2x00_mailbox_command(vha, mcp);
4516 if (rval != QLA_SUCCESS) {
4517 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4518 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4520 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4521 "Done %s.\n", __func__);
/*
 * Initialize a multiqueue response queue with the firmware
 * (MBC_INITIALIZE_MULTIQ).  Mirror image of qla25xx_init_req_que()
 * with the MSI-X vector programmed in mb[14].
 */
4528 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4531 unsigned long flags;
4533 mbx_cmd_t *mcp = &mc;
4534 struct qla_hw_data *ha = vha->hw;
/* Nothing to program if the firmware has not been started. */
4536 if (!ha->flags.fw_started)
4539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4540 "Entered %s.\n", __func__);
/* BIT_13: shadow registers (queue pointers mirrored in host memory). */
4542 if (IS_SHADOW_REG_CAPABLE(ha))
4543 rsp->options |= BIT_13;
4545 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4546 mcp->mb[1] = rsp->options;
4547 mcp->mb[2] = MSW(LSD(rsp->dma));
4548 mcp->mb[3] = LSW(LSD(rsp->dma));
4549 mcp->mb[6] = MSW(MSD(rsp->dma));
4550 mcp->mb[7] = LSW(MSD(rsp->dma));
4551 mcp->mb[5] = rsp->length;
4552 mcp->mb[14] = rsp->msix->entry;
4553 mcp->mb[13] = rsp->rid;
4554 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4557 mcp->mb[4] = rsp->id;
4558 /* que in ptr index */
4559 mcp->mb[8] = *rsp->in_ptr = 0;
4560 /* que out ptr index */
4562 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4563 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4565 mcp->flags = MBX_DMA_OUT;
4566 mcp->tov = MBX_TOV_SECONDS * 2;
4568 if (IS_QLA81XX(ha)) {
4569 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4570 mcp->in_mb |= MBX_1;
4571 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4572 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4573 mcp->in_mb |= MBX_1;
4574 /* debug q create issue in SR-IOV */
4575 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers before handing the queue to FW
 * (skip when BIT_0 indicates the queue is already initialized). */
4578 spin_lock_irqsave(&ha->hardware_lock, flags);
4579 if (!(rsp->options & BIT_0)) {
4580 wrt_reg_dword(rsp->rsp_q_out, 0);
4581 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4582 wrt_reg_dword(rsp->rsp_q_in, 0);
4585 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4587 rval = qla2x00_mailbox_command(vha, mcp);
4588 if (rval != QLA_SUCCESS) {
4589 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4590 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4592 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4593 "Done %s.\n", __func__);
/*
 * Acknowledge an inter-driver communication (IDC) message by echoing
 * the QLA_IDC_ACK_REGS mailbox words from 'mb' into mb[1..7].
 */
4600 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4604 mbx_cmd_t *mcp = &mc;
4606 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4607 "Entered %s.\n", __func__);
4609 mcp->mb[0] = MBC_IDC_ACK;
4610 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4611 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4613 mcp->tov = MBX_TOV_SECONDS;
4615 rval = qla2x00_mailbox_command(vha, mcp);
4617 if (rval != QLA_SUCCESS) {
4618 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4619 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4622 "Done %s.\n", __func__);
/*
 * Query the flash sector size via the Flash Access Control (FAC)
 * mailbox command; result is returned in mb[1] and stored through
 * *sector_size on success.  Only valid on 81xx/83xx/27xx/28xx parts.
 */
4629 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4633 mbx_cmd_t *mcp = &mc;
4635 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4636 "Entered %s.\n", __func__);
4638 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4639 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4640 return QLA_FUNCTION_FAILED;
4642 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4643 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4644 mcp->out_mb = MBX_1|MBX_0;
4645 mcp->in_mb = MBX_1|MBX_0;
4646 mcp->tov = MBX_TOV_SECONDS;
4648 rval = qla2x00_mailbox_command(vha, mcp);
4650 if (rval != QLA_SUCCESS) {
4651 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4652 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4653 rval, mcp->mb[0], mcp->mb[1]);
4655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4656 "Done %s.\n", __func__);
4657 *sector_size = mcp->mb[1];
/*
 * Enable (enable != 0) or protect flash writes via the FAC mailbox
 * command.  Only valid on 81xx/83xx/27xx/28xx parts.
 */
4664 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4668 mbx_cmd_t *mcp = &mc;
4670 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4671 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4672 return QLA_FUNCTION_FAILED;
4674 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4675 "Entered %s.\n", __func__);
4677 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4678 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4679 FAC_OPT_CMD_WRITE_PROTECT;
4680 mcp->out_mb = MBX_1|MBX_0;
4681 mcp->in_mb = MBX_1|MBX_0;
4682 mcp->tov = MBX_TOV_SECONDS;
4684 rval = qla2x00_mailbox_command(vha, mcp);
4686 if (rval != QLA_SUCCESS) {
4687 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4688 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4689 rval, mcp->mb[0], mcp->mb[1]);
4691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4692 "Done %s.\n", __func__);
/*
 * Erase the flash sectors in the range [start, finish] via the FAC
 * mailbox command; the 32-bit addresses are split into LSW/MSW
 * mailbox words.  Only valid on 81xx/83xx/27xx/28xx parts.
 */
4699 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4703 mbx_cmd_t *mcp = &mc;
4705 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4706 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4707 return QLA_FUNCTION_FAILED;
4709 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4710 "Entered %s.\n", __func__);
4712 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4713 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4714 mcp->mb[2] = LSW(start);
4715 mcp->mb[3] = MSW(start);
4716 mcp->mb[4] = LSW(finish);
4717 mcp->mb[5] = MSW(finish);
4718 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4719 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4720 mcp->tov = MBX_TOV_SECONDS;
4722 rval = qla2x00_mailbox_command(vha, mcp);
4724 if (rval != QLA_SUCCESS) {
4725 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4726 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4727 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4730 "Done %s.\n", __func__);
/*
 * Lock (lock != 0) or unlock the flash-access semaphore via the FAC
 * mailbox command.  Only valid on 81xx/83xx/27xx/28xx parts.
 * NOTE(review): debug message IDs 0x10e2-0x10e4 are shared with
 * qla81xx_fac_erase_sector -- confirm this reuse is intentional.
 */
4737 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4739 int rval = QLA_SUCCESS;
4741 mbx_cmd_t *mcp = &mc;
4742 struct qla_hw_data *ha = vha->hw;
4744 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4745 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4749 "Entered %s.\n", __func__);
4751 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4752 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4753 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4754 mcp->out_mb = MBX_1|MBX_0;
4755 mcp->in_mb = MBX_1|MBX_0;
4756 mcp->tov = MBX_TOV_SECONDS;
4758 rval = qla2x00_mailbox_command(vha, mcp);
4760 if (rval != QLA_SUCCESS) {
4761 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4762 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4763 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4766 "Done %s.\n", __func__);
/*
 * Ask the firmware to restart the MPI (management processor) firmware.
 * No parameters beyond the command opcode itself.
 */
4773 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4777 mbx_cmd_t *mcp = &mc;
4779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4780 "Entered %s.\n", __func__);
4782 mcp->mb[0] = MBC_RESTART_MPI_FW;
4783 mcp->out_mb = MBX_0;
4784 mcp->in_mb = MBX_0|MBX_1;
4785 mcp->tov = MBX_TOV_SECONDS;
4787 rval = qla2x00_mailbox_command(vha, mcp);
4789 if (rval != QLA_SUCCESS) {
4790 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4791 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4792 rval, mcp->mb[0], mcp->mb[1]);
4794 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4795 "Done %s.\n", __func__);
/*
 * Report the driver version string to P3P (82xx) firmware via
 * Set-RNID-Params.  The string is packed two characters per mailbox
 * word into mb[4..15]; remaining words are zero-filled.
 */
4802 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4806 mbx_cmd_t *mcp = &mc;
4810 struct qla_hw_data *ha = vha->hw;
4812 if (!IS_P3P_TYPE(ha))
4813 return QLA_FUNCTION_FAILED;
4815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4816 "Entered %s.\n", __func__);
/* Reinterpret the byte string as little-endian 16-bit words. */
4818 str = (__force __le16 *)version;
4819 len = strlen(version);
4821 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4822 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4823 mcp->out_mb = MBX_1|MBX_0;
/* Copy the version string, 2 bytes per mailbox register. */
4824 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4825 mcp->mb[i] = le16_to_cpup(str);
4826 mcp->out_mb |= 1<<i;
/* Pad the rest of the registers with zeroes. */
4828 for (; i < 16; i++) {
4830 mcp->out_mb |= 1<<i;
4832 mcp->in_mb = MBX_1|MBX_0;
4833 mcp->tov = MBX_TOV_SECONDS;
4835 rval = qla2x00_mailbox_command(vha, mcp);
4837 if (rval != QLA_SUCCESS) {
4838 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4839 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4842 "Done %s.\n", __func__);
/*
 * Report the driver version string to 25xx+ firmware via
 * Set-RNID-Params using a DMA buffer: a 4-byte header
 * ("\x7\x3\x11\x0") followed by the (possibly truncated) version text.
 */
4849 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4853 mbx_cmd_t *mcp = &mc;
4858 struct qla_hw_data *ha = vha->hw;
4860 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4862 return QLA_FUNCTION_FAILED;
4864 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4865 "Entered %s.\n", __func__);
4867 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4869 ql_log(ql_log_warn, vha, 0x117f,
4870 "Failed to allocate driver version param.\n");
4871 return QLA_MEMORY_ALLOC_FAILED;
/* 4-byte RNID header precedes the version text in the buffer. */
4874 memcpy(str, "\x7\x3\x11\x0", 4);
4876 len = dwlen * 4 - 4;
4877 memset(str + 4, 0, len);
/* Truncate the version string to what fits after the header. */
4878 if (len > strlen(version))
4879 len = strlen(version);
4880 memcpy(str + 4, version, len);
4882 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4883 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4884 mcp->mb[2] = MSW(LSD(str_dma));
4885 mcp->mb[3] = LSW(LSD(str_dma));
4886 mcp->mb[6] = MSW(MSD(str_dma));
4887 mcp->mb[7] = LSW(MSD(str_dma));
4888 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4889 mcp->in_mb = MBX_1|MBX_0;
4890 mcp->tov = MBX_TOV_SECONDS;
4892 rval = qla2x00_mailbox_command(vha, mcp);
4894 if (rval != QLA_SUCCESS) {
4895 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4896 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4898 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4899 "Done %s.\n", __func__);
4902 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * Fetch the port-login (PLOGI) payload template from firmware via
 * Get-RNID-Params into 'buf' (size 'bufsiz' bytes, transferred in
 * dwords), then byte-swap each dword in place to CPU order.
 */
4908 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4909 void *buf, uint16_t bufsiz)
4913 mbx_cmd_t *mcp = &mc;
4916 if (!IS_FWI2_CAPABLE(vha->hw))
4917 return QLA_FUNCTION_FAILED;
4919 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4920 "Entered %s.\n", __func__);
4922 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4923 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4924 mcp->mb[2] = MSW(buf_dma);
4925 mcp->mb[3] = LSW(buf_dma);
4926 mcp->mb[6] = MSW(MSD(buf_dma));
4927 mcp->mb[7] = LSW(MSD(buf_dma));
4928 mcp->mb[8] = bufsiz/4;
4929 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4930 mcp->in_mb = MBX_1|MBX_0;
4931 mcp->tov = MBX_TOV_SECONDS;
4933 rval = qla2x00_mailbox_command(vha, mcp);
4935 if (rval != QLA_SUCCESS) {
4936 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4937 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4940 "Done %s.\n", __func__);
/* Firmware returns big-endian dwords; convert in place. */
4941 bp = (uint32_t *) buf;
4942 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4943 *bp = le32_to_cpu((__force __le32)*bp);
/* Number of ELS opcodes the driver asks firmware to pass up (PUREX). */
4949 #define PUREX_CMD_COUNT 2
/*
 * Tell firmware which ELS commands (FPIN, RDP) the driver wants to
 * receive directly (PUREX path).  A bitmap of ELS opcodes is built in
 * a coherent DMA buffer and sent via Set-RNID-Params.
 */
4951 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4955 mbx_cmd_t *mcp = &mc;
4956 uint8_t *els_cmd_map;
4957 dma_addr_t els_cmd_map_dma;
4958 uint8_t cmd_opcode[PUREX_CMD_COUNT];
4959 uint8_t i, index, purex_bit;
4960 struct qla_hw_data *ha = vha->hw;
4962 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
4963 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
4967 "Entered %s.\n", __func__);
4969 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4970 &els_cmd_map_dma, GFP_KERNEL);
4972 ql_log(ql_log_warn, vha, 0x7101,
4973 "Failed to allocate RDP els command param.\n");
4974 return QLA_MEMORY_ALLOC_FAILED;
4977 /* List of Purex ELS */
4978 cmd_opcode[0] = ELS_FPIN;
4979 cmd_opcode[1] = ELS_RDP;
/* One bit per ELS opcode: byte index = opcode/8, bit = opcode%8. */
4981 for (i = 0; i < PUREX_CMD_COUNT; i++) {
4982 index = cmd_opcode[i] / 8;
4983 purex_bit = cmd_opcode[i] % 8;
4984 els_cmd_map[index] |= 1 << purex_bit;
4987 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4988 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
4989 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
4990 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
4991 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
4992 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
4993 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4994 mcp->in_mb = MBX_1|MBX_0;
4995 mcp->tov = MBX_TOV_SECONDS;
4996 mcp->flags = MBX_DMA_OUT;
4997 mcp->buf_size = ELS_CMD_MAP_SIZE;
4998 rval = qla2x00_mailbox_command(vha, mcp);
5000 if (rval != QLA_SUCCESS) {
5001 ql_dbg(ql_dbg_mbx, vha, 0x118d,
5002 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
5004 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
5005 "Done %s.\n", __func__);
5008 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5009 els_cmd_map, els_cmd_map_dma);
/*
 * Read the ASIC temperature via Get-RNID-Params
 * (RNID_TYPE_ASIC_TEMP); firmware returns the value in mb[1].
 */
5015 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
5019 mbx_cmd_t *mcp = &mc;
5021 if (!IS_FWI2_CAPABLE(vha->hw))
5022 return QLA_FUNCTION_FAILED;
5024 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
5025 "Entered %s.\n", __func__);
5027 mcp->mb[0] = MBC_GET_RNID_PARAMS;
5028 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
5029 mcp->out_mb = MBX_1|MBX_0;
5030 mcp->in_mb = MBX_1|MBX_0;
5031 mcp->tov = MBX_TOV_SECONDS;
5033 rval = qla2x00_mailbox_command(vha, mcp);
5036 if (rval != QLA_SUCCESS) {
5037 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5038 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5040 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5041 "Done %s.\n", __func__);
/*
 * Read 'len' bytes from the SFP transceiver (I2C device 'dev', offset
 * 'off') into the DMA buffer 'sfp_dma'.  Returns QLA_INTERFACE_ERROR
 * when firmware reports the SFP is absent (mb[0]=cmd-error, mb[1]=0x22).
 */
5048 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5049 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5053 mbx_cmd_t *mcp = &mc;
5054 struct qla_hw_data *ha = vha->hw;
5056 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5057 "Entered %s.\n", __func__);
5059 if (!IS_FWI2_CAPABLE(ha))
5060 return QLA_FUNCTION_FAILED;
5065 mcp->mb[0] = MBC_READ_SFP;
5067 mcp->mb[2] = MSW(LSD(sfp_dma));
5068 mcp->mb[3] = LSW(LSD(sfp_dma));
5069 mcp->mb[6] = MSW(MSD(sfp_dma));
5070 mcp->mb[7] = LSW(MSD(sfp_dma));
5074 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5075 mcp->in_mb = MBX_1|MBX_0;
5076 mcp->tov = MBX_TOV_SECONDS;
5078 rval = qla2x00_mailbox_command(vha, mcp);
5083 if (rval != QLA_SUCCESS) {
5084 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5085 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5086 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5087 /* sfp is not there */
5088 rval = QLA_INTERFACE_ERROR;
5091 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5092 "Done %s.\n", __func__);
/*
 * Write 'len' bytes from the DMA buffer 'sfp_dma' to the SFP
 * transceiver (I2C device 'dev', offset 'off').  Mirror image of
 * qla2x00_read_sfp().
 */
5099 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5100 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5104 mbx_cmd_t *mcp = &mc;
5105 struct qla_hw_data *ha = vha->hw;
5107 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5108 "Entered %s.\n", __func__);
5110 if (!IS_FWI2_CAPABLE(ha))
5111 return QLA_FUNCTION_FAILED;
5119 mcp->mb[0] = MBC_WRITE_SFP;
5121 mcp->mb[2] = MSW(LSD(sfp_dma));
5122 mcp->mb[3] = LSW(LSD(sfp_dma));
5123 mcp->mb[6] = MSW(MSD(sfp_dma));
5124 mcp->mb[7] = LSW(MSD(sfp_dma));
5128 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5129 mcp->in_mb = MBX_1|MBX_0;
5130 mcp->tov = MBX_TOV_SECONDS;
5132 rval = qla2x00_mailbox_command(vha, mcp);
5134 if (rval != QLA_SUCCESS) {
5135 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5136 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5139 "Done %s.\n", __func__);
/*
 * Fetch XGMAC statistics into 'stats_dma' (CNA adapters only).  Sizes
 * travel in dwords (mb[8] out, mb[2] in); *actual_size is converted
 * back to bytes on success.
 */
5146 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5147 uint16_t size_in_bytes, uint16_t *actual_size)
5151 mbx_cmd_t *mcp = &mc;
5153 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5154 "Entered %s.\n", __func__);
5156 if (!IS_CNA_CAPABLE(vha->hw))
5157 return QLA_FUNCTION_FAILED;
5159 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5160 mcp->mb[2] = MSW(stats_dma);
5161 mcp->mb[3] = LSW(stats_dma);
5162 mcp->mb[6] = MSW(MSD(stats_dma));
5163 mcp->mb[7] = LSW(MSD(stats_dma));
5164 mcp->mb[8] = size_in_bytes >> 2;
5165 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5166 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5167 mcp->tov = MBX_TOV_SECONDS;
5169 rval = qla2x00_mailbox_command(vha, mcp);
5171 if (rval != QLA_SUCCESS) {
5172 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5173 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5174 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5176 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5177 "Done %s.\n", __func__);
5180 *actual_size = mcp->mb[2] << 2;
/*
 * Fetch the DCBX parameter TLVs into the DMA buffer 'tlv_dma'
 * (CNA adapters only).
 */
5187 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5192 mbx_cmd_t *mcp = &mc;
5194 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5195 "Entered %s.\n", __func__);
5197 if (!IS_CNA_CAPABLE(vha->hw))
5198 return QLA_FUNCTION_FAILED;
5200 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5202 mcp->mb[2] = MSW(tlv_dma);
5203 mcp->mb[3] = LSW(tlv_dma);
5204 mcp->mb[6] = MSW(MSD(tlv_dma));
5205 mcp->mb[7] = LSW(MSD(tlv_dma));
5207 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5208 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5209 mcp->tov = MBX_TOV_SECONDS;
5211 rval = qla2x00_mailbox_command(vha, mcp);
5213 if (rval != QLA_SUCCESS) {
5214 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5215 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5216 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5218 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5219 "Done %s.\n", __func__);
/*
 * Read one 32-bit word from RISC RAM at 'risc_addr'; the result is
 * assembled from mb[3] (high) and mb[2] (low) into *data.
 */
5226 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5230 mbx_cmd_t *mcp = &mc;
5232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5233 "Entered %s.\n", __func__);
5235 if (!IS_FWI2_CAPABLE(vha->hw))
5236 return QLA_FUNCTION_FAILED;
5238 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5239 mcp->mb[1] = LSW(risc_addr);
5240 mcp->mb[8] = MSW(risc_addr);
5241 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5242 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5243 mcp->tov = MBX_TOV_SECONDS;
5245 rval = qla2x00_mailbox_command(vha, mcp);
5246 if (rval != QLA_SUCCESS) {
5247 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5248 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5251 "Done %s.\n", __func__);
5252 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * Run the diagnostic loopback test described by 'mreq' (send/receive
 * DMA buffers, transfer size, iteration count) and copy the raw
 * result mailbox registers back to 'mresp'.
 */
5259 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5264 mbx_cmd_t *mcp = &mc;
5266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5267 "Entered %s.\n", __func__);
5269 memset(mcp->mb, 0 , sizeof(mcp->mb));
5270 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5271 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5273 /* transfer count */
5274 mcp->mb[10] = LSW(mreq->transfer_size);
5275 mcp->mb[11] = MSW(mreq->transfer_size);
5277 /* send data address */
5278 mcp->mb[14] = LSW(mreq->send_dma);
5279 mcp->mb[15] = MSW(mreq->send_dma);
5280 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5281 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5283 /* receive data address */
5284 mcp->mb[16] = LSW(mreq->rcv_dma);
5285 mcp->mb[17] = MSW(mreq->rcv_dma);
5286 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5287 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5289 /* Iteration count */
5290 mcp->mb[18] = LSW(mreq->iteration_count);
5291 mcp->mb[19] = MSW(mreq->iteration_count);
5293 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5294 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5295 if (IS_CNA_CAPABLE(vha->hw))
5296 mcp->out_mb |= MBX_2;
5297 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5299 mcp->buf_size = mreq->transfer_size;
5300 mcp->tov = MBX_TOV_SECONDS;
5301 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5303 rval = qla2x00_mailbox_command(vha, mcp);
5305 if (rval != QLA_SUCCESS) {
5306 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5307 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5308 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5309 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5312 "Done %s.\n", __func__);
5315 /* Copy mailbox information */
5316 memcpy( mresp, mcp->mb, 64);
/*
 * Run the diagnostic ECHO test described by 'mreq' and copy the raw
 * result mailbox registers back to 'mresp'.  On CNA parts the FCoE
 * FCF index is supplied in mb[2].
 */
5321 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5326 mbx_cmd_t *mcp = &mc;
5327 struct qla_hw_data *ha = vha->hw;
5329 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5330 "Entered %s.\n", __func__);
5332 memset(mcp->mb, 0 , sizeof(mcp->mb));
5333 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5334 /* BIT_6 specifies 64bit address */
5335 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5336 if (IS_CNA_CAPABLE(ha)) {
5337 mcp->mb[2] = vha->fcoe_fcf_idx;
/* Receive buffer: 64-bit DMA address split across mb[16,17,6,7]. */
5339 mcp->mb[16] = LSW(mreq->rcv_dma);
5340 mcp->mb[17] = MSW(mreq->rcv_dma);
5341 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5342 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5344 mcp->mb[10] = LSW(mreq->transfer_size);
/* Send buffer: 64-bit DMA address split across mb[14,15,20,21]. */
5346 mcp->mb[14] = LSW(mreq->send_dma);
5347 mcp->mb[15] = MSW(mreq->send_dma);
5348 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5349 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5351 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5352 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5353 if (IS_CNA_CAPABLE(ha))
5354 mcp->out_mb |= MBX_2;
5357 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5358 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5359 mcp->in_mb |= MBX_1;
5360 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5362 mcp->in_mb |= MBX_3;
5364 mcp->tov = MBX_TOV_SECONDS;
5365 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5366 mcp->buf_size = mreq->transfer_size;
5368 rval = qla2x00_mailbox_command(vha, mcp);
5370 if (rval != QLA_SUCCESS) {
5371 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5372 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5373 rval, mcp->mb[0], mcp->mb[1]);
5375 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5376 "Done %s.\n", __func__);
5379 /* Copy mailbox information */
5380 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip - issue MBC_ISP84XX_RESET to reset the ISP84xx chip.
 * @enable_diagnostic: passed through in mb[1]; non-zero requests that the
 * chip come back up in diagnostic mode.
 */
5385 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5389 	mbx_cmd_t *mcp = &mc;
5391 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5392 	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5394 	mcp->mb[0] = MBC_ISP84XX_RESET;
5395 	mcp->mb[1] = enable_diagnostic;
5396 	mcp->out_mb = MBX_1|MBX_0;
5397 	mcp->in_mb = MBX_1|MBX_0;
5398 	mcp->tov = MBX_TOV_SECONDS;
5399 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5400 	rval = qla2x00_mailbox_command(vha, mcp);
5402 	if (rval != QLA_SUCCESS)
5403 		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5405 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5406 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word - write one 32-bit word to RISC RAM.
 * Uses MBC_WRITE_RAM_WORD_EXTENDED with the 32-bit address split across
 * mb[1] (low) and mb[8] (high). FWI-2 capable ISPs only.
 */
5412 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5416 	mbx_cmd_t *mcp = &mc;
5418 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5419 	    "Entered %s.\n", __func__);
5421 	if (!IS_FWI2_CAPABLE(vha->hw))
5422 		return QLA_FUNCTION_FAILED;
5424 	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5425 	mcp->mb[1] = LSW(risc_addr);
5426 	mcp->mb[2] = LSW(data);
5427 	mcp->mb[3] = MSW(data);
5428 	mcp->mb[8] = MSW(risc_addr);
5429 	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5430 	mcp->in_mb = MBX_1|MBX_0;
5431 	mcp->tov = MBX_TOV_SECONDS;
5433 	rval = qla2x00_mailbox_command(vha, mcp);
5434 	if (rval != QLA_SUCCESS) {
5435 		ql_dbg(ql_dbg_mbx, vha, 0x1101,
5436 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
5437 		    rval, mcp->mb[0], mcp->mb[1]);
5439 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5440 		    "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register - write an MPI register by banging the
 * mailbox registers directly (no interrupt-driven mailbox path): write
 * MBC_WRITE_MPI_REGISTER plus mb[0..3] into mailbox0..4, set the host
 * interrupt, then busy-poll host_status for the RISC completion.
 * NOTE(review): the "®->" tokens below are mojibake for "&reg->"
 * (HTML-entity corruption in this extraction) — restore before compiling.
 */
5447 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5450 	uint32_t stat, timer;
5452 	struct qla_hw_data *ha = vha->hw;
5453 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5457 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5458 	    "Entered %s.\n", __func__);
5460 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5462 	/* Write the MBC data to the registers */
5463 	wrt_reg_word(®->mailbox0, MBC_WRITE_MPI_REGISTER);
5464 	wrt_reg_word(®->mailbox1, mb[0]);
5465 	wrt_reg_word(®->mailbox2, mb[1]);
5466 	wrt_reg_word(®->mailbox3, mb[2]);
5467 	wrt_reg_word(®->mailbox4, mb[3]);
5469 	wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT);
5471 	/* Poll for MBC interrupt */
5472 	for (timer = 6000000; timer; timer--) {
5473 		/* Check for pending interrupts. */
5474 		stat = rd_reg_dword(®->host_status);
5475 		if (stat & HSRX_RISC_INT) {
			/* Mailbox-completion status codes only; other interrupt
			 * sources are handled elsewhere (lines missing here). */
5478 			if (stat == 0x1 || stat == 0x2 ||
5479 			    stat == 0x10 || stat == 0x11) {
5480 				set_bit(MBX_INTERRUPT,
5481 				    &ha->mbx_cmd_flags);
5482 				mb0 = rd_reg_word(®->mailbox0);
5483 				wrt_reg_dword(®->hccr,
5484 				    HCCRX_CLR_RISC_INT);
5485 				rd_reg_dword(®->hccr);
	/* Completion seen: return mb[0] status; otherwise the poll timed out. */
5492 	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5493 		rval = mb0 & MBS_MASK;
5495 		rval = QLA_FUNCTION_FAILED;
5497 	if (rval != QLA_SUCCESS) {
5498 		ql_dbg(ql_dbg_mbx, vha, 0x1104,
5499 		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5501 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5502 		    "Done %s.\n", __func__);
5508 /* Set the specified data rate */
/*
 * qla2x00_set_data_rate - program the port speed via MBC_DATA_RATE.
 * Validates ha->set_data_rate against the supported speeds and falls
 * back to autonegotiation for unrecognized values. FWI-2 only.
 */
5510 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5514 	mbx_cmd_t *mcp = &mc;
5515 	struct qla_hw_data *ha = vha->hw;
5518 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5519 	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5522 	if (!IS_FWI2_CAPABLE(ha))
5523 		return QLA_FUNCTION_FAILED;
5525 	memset(mcp, 0, sizeof(*mcp));
5526 	switch (ha->set_data_rate) {
5527 	case PORT_SPEED_AUTO:
5528 	case PORT_SPEED_4GB:
5529 	case PORT_SPEED_8GB:
5530 	case PORT_SPEED_16GB:
5531 	case PORT_SPEED_32GB:
5532 		val = ha->set_data_rate;
	/* default: unknown speed requested — warn and force autoneg. */
5535 		ql_log(ql_log_warn, vha, 0x1199,
5536 		    "Unrecognized speed setting:%d. Setting Autoneg\n",
5538 		val = ha->set_data_rate = PORT_SPEED_AUTO;
5542 	mcp->mb[0] = MBC_DATA_RATE;
5546 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
5547 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5548 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5549 		mcp->in_mb |= MBX_4|MBX_3;
5550 	mcp->tov = MBX_TOV_SECONDS;
5552 	rval = qla2x00_mailbox_command(vha, mcp);
5553 	if (rval != QLA_SUCCESS) {
5554 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5555 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		/* mb[1] == 0x7 means "rate not reported"; log the set speed otherwise. */
5557 		if (mcp->mb[1] != 0x7)
5558 			ql_dbg(ql_dbg_mbx, vha, 0x1179,
5559 			    "Speed set:0x%x\n", mcp->mb[1]);
5561 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5562 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate - query the current link speed via MBC_DATA_RATE
 * with the QLA_GET_DATA_RATE subcode and cache it in ha->link_data_rate.
 * On 83xx/27xx/28xx, mb[4] BIT_0 additionally reports FEC enablement.
 */
5569 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5573 	mbx_cmd_t *mcp = &mc;
5574 	struct qla_hw_data *ha = vha->hw;
5576 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5577 	    "Entered %s.\n", __func__);
5579 	if (!IS_FWI2_CAPABLE(ha))
5580 		return QLA_FUNCTION_FAILED;
5582 	mcp->mb[0] = MBC_DATA_RATE;
5583 	mcp->mb[1] = QLA_GET_DATA_RATE;
5584 	mcp->out_mb = MBX_1|MBX_0;
5585 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
5586 	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5587 		mcp->in_mb |= MBX_3;
5588 	mcp->tov = MBX_TOV_SECONDS;
5590 	rval = qla2x00_mailbox_command(vha, mcp);
5591 	if (rval != QLA_SUCCESS) {
5592 		ql_dbg(ql_dbg_mbx, vha, 0x1107,
5593 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		/* 0x7 in mb[1] means the firmware has no valid rate to report. */
5595 		if (mcp->mb[1] != 0x7)
5596 			ha->link_data_rate = mcp->mb[1];
5598 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5599 			if (mcp->mb[4] & BIT_0)
5600 				ql_log(ql_log_info, vha, 0x11a2,
5601 				    "FEC=enabled (data rate).\n");
5604 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5605 		    "Done %s.\n", __func__);
5606 		if (mcp->mb[1] != 0x7)
5607 			ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config - fetch the current port configuration via
 * MBC_GET_PORT_CONFIG; copies the four returned words mb[1..4] verbatim
 * into the caller's @mb array so all bits are preserved.
 */
5614 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5618 	mbx_cmd_t *mcp = &mc;
5619 	struct qla_hw_data *ha = vha->hw;
5621 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5622 	    "Entered %s.\n", __func__);
5624 	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5625 	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5626 		return QLA_FUNCTION_FAILED;
5627 	mcp->mb[0] = MBC_GET_PORT_CONFIG;
5628 	mcp->out_mb = MBX_0;
5629 	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5630 	mcp->tov = MBX_TOV_SECONDS;
5633 	rval = qla2x00_mailbox_command(vha, mcp);
5635 	if (rval != QLA_SUCCESS) {
5636 		ql_dbg(ql_dbg_mbx, vha, 0x110a,
5637 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5639 		/* Copy all bits to preserve original value */
5640 		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5642 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5643 		    "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config - program a new port configuration: the four
 * words in @mb are copied verbatim into mb[1..4] of the
 * MBC_SET_PORT_CONFIG mailbox command.
 */
5649 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5653 	mbx_cmd_t *mcp = &mc;
5655 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5656 	    "Entered %s.\n", __func__);
5658 	mcp->mb[0] = MBC_SET_PORT_CONFIG;
5659 	/* Copy all bits to preserve original setting */
5660 	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5661 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5663 	mcp->tov = MBX_TOV_SECONDS;
5665 	rval = qla2x00_mailbox_command(vha, mcp);
5667 	if (rval != QLA_SUCCESS) {
5668 		ql_dbg(ql_dbg_mbx, vha, 0x110d,
5669 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5671 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5672 		    "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio - set the FCP priority (low nibble of @priority)
 * for the port identified by @loop_id using MBC_PORT_PARAMS.
 * 24xx/25xx adapters only; mb[9] carries the vport index.
 */
5679 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5684 	mbx_cmd_t *mcp = &mc;
5685 	struct qla_hw_data *ha = vha->hw;
5687 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5688 	    "Entered %s.\n", __func__);
5690 	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5691 		return QLA_FUNCTION_FAILED;
5693 	mcp->mb[0] = MBC_PORT_PARAMS;
5694 	mcp->mb[1] = loop_id;
	/* mb[2] subcode depends on whether FCP priority is enabled (lines missing). */
5695 	if (ha->flags.fcp_prio_enabled)
5699 	mcp->mb[4] = priority & 0xf;
5700 	mcp->mb[9] = vha->vp_idx;
5701 	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5702 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5703 	mcp->tov = MBX_TOV_SECONDS;
5705 	rval = qla2x00_mailbox_command(vha, mcp);
5713 	if (rval != QLA_SUCCESS) {
5714 		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5716 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5717 		    "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp - read the adapter temperature into *temp.
 * Source depends on hardware: selected 25xx subsystems read it over the
 * SFP/I2C interface, P3P parts (82xx/8044) read a chip register, and
 * everything else uses the ASIC temperature mailbox command.
 */
5724 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5726 	int rval = QLA_FUNCTION_FAILED;
5727 	struct qla_hw_data *ha = vha->hw;
	/* Legacy FWI-2 parts (24xx, 81xx) have no thermal sensor support. */
5730 	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5731 		ql_dbg(ql_dbg_mbx, vha, 0x1150,
5732 		    "Thermal not supported by this card.\n");
5736 	if (IS_QLA25XX(ha)) {
		/* QLogic-branded 25xx: thermal byte at SFP offset 0x98, opts BIT_13|BIT_0. */
5737 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5738 		    ha->pdev->subsystem_device == 0x0175) {
5739 			rval = qla2x00_read_sfp(vha, 0, &byte,
5740 			    0x98, 0x1, 1, BIT_13|BIT_0);
		/* HP-branded 25xx variant uses a different option mask. */
5744 		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5745 		    ha->pdev->subsystem_device == 0x338e) {
5746 			rval = qla2x00_read_sfp(vha, 0, &byte,
5747 			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5751 		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5752 		    "Thermal not supported by this card.\n");
5756 	if (IS_QLA82XX(ha)) {
5757 		*temp = qla82xx_read_temperature(vha);
5760 	} else if (IS_QLA8044(ha)) {
5761 		*temp = qla8044_read_temperature(vha);
5766 	rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable - ask the firmware to enable interrupt delivery
 * using MBC_TOGGLE_INTERRUPT (the enable/disable flag in mb[1] is on a
 * source line missing from this extraction).
 */
5771 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5774 	struct qla_hw_data *ha = vha->hw;
5776 	mbx_cmd_t *mcp = &mc;
5778 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5779 	    "Entered %s.\n", __func__);
5781 	if (!IS_FWI2_CAPABLE(ha))
5782 		return QLA_FUNCTION_FAILED;
5784 	memset(mcp, 0, sizeof(mbx_cmd_t));
5785 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5788 	mcp->out_mb = MBX_1|MBX_0;
5790 	mcp->tov = MBX_TOV_SECONDS;
5793 	rval = qla2x00_mailbox_command(vha, mcp);
5794 	if (rval != QLA_SUCCESS) {
5795 		ql_dbg(ql_dbg_mbx, vha, 0x1016,
5796 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5798 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5799 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable - counterpart of qla82xx_mbx_intr_enable():
 * issue MBC_TOGGLE_INTERRUPT to turn firmware interrupt delivery off.
 * Restricted to P3P-type (82xx-family) adapters.
 */
5806 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5809 	struct qla_hw_data *ha = vha->hw;
5811 	mbx_cmd_t *mcp = &mc;
5813 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5814 	    "Entered %s.\n", __func__);
5816 	if (!IS_P3P_TYPE(ha))
5817 		return QLA_FUNCTION_FAILED;
5819 	memset(mcp, 0, sizeof(mbx_cmd_t));
5820 	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5823 	mcp->out_mb = MBX_1|MBX_0;
5825 	mcp->tov = MBX_TOV_SECONDS;
5828 	rval = qla2x00_mailbox_command(vha, mcp);
5829 	if (rval != QLA_SUCCESS) {
5830 		ql_dbg(ql_dbg_mbx, vha, 0x100c,
5831 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5833 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5834 		    "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size - query the size of the firmware minidump
 * template (MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT_SIZE subcode).
 * On success the 32-bit size from mb[3]:mb[2] is cached in
 * ha->md_template_size; a zero size is treated as failure.
 */
5841 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5843 	struct qla_hw_data *ha = vha->hw;
5845 	mbx_cmd_t *mcp = &mc;
5846 	int rval = QLA_FUNCTION_FAILED;
5848 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5849 	    "Entered %s.\n", __func__);
5851 	memset(mcp->mb, 0 , sizeof(mcp->mb));
	/* 32-bit command and request codes are split into LSW/MSW halves. */
5852 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5853 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5854 	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5855 	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5857 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5858 	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5859 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5861 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5862 	mcp->tov = MBX_TOV_SECONDS;
5863 	rval = qla2x00_mailbox_command(vha, mcp);
5865 	/* Always copy back return mailbox values. */
5866 	if (rval != QLA_SUCCESS) {
5867 		ql_dbg(ql_dbg_mbx, vha, 0x1120,
5868 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5869 		    (mcp->mb[1] << 16) | mcp->mb[0],
5870 		    (mcp->mb[3] << 16) | mcp->mb[2]);
5872 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5873 		    "Done %s.\n", __func__);
5874 		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5875 		if (!ha->md_template_size) {
5876 			ql_dbg(ql_dbg_mbx, vha, 0x1122,
5877 			    "Null template size obtained.\n");
5878 			rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template - DMA the firmware minidump template into a
 * newly allocated coherent buffer (ha->md_tmplt_hdr, size previously
 * obtained by qla82xx_md_get_template_size()).
 */
5885 qla82xx_md_get_template(scsi_qla_host_t *vha)
5887 	struct qla_hw_data *ha = vha->hw;
5889 	mbx_cmd_t *mcp = &mc;
5890 	int rval = QLA_FUNCTION_FAILED;
5892 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5893 	    "Entered %s.\n", __func__);
5895 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5896 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5897 	if (!ha->md_tmplt_hdr) {
5898 		ql_log(ql_log_warn, vha, 0x1124,
5899 		    "Unable to allocate memory for Minidump template.\n");
5903 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5904 	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5905 	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5906 	mcp->mb[2] = LSW(RQST_TMPLT);
5907 	mcp->mb[3] = MSW(RQST_TMPLT);
	/* 64-bit DMA address of the template buffer in mb[4..7], size in mb[8..9]. */
5908 	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5909 	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5910 	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5911 	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5912 	mcp->mb[8] = LSW(ha->md_template_size);
5913 	mcp->mb[9] = MSW(ha->md_template_size);
5915 	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5916 	mcp->tov = MBX_TOV_SECONDS;
5917 	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5918 	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5919 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5920 	rval = qla2x00_mailbox_command(vha, mcp);
5922 	if (rval != QLA_SUCCESS) {
5923 		ql_dbg(ql_dbg_mbx, vha, 0x1125,
5924 		    "mailbox command FAILED=0x%x, subcode=%x.\n",
5925 		    ((mcp->mb[1] << 16) | mcp->mb[0]),
5926 		    ((mcp->mb[3] << 16) | mcp->mb[2]));
5928 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5929 		    "Done %s.\n", __func__);
/*
 * qla8044_md_get_template - 8044 variant of the minidump-template fetch:
 * the template is transferred in MINIDUMP_SIZE_36K chunks, with the
 * running byte offset passed in mb[10]/mb[11] on each iteration, until
 * ha->md_template_size bytes have been copied.
 */
5934 qla8044_md_get_template(scsi_qla_host_t *vha)
5936 	struct qla_hw_data *ha = vha->hw;
5938 	mbx_cmd_t *mcp = &mc;
5939 	int rval = QLA_FUNCTION_FAILED;
5940 	int offset = 0, size = MINIDUMP_SIZE_36K;
5942 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5943 	    "Entered %s.\n", __func__);
5945 	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5946 	   ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5947 	if (!ha->md_tmplt_hdr) {
5948 		ql_log(ql_log_warn, vha, 0xb11b,
5949 		    "Unable to allocate memory for Minidump template.\n");
5953 	memset(mcp->mb, 0 , sizeof(mcp->mb));
5954 	while (offset < ha->md_template_size) {
5955 		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5956 		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5957 		mcp->mb[2] = LSW(RQST_TMPLT);
5958 		mcp->mb[3] = MSW(RQST_TMPLT);
		/* DMA target for this chunk: base address advanced by offset. */
5959 		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5960 		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5961 		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5962 		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5963 		mcp->mb[8] = LSW(size);
5964 		mcp->mb[9] = MSW(size);
5965 		mcp->mb[10] = offset & 0x0000FFFF;
5966 		mcp->mb[11] = offset & 0xFFFF0000;
5967 		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5968 		mcp->tov = MBX_TOV_SECONDS;
5969 		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5970 			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5971 		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5972 		rval = qla2x00_mailbox_command(vha, mcp);
5974 		if (rval != QLA_SUCCESS) {
5975 			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5976 			    "mailbox command FAILED=0x%x, subcode=%x.\n",
5977 			    ((mcp->mb[1] << 16) | mcp->mb[0]),
5978 			    ((mcp->mb[3] << 16) | mcp->mb[2]));
5981 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5982 			    "Done %s.\n", __func__);
5983 			offset = offset + size;
/*
 * qla81xx_set_led_config - program the LED configuration words via
 * MBC_SET_LED_CONFIG. 81xx takes two words (mb[1..2]); 8031 takes six
 * (mb[1..6]). Supported on QLA81xx and QLA8031 only.
 */
5989 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5992 	struct qla_hw_data *ha = vha->hw;
5994 	mbx_cmd_t *mcp = &mc;
5996 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5997 		return QLA_FUNCTION_FAILED;
5999 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
6000 	    "Entered %s.\n", __func__);
6002 	memset(mcp, 0, sizeof(mbx_cmd_t));
6003 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6004 	mcp->mb[1] = led_cfg[0];
6005 	mcp->mb[2] = led_cfg[1];
6006 	if (IS_QLA8031(ha)) {
6007 		mcp->mb[3] = led_cfg[2];
6008 		mcp->mb[4] = led_cfg[3];
6009 		mcp->mb[5] = led_cfg[4];
6010 		mcp->mb[6] = led_cfg[5];
6013 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6015 		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6017 	mcp->tov = MBX_TOV_SECONDS;
6020 	rval = qla2x00_mailbox_command(vha, mcp);
6021 	if (rval != QLA_SUCCESS) {
6022 		ql_dbg(ql_dbg_mbx, vha, 0x1134,
6023 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6025 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
6026 		    "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config - read back the LED configuration via
 * MBC_GET_LED_CONFIG into @led_cfg: two words on 81xx, six on 8031.
 * Mirror image of qla81xx_set_led_config().
 */
6033 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
6036 	struct qla_hw_data *ha = vha->hw;
6038 	mbx_cmd_t *mcp = &mc;
6040 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6041 		return QLA_FUNCTION_FAILED;
6043 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6044 	    "Entered %s.\n", __func__);
6046 	memset(mcp, 0, sizeof(mbx_cmd_t));
6047 	mcp->mb[0] = MBC_GET_LED_CONFIG;
6049 	mcp->out_mb = MBX_0;
6050 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6052 		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6053 	mcp->tov = MBX_TOV_SECONDS;
6056 	rval = qla2x00_mailbox_command(vha, mcp);
6057 	if (rval != QLA_SUCCESS) {
6058 		ql_dbg(ql_dbg_mbx, vha, 0x1137,
6059 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6061 		led_cfg[0] = mcp->mb[1];
6062 		led_cfg[1] = mcp->mb[2];
6063 		if (IS_QLA8031(ha)) {
6064 			led_cfg[2] = mcp->mb[3];
6065 			led_cfg[3] = mcp->mb[4];
6066 			led_cfg[4] = mcp->mb[5];
6067 			led_cfg[5] = mcp->mb[6];
6069 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6070 		    "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl - turn the beacon LED on or off on P3P-type
 * (82xx-family) adapters via MBC_SET_LED_CONFIG (the @enable-dependent
 * mb[7] setup is on source lines missing from this extraction).
 */
6077 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6080 	struct qla_hw_data *ha = vha->hw;
6082 	mbx_cmd_t *mcp = &mc;
6084 	if (!IS_P3P_TYPE(ha))
6085 		return QLA_FUNCTION_FAILED;
6087 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6088 	    "Entered %s.\n", __func__);
6090 	memset(mcp, 0, sizeof(mbx_cmd_t));
6091 	mcp->mb[0] = MBC_SET_LED_CONFIG;
6097 	mcp->out_mb = MBX_7|MBX_0;
6099 	mcp->tov = MBX_TOV_SECONDS;
6102 	rval = qla2x00_mailbox_command(vha, mcp);
6103 	if (rval != QLA_SUCCESS) {
6104 		ql_dbg(ql_dbg_mbx, vha, 0x1128,
6105 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6107 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6108 		    "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg - write a 32-bit value to a remote register via
 * MBC_WRITE_REMOTE_REG: address in mb[1]/mb[2], data in mb[3]/mb[4].
 * 83xx/27xx/28xx only.
 */
6115 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6118 	struct qla_hw_data *ha = vha->hw;
6120 	mbx_cmd_t *mcp = &mc;
6122 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6123 		return QLA_FUNCTION_FAILED;
6125 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6126 	    "Entered %s.\n", __func__);
6128 	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6129 	mcp->mb[1] = LSW(reg);
6130 	mcp->mb[2] = MSW(reg);
6131 	mcp->mb[3] = LSW(data);
6132 	mcp->mb[4] = MSW(data);
6133 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6135 	mcp->in_mb = MBX_1|MBX_0;
6136 	mcp->tov = MBX_TOV_SECONDS;
6138 	rval = qla2x00_mailbox_command(vha, mcp);
6140 	if (rval != QLA_SUCCESS) {
6141 		ql_dbg(ql_dbg_mbx, vha, 0x1131,
6142 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6144 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6145 		    "Done %s.\n", __func__);
/*
 * qla2x00_port_logout - perform an implicit LOGO of @fcport using
 * MBC_PORT_LOGOUT with BIT_15 set in mb[10] (implicit-logout option).
 * Not supported on the oldest ISPs (2100/2200).
 */
6152 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6155 	struct qla_hw_data *ha = vha->hw;
6157 	mbx_cmd_t *mcp = &mc;
6159 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6160 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6161 		    "Implicit LOGO Unsupported.\n");
6162 		return QLA_FUNCTION_FAILED;
6166 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6167 	    "Entering %s.\n",  __func__);
6169 	/* Perform Implicit LOGO. */
6170 	mcp->mb[0] = MBC_PORT_LOGOUT;
6171 	mcp->mb[1] = fcport->loop_id;
6172 	mcp->mb[10] = BIT_15;
6173 	mcp->out_mb = MBX_10|MBX_1|MBX_0;
6175 	mcp->tov = MBX_TOV_SECONDS;
6177 	rval = qla2x00_mailbox_command(vha, mcp);
6178 	if (rval != QLA_SUCCESS)
6179 		ql_dbg(ql_dbg_mbx, vha, 0x113d,
6180 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6182 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6183 		    "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg - read a 32-bit remote register via MBC_READ_REMOTE_REG.
 * The value is assembled from mb[3] (low 16) and mb[4] (high 16). CAMRAM
 * reads during soft-reset can return the poison value QLA8XXX_BAD_VALUE
 * (0xbad0bad0), so the read is retried for up to 2 seconds.
 */
6189 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6193 	mbx_cmd_t *mcp = &mc;
6194 	struct qla_hw_data *ha = vha->hw;
6195 	unsigned long retry_max_time = jiffies + (2 * HZ);
6197 	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6198 		return QLA_FUNCTION_FAILED;
6200 	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6203 	mcp->mb[0] = MBC_READ_REMOTE_REG;
6204 	mcp->mb[1] = LSW(reg);
6205 	mcp->mb[2] = MSW(reg);
6206 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
6207 	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6208 	mcp->tov = MBX_TOV_SECONDS;
6210 	rval = qla2x00_mailbox_command(vha, mcp);
6212 	if (rval != QLA_SUCCESS) {
6213 		ql_dbg(ql_dbg_mbx, vha, 0x114c,
6214 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6215 		    rval, mcp->mb[0], mcp->mb[1]);
6217 		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
6218 		if (*data == QLA8XXX_BAD_VALUE) {
6220 			 * During soft-reset CAMRAM register reads might
6221 			 * return 0xbad0bad0. So retry for MAX of 2 sec
6222 			 * while reading camram registers.
6224 			if (time_after(jiffies, retry_max_time)) {
6225 				ql_dbg(ql_dbg_mbx, vha, 0x1141,
6226 				    "Failure to read CAMRAM register. "
6227 				    "data=0x%x.\n", *data);
6228 				return QLA_FUNCTION_FAILED;
6233 	ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware - ask the 83xx NIC-side firmware to
 * restart via MBC_RESTART_NIC_FIRMWARE; dumps firmware state on failure.
 */
6240 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6244 	mbx_cmd_t *mcp = &mc;
6245 	struct qla_hw_data *ha = vha->hw;
6247 	if (!IS_QLA83XX(ha))
6248 		return QLA_FUNCTION_FAILED;
6250 	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6252 	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6253 	mcp->out_mb = MBX_0;
6254 	mcp->in_mb = MBX_1|MBX_0;
6255 	mcp->tov = MBX_TOV_SECONDS;
6257 	rval = qla2x00_mailbox_command(vha, mcp);
6259 	if (rval != QLA_SUCCESS) {
6260 		ql_dbg(ql_dbg_mbx, vha, 0x1144,
6261 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
6262 		    rval, mcp->mb[0], mcp->mb[1]);
6263 		qla2xxx_dump_fw(vha);
6265 		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control - issue MBC_SET_ACCESS_CONTROL (QLA8031 only).
 * The low byte of @options selects a subcode: BIT_2 passes an address
 * range in mb[2..5]; BIT_5 returns a flash sector size in *sector_size;
 * BIT_6/BIT_7 and BIT_3/BIT_4 return driver-lock / flash-lock ids in
 * mb[3..4].
 */
6272 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6273 	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6277 	mbx_cmd_t *mcp = &mc;
6278 	uint8_t subcode = (uint8_t)options;
6279 	struct qla_hw_data *ha = vha->hw;
6281 	if (!IS_QLA8031(ha))
6282 		return QLA_FUNCTION_FAILED;
6284 	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6286 	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6287 	mcp->mb[1] = options;
6288 	mcp->out_mb = MBX_1|MBX_0;
6289 	if (subcode & BIT_2) {
6290 		mcp->mb[2] = LSW(start_addr);
6291 		mcp->mb[3] = MSW(start_addr);
6292 		mcp->mb[4] = LSW(end_addr);
6293 		mcp->mb[5] = MSW(end_addr);
6294 		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6296 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
6297 	if (!(subcode & (BIT_2 | BIT_5)))
6298 		mcp->in_mb |= MBX_4|MBX_3;
6299 	mcp->tov = MBX_TOV_SECONDS;
6301 	rval = qla2x00_mailbox_command(vha, mcp);
6303 	if (rval != QLA_SUCCESS) {
6304 		ql_dbg(ql_dbg_mbx, vha, 0x1147,
6305 		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6306 		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6308 		qla2xxx_dump_fw(vha);
6310 		if (subcode & BIT_5)
6311 			*sector_size = mcp->mb[1];
6312 		else if (subcode & (BIT_6 | BIT_7)) {
6313 			ql_dbg(ql_dbg_mbx, vha, 0x1148,
6314 			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6315 		} else if (subcode & (BIT_3 | BIT_4)) {
6316 			ql_dbg(ql_dbg_mbx, vha, 0x1149,
6317 			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6319 		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data - dump MCTP RAM into a caller-supplied DMA
 * buffer using MBC_DUMP_RISC_RAM_EXTENDED with the MCTP RAM ID (0x40,
 * marked valid via BIT_7) in mb[10]. MCTP-capable adapters only.
 */
6326 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6331 	mbx_cmd_t *mcp = &mc;
6333 	if (!IS_MCTP_CAPABLE(vha->hw))
6334 		return QLA_FUNCTION_FAILED;
6336 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6337 	    "Entered %s.\n", __func__);
6339 	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6340 	mcp->mb[1] = LSW(addr);
6341 	mcp->mb[2] = MSW(req_dma);
6342 	mcp->mb[3] = LSW(req_dma);
6343 	mcp->mb[4] = MSW(size);
6344 	mcp->mb[5] = LSW(size);
6345 	mcp->mb[6] = MSW(MSD(req_dma));
6346 	mcp->mb[7] = LSW(MSD(req_dma));
6347 	mcp->mb[8] = MSW(addr);
6348 	/* Setting RAM ID to valid */
6349 	/* For MCTP RAM ID is 0x40 */
6350 	mcp->mb[10] = BIT_7 | 0x40;
6352 	mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6356 	mcp->tov = MBX_TOV_SECONDS;
6358 	rval = qla2x00_mailbox_command(vha, mcp);
6360 	if (rval != QLA_SUCCESS) {
6361 		ql_dbg(ql_dbg_mbx, vha, 0x114e,
6362 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6364 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6365 		    "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics - run D-port diagnostics via
 * MBC_DPORT_DIAGNOSTICS, streaming results into @dd_buf. The buffer is
 * DMA-mapped for the duration of the command (DMA_FROM_DEVICE) and
 * always unmapped before return. Timeout is 4x the normal mailbox TOV.
 */
6372 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6373 	void *dd_buf, uint size, uint options)
6377 	mbx_cmd_t *mcp = &mc;
6380 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6381 	    !IS_QLA28XX(vha->hw))
6382 		return QLA_FUNCTION_FAILED;
6384 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6385 	    "Entered %s.\n", __func__);
6387 	dd_dma = dma_map_single(&vha->hw->pdev->dev,
6388 	    dd_buf, size, DMA_FROM_DEVICE);
6389 	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6390 		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6391 		return QLA_MEMORY_ALLOC_FAILED;
6394 	memset(dd_buf, 0, size);
6396 	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6397 	mcp->mb[1] = options;
6398 	mcp->mb[2] = MSW(LSD(dd_dma));
6399 	mcp->mb[3] = LSW(LSD(dd_dma));
6400 	mcp->mb[6] = MSW(MSD(dd_dma));
6401 	mcp->mb[7] = LSW(MSD(dd_dma));
6403 	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6404 	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6405 	mcp->buf_size = size;
6406 	mcp->flags = MBX_DMA_IN;
6407 	mcp->tov = MBX_TOV_SECONDS * 4;
6408 	rval = qla2x00_mailbox_command(vha, mcp);
6410 	if (rval != QLA_SUCCESS) {
6411 		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6413 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6414 		    "Done %s.\n", __func__);
6417 	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6418 	    size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done - srb completion callback for IOCB-based
 * mailbox commands: records the result and wakes the waiter in
 * qla24xx_send_mb_cmd(), which owns and frees the srb.
 */
6423 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6425 	sp->u.iocb_cmd.u.mbx.rc = res;
6427 	complete(&sp->u.iocb_cmd.u.mbx.comp);
6428 	/* don't free sp here. Let the caller do the free */
6432  * This mailbox uses the iocb interface to send MB command.
6433  * This allows non-critical (non chip setup) command to go
/*
 * qla24xx_send_mb_cmd - submit a mailbox command through the IOCB (srb)
 * path instead of the direct mailbox registers and wait for completion.
 * The result mailbox registers are copied back into @mcp->mb. Requires
 * firmware to be started; blocks on a completion, so not for atomic
 * context.
 */
6436 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6438 	int rval = QLA_FUNCTION_FAILED;
6442 	if (!vha->hw->flags.fw_started)
6445 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6449 	sp->type = SRB_MB_IOCB;
6450 	sp->name = mb_to_str(mcp->mb[0]);
6452 	c = &sp->u.iocb_cmd;
6453 	c->timeout = qla2x00_async_iocb_timeout;
6454 	init_completion(&c->u.mbx.comp);
6456 	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6458 	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6460 	sp->done = qla2x00_async_mb_sp_done;
6462 	rval = qla2x00_start_sp(sp);
6463 	if (rval != QLA_SUCCESS) {
6464 		ql_dbg(ql_dbg_mbx, vha, 0x1018,
6465 		    "%s: %s Failed submission. %x.\n",
6466 		    __func__, sp->name, rval);
6470 	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6471 	    sp->name, sp->handle);
6473 	wait_for_completion(&c->u.mbx.comp);
6474 	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6478 	case QLA_FUNCTION_TIMEOUT:
6479 		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6480 		    __func__, sp->name, rval);
6483 		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6484 		    __func__, sp->name);
6487 		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6488 		    __func__, sp->name, rval);
6500  * NOTE: Do not call this routine from DPC thread
/*
 * qla24xx_gpdb_wait - synchronous Get Port Database for @fcport via the
 * IOCB mailbox path: allocates a port_database_24xx from the DMA pool,
 * issues MBC_GET_PORT_DATABASE, parses the result with
 * __qla24xx_parse_gpdb(), and frees the buffer. Sleeps — must not run
 * from the DPC thread.
 */
6502 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6504 	int rval = QLA_FUNCTION_FAILED;
6506 	struct port_database_24xx *pd;
6507 	struct qla_hw_data *ha = vha->hw;
6510 	if (!vha->hw->flags.fw_started)
6513 	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6515 		ql_log(ql_log_warn, vha, 0xd047,
6516 		    "Failed to allocate port database structure.\n");
6520 	memset(&mc, 0, sizeof(mc));
6521 	mc.mb[0] = MBC_GET_PORT_DATABASE;
6522 	mc.mb[1] = fcport->loop_id;
6523 	mc.mb[2] = MSW(pd_dma);
6524 	mc.mb[3] = LSW(pd_dma);
6525 	mc.mb[6] = MSW(MSD(pd_dma));
6526 	mc.mb[7] = LSW(MSD(pd_dma));
6527 	mc.mb[9] = vha->vp_idx;
6530 	rval = qla24xx_send_mb_cmd(vha, &mc);
6531 	if (rval != QLA_SUCCESS) {
6532 		ql_dbg(ql_dbg_mbx, vha, 0x1193,
6533 		    "%s: %8phC fail\n", __func__, fcport->port_name);
6537 	rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6539 	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6540 	    __func__, fcport->port_name);
6544 	dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb - interpret a port_database_24xx entry for
 * @fcport: verify the login state is PRLI-complete, confirm the WWPN
 * still matches (else QLA_NOT_LOGGED_IN), then populate names, port_id,
 * port_type (FCP vs NVMe roles from PRLI service parameter word 3),
 * class-of-service, and confirmed-completion support flags.
 */
6549 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6550 	struct port_database_24xx *pd)
6552 	int rval = QLA_SUCCESS;
6554 	u8 current_login_state, last_login_state;
	/* NVMe targets report their login state in the high nibble. */
6556 	if (NVME_TARGET(vha->hw, fcport)) {
6557 		current_login_state = pd->current_login_state >> 4;
6558 		last_login_state = pd->last_login_state >> 4;
6560 		current_login_state = pd->current_login_state & 0xf;
6561 		last_login_state = pd->last_login_state & 0xf;
6564 	/* Check for logged in state. */
6565 	if (current_login_state != PDS_PRLI_COMPLETE) {
6566 		ql_dbg(ql_dbg_mbx, vha, 0x119a,
6567 		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6568 		    current_login_state, last_login_state, fcport->loop_id);
6569 		rval = QLA_FUNCTION_FAILED;
6573 	if (fcport->loop_id == FC_NO_LOOP_ID ||
6574 	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6575 	     memcmp(fcport->port_name, pd->port_name, 8))) {
6576 		/* We lost the device mid way. */
6577 		rval = QLA_NOT_LOGGED_IN;
6581 	/* Names are little-endian. */
6582 	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6583 	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6585 	/* Get port_id of device. */
6586 	fcport->d_id.b.domain = pd->port_id[0];
6587 	fcport->d_id.b.area = pd->port_id[1];
6588 	fcport->d_id.b.al_pa = pd->port_id[2];
6589 	fcport->d_id.b.rsvd_1 = 0;
6591 	if (NVME_TARGET(vha->hw, fcport)) {
6592 		fcport->port_type = FCT_NVME;
		/* Role bits in PRLI word 3 are active-low (0 == capability present). */
6593 		if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6594 			fcport->port_type |= FCT_NVME_INITIATOR;
6595 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6596 			fcport->port_type |= FCT_NVME_TARGET;
6597 		if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6598 			fcport->port_type |= FCT_NVME_DISCOVERY;
6600 		/* If not target must be initiator or unknown type. */
6601 		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6602 			fcport->port_type = FCT_INITIATOR;
6604 			fcport->port_type = FCT_TARGET;
6606 	/* Passback COS information. */
6607 	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6608 		FC_COS_CLASS2 : FC_COS_CLASS3;
6610 	if (pd->prli_svc_param_word_3[0] & BIT_7) {
6611 		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6612 		fcport->conf_compl_supported = 1;
6620  * qla24xx_gidlist_wait
6621  * NOTE: don't call this routine from DPC thread.
/*
 * Fetch the firmware ID list (MBC_GET_ID_LIST) into the caller's DMA
 * buffer via the IOCB mailbox path; on success *entries is set from
 * mb[1]. Sleeps, so must not run from the DPC thread.
 */
6623 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6624 	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6626 	int rval = QLA_FUNCTION_FAILED;
6629 	if (!vha->hw->flags.fw_started)
6632 	memset(&mc, 0, sizeof(mc));
6633 	mc.mb[0] = MBC_GET_ID_LIST;
6634 	mc.mb[2] = MSW(id_list_dma);
6635 	mc.mb[3] = LSW(id_list_dma);
6636 	mc.mb[6] = MSW(MSD(id_list_dma));
6637 	mc.mb[7] = LSW(MSD(id_list_dma));
6639 	mc.mb[9] = vha->vp_idx;
6641 	rval = qla24xx_send_mb_cmd(vha, &mc);
6642 	if (rval != QLA_SUCCESS) {
6643 		ql_dbg(ql_dbg_mbx, vha, 0x119b,
6644 		    "%s:  fail\n", __func__);
6646 		*entries = mc.mb[1];
6647 		ql_dbg(ql_dbg_mbx, vha, 0x119c,
6648 		    "%s:  done\n", __func__);
/*
 * qla27xx_set_zio_threshold - set the ZIO threshold with
 * MBC_GET_SET_ZIO_THRESHOLD (the "set" subcode and @value placement in
 * mb[1]/mb[2] are on source lines missing from this extraction).
 */
6654 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6658 	mbx_cmd_t *mcp = &mc;
6660 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6661 	    "Entered %s\n", __func__);
6663 	memset(mcp->mb, 0 , sizeof(mcp->mb));
6664 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6667 	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6668 	mcp->in_mb = MBX_2 | MBX_0;
6669 	mcp->tov = MBX_TOV_SECONDS;
6672 	rval = qla2x00_mailbox_command(vha, mcp);
6674 	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6675 	    (rval != QLA_SUCCESS) ? "Failed"  : "Done", rval);
/*
 * qla27xx_get_zio_threshold - query the current ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD; on success *value is set from a returned
 * mailbox register (the assignment line is missing from this extraction).
 */
6680 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6684 	mbx_cmd_t *mcp = &mc;
6686 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6687 	    "Entered %s\n", __func__);
6689 	memset(mcp->mb, 0, sizeof(mcp->mb));
6690 	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6692 	mcp->out_mb = MBX_1 | MBX_0;
6693 	mcp->in_mb = MBX_2 | MBX_0;
6694 	mcp->tov = MBX_TOV_SECONDS;
6697 	rval = qla2x00_mailbox_command(vha, mcp);
6698 	if (rval == QLA_SUCCESS)
6701 	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6702 	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla2x00_read_sfp_dev - read the full SFP device data (SFP_DEV_SIZE
 * bytes) in SFP_BLOCK_SIZE chunks into ha->sfp_data, optionally copying
 * up to @count bytes into the caller's @buf as blocks arrive.
 */
6708 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6710 	struct qla_hw_data *ha = vha->hw;
6711 	uint16_t iter, addr, offset;
6712 	dma_addr_t phys_addr;
6716 	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6718 	phys_addr = ha->sfp_data_dma;
6719 	sfp_data = ha->sfp_data;
6722 	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6724 			/* Skip to next device address. */
6729 		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6730 		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
6731 		if (rval != QLA_SUCCESS) {
6732 			ql_log(ql_log_warn, vha, 0x706d,
6733 			    "Unable to read SFP data (%x/%x/%x).\n", rval,
6739 		if (buf && (c < count)) {
			/* Copy at most the remaining requested bytes from this block. */
6742 			if ((count - c) >= SFP_BLOCK_SIZE)
6743 				sz = SFP_BLOCK_SIZE;
6747 			memcpy(buf, sfp_data, sz);
6748 			buf += SFP_BLOCK_SIZE;
6751 		phys_addr += SFP_BLOCK_SIZE;
6752 		sfp_data += SFP_BLOCK_SIZE;
6753 		offset += SFP_BLOCK_SIZE;
/*
 * qla24xx_res_count_wait - fetch firmware resource counts.
 * @vha:       host adapter pointer
 * @out_mb:    caller array receiving the returned mailbox registers
 * @out_mb_sz: size of @out_mb in bytes
 *
 * Sends MBC_GET_RESOURCE_COUNTS through the IOCB-based mailbox path
 * (qla24xx_send_mb_cmd) and copies the returned registers into
 * @out_mb, never writing more than min(out_mb_sz, SIZEOF_IOCB_MB_REG).
 *
 * Return: QLA_SUCCESS or QLA_FUNCTION_FAILED (also failed when the
 * firmware has not been started).
 */
6759 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6760 uint16_t *out_mb, int out_mb_sz)
6762 int rval = QLA_FUNCTION_FAILED;
/* Mailbox traffic is pointless before firmware is up; bail early. */
6765 if (!vha->hw->flags.fw_started)
6768 memset(&mc, 0, sizeof(mc));
6769 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6771 rval = qla24xx_send_mb_cmd(vha, &mc);
6772 if (rval != QLA_SUCCESS) {
6773 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6774 "%s: fail\n", __func__);
/* Copy whichever is smaller: caller capacity or the full register set. */
6776 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6777 memcpy(out_mb, mc.mb, out_mb_sz);
6779 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6781 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6782 "%s: done\n", __func__);
/*
 * qla28xx_secure_flash_update - issue MBC_SECURE_FLASH_UPDATE.
 * @vha:           host adapter pointer
 * @opts:          command options; presumably placed in mb[1] on an
 *                 elided line -- TODO confirm against full file
 * @region:        flash region identifier (mb[2])
 * @len:           update length, split across mb[3]/mb[4]
 * @sfub_dma_addr: bus address of the secure-flash-update block,
 *                 spread over mb[5..8] (MSW/LSW of each dword)
 *
 * NOTE(review): the trailing parameter(s), mb[1] setup, the
 * "mcp->out_mb =" left-hand side and the return are elided from this
 * excerpt; code kept byte-identical.
 */
6788 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6789 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6794 mbx_cmd_t *mcp = &mc;
6796 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6798 mcp->mb[2] = region;
6799 mcp->mb[3] = MSW(len);
6800 mcp->mb[4] = LSW(len);
/* 64-bit SFUB address: low dword in mb[5:6], high dword in mb[7:8]. */
6801 mcp->mb[5] = MSW(sfub_dma_addr);
6802 mcp->mb[6] = LSW(sfub_dma_addr);
6803 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6804 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6805 mcp->mb[9] = sfub_len;
/* Continuation of the (elided) "mcp->out_mb =" assignment. */
6807 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6808 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6809 mcp->tov = MBX_TOV_SECONDS;
6811 rval = qla2x00_mailbox_command(vha, mcp);
6813 if (rval != QLA_SUCCESS) {
6814 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6815 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
/*
 * qla2xxx_write_remote_register - write a 32-bit remote register.
 * @vha:  host adapter pointer
 * @addr: register address, split LSW/MSW into mb[1]/mb[2]
 * @data: 32-bit value to write, split LSW/MSW into mb[3]/mb[4]
 *
 * Issues MBC_WRITE_REMOTE_REG through the synchronous mailbox path.
 * Return value comes from qla2x00_mailbox_command() (return statement
 * itself is elided from this excerpt).
 */
6822 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6827 mbx_cmd_t *mcp = &mc;
6829 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6830 "Entered %s.\n", __func__);
6832 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
/* 32-bit address and data each split into two 16-bit mailbox regs. */
6833 mcp->mb[1] = LSW(addr);
6834 mcp->mb[2] = MSW(addr);
6835 mcp->mb[3] = LSW(data);
6836 mcp->mb[4] = MSW(data);
6837 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6838 mcp->in_mb = MBX_1|MBX_0;
6839 mcp->tov = MBX_TOV_SECONDS;
6841 rval = qla2x00_mailbox_command(vha, mcp);
6843 if (rval != QLA_SUCCESS) {
6844 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6845 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6847 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6848 "Done %s.\n", __func__);
/*
 * qla2xxx_read_remote_register - read a 32-bit remote register.
 * @vha:  host adapter pointer
 * @addr: register address, split LSW/MSW into mb[1]/mb[2]
 * @data: out parameter assembled from returned mb[4] (high 16 bits)
 *        and mb[3] (low 16 bits)
 *
 * Issues MBC_READ_REMOTE_REG through the synchronous mailbox path.
 */
6854 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6859 mbx_cmd_t *mcp = &mc;
6861 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6862 "Entered %s.\n", __func__);
6864 mcp->mb[0] = MBC_READ_REMOTE_REG;
6865 mcp->mb[1] = LSW(addr);
6866 mcp->mb[2] = MSW(addr);
6867 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6868 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6869 mcp->tov = MBX_TOV_SECONDS;
6871 rval = qla2x00_mailbox_command(vha, mcp);
/* *data is written unconditionally, even before the rval check below;
 * on failure it holds whatever mb[3]/mb[4] contain. */
6873 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6875 if (rval != QLA_SUCCESS) {
6876 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6877 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6879 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6880 "Done %s.\n", __func__);
/*
 * ql26xx_led_config - get or set the FC LED configuration.
 * @vha:     host adapter pointer
 * @options: BIT_0 selects set (1) vs get (0); in set mode BIT_1/BIT_2/
 *           BIT_3 select which of the three LED values to program
 * @led:     three-element LED array; note the register mapping visible
 *           below: mb[3]<->led[0], mb[4]<->led[1], mb[2]<->led[2]
 *
 * Only supported on ISP2031/27xx/28xx; fails immediately otherwise.
 * Issues MBC_SET_GET_FC_LED_CONFIG via the synchronous mailbox path.
 */
6887 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6889 struct qla_hw_data *ha = vha->hw;
6891 mbx_cmd_t *mcp = &mc;
/* Feature only exists on these ISP generations. */
6894 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6895 return QLA_FUNCTION_FAILED;
6897 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6900 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6901 mcp->mb[1] = options;
6902 mcp->out_mb = MBX_1|MBX_0;
6903 mcp->in_mb = MBX_1|MBX_0;
/* Set mode: send only the LED registers the caller selected. */
6904 if (options & BIT_0) {
6905 if (options & BIT_1) {
6906 mcp->mb[2] = led[2];
6907 mcp->out_mb |= MBX_2;
6909 if (options & BIT_2) {
6910 mcp->mb[3] = led[0];
6911 mcp->out_mb |= MBX_3;
6913 if (options & BIT_3) {
6914 mcp->mb[4] = led[1];
6915 mcp->out_mb |= MBX_4;
/* Get mode (else branch, presumably): read all three LED registers. */
6918 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6920 mcp->tov = MBX_TOV_SECONDS;
6922 rval = qla2x00_mailbox_command(vha, mcp);
6924 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6925 __func__, rval, mcp->mb[0], mcp->mb[1]);
6929 if (options & BIT_0) {
/* A successful explicit set cancels any in-progress beacon blink. */
6930 ha->beacon_blink_led = 0;
6931 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
/* Get mode: copy returned registers back using the same mapping. */
6933 led[2] = mcp->mb[2];
6934 led[0] = mcp->mb[3];
6935 led[1] = mcp->mb[4];
6936 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6937 __func__, led[0], led[1], led[2]);
6944  * qla_no_op_mb(): This MB is used to check if FW is still alive and
6945  * able to generate an interrupt. Otherwise, a timeout will trigger
6946  * firmware dump and reset recovery.
6947  * @vha: host adapter pointer
6950 void qla_no_op_mb(struct scsi_qla_host *vha)
6953 mbx_cmd_t *mcp = &mc;
6956 memset(&mc, 0, sizeof(mc));
6957 mcp->mb[0] = 0; // noop cmd= 0
6958 mcp->out_mb = MBX_0;
6962 rval = qla2x00_mailbox_command(vha, mcp);
6965 ql_dbg(ql_dbg_async, vha, 0x7071,
6966 "Failed %s %x\n", __func__, rval);