2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
/*
 * Lookup table mapping mailbox command opcodes (MBC_*) to short names
 * used in debug/trace output by mb_to_str().
 * NOTE(review): this excerpt is fragmentary — the array declarator line
 * and the table terminator are elided; each line also carries an embedded
 * original-file line number that is extraction residue.
 */
13 static struct mb_cmd_name {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * mb_to_str() - translate a mailbox opcode into a short printable name.
 * Scans the mb_str[] table above. NOTE(review): the loop body and the
 * fallback return for unknown opcodes are elided in this excerpt — TODO
 * confirm against the full file.
 */
23 static const char *mb_to_str(uint16_t cmd)
26 struct mb_cmd_name *e;
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Table of mailbox commands that are still permitted while an ISP abort
 * is pending (consulted by is_rom_cmd() below, and by the ISP-abort guard
 * in qla2x00_mailbox_command()).
 * NOTE(review): excerpt appears truncated around the declarator and the
 * terminating brace.
 */
36 static struct rom_cmd {
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
/*
 * is_rom_cmd() - return non-zero if @cmd appears in the rom_cmds[] table,
 * i.e. the command is allowed while ISP abort is in progress.
 * NOTE(review): the loop body and returns are elided in this excerpt.
 */
64 static int is_rom_cmd(uint16_t cmd)
69 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
79 * qla2x00_mailbox_command
80 * Issue mailbox command and waits for completion.
83 * ha = adapter block pointer.
84 * mcp = driver internal mbx struct pointer.
87 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
90 0 : QLA_SUCCESS = command performed successfully
91 * 1 : QLA_FUNCTION_FAILED (error encountered)
92 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
/*
 * qla2x00_mailbox_command() - issue a mailbox command and wait for
 * completion (interrupt-driven when possible, otherwise polled).
 *
 * NOTE(review): this excerpt is heavily fragmentary — braces, several
 * declarations (rval, cnt, iptr, iptr2, mboxes, reg, w, mb[], i) and many
 * interior lines are elided, and "&reg->" has been mis-encoded as "®->"
 * throughout; the numeric prefix on each line is extraction residue.
 * Comments below describe only what the visible code shows.
 */
98 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 unsigned long flags = 0;
103 uint8_t abort_active;
105 uint16_t command = 0;
107 uint16_t __iomem *optr;
110 unsigned long wait_time;
111 struct qla_hw_data *ha = vha->hw;
112 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
/* Early-exit guards: PCI error state, failed device, disconnect+unload. */
117 if (ha->pdev->error_state > pci_channel_io_frozen) {
118 ql_log(ql_log_warn, vha, 0x1001,
119 "error_state is greater than pci_channel_io_frozen, "
121 return QLA_FUNCTION_TIMEOUT;
124 if (vha->device_flags & DFLG_DEV_FAILED) {
125 ql_log(ql_log_warn, vha, 0x1002,
126 "Device in failed state, exiting.\n");
127 return QLA_FUNCTION_TIMEOUT;
130 /* if PCI error, then avoid mbx processing.*/
131 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
132 test_bit(UNLOADING, &base_vha->dpc_flags)) {
133 ql_log(ql_log_warn, vha, 0xd04e,
134 "PCI error, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
139 io_lock_on = base_vha->flags.init_done;
142 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 if (ha->flags.pci_channel_io_perm_failure) {
146 ql_log(ql_log_warn, vha, 0x1003,
147 "Perm failure on EEH timeout MBX, exiting.\n");
148 return QLA_FUNCTION_TIMEOUT;
151 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
152 /* Setting Link-Down error */
153 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
154 ql_log(ql_log_warn, vha, 0x1004,
155 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
156 return QLA_FUNCTION_TIMEOUT;
159 /* check if ISP abort is active and return cmd with timeout */
160 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
161 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
162 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
163 !is_rom_cmd(mcp->mb[0])) {
164 ql_log(ql_log_info, vha, 0x1005,
165 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
167 return QLA_FUNCTION_TIMEOUT;
171 * Wait for active mailbox commands to finish by waiting at most tov
172 * seconds. This is to serialize actual issuing of mailbox cmds during
173 * non ISP abort time.
175 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
176 /* Timeout occurred. Return error. */
177 ql_log(ql_log_warn, vha, 0xd035,
178 "Cmd access timeout, cmd=0x%x, Exiting.\n",
180 return QLA_FUNCTION_TIMEOUT;
183 ha->flags.mbox_busy = 1;
184 /* Save mailbox command for debug */
187 ql_dbg(ql_dbg_mbx, vha, 0x1006,
188 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
190 spin_lock_irqsave(&ha->hardware_lock, flags);
192 /* Load mailbox registers. */
194 optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
195 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
196 optr = (uint16_t __iomem *)®->isp24.mailbox0;
198 optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);
201 command = mcp->mb[0];
202 mboxes = mcp->out_mb;
204 ql_dbg(ql_dbg_mbx, vha, 0x1111,
205 "Mailbox registers (OUT):\n");
/* Write each requested outgoing mailbox register (bitmask in out_mb). */
206 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
207 if (IS_QLA2200(ha) && cnt == 8)
209 (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
210 if (mboxes & BIT_0) {
211 ql_dbg(ql_dbg_mbx, vha, 0x1112,
212 "mbox[%d]<-0x%04x\n", cnt, *iptr);
213 WRT_REG_WORD(optr, *iptr);
221 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
222 "I/O Address = %p.\n", optr);
224 /* Issue set host interrupt command to send cmd out. */
225 ha->flags.mbox_int = 0;
226 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
228 /* Unlock mbx registers and wait for interrupt */
229 ql_dbg(ql_dbg_mbx, vha, 0x100f,
230 "Going to unlock irq & waiting for interrupts. "
231 "jiffies=%lx.\n", jiffies);
233 /* Wait for mbx cmd completion until timeout */
/* Interrupt-driven path: only when not in ISP abort and init is done. */
235 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
236 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
238 if (IS_P3P_TYPE(ha)) {
239 if (RD_REG_DWORD(®->isp82.hint) &
240 HINT_MBX_INT_PENDING) {
241 spin_unlock_irqrestore(&ha->hardware_lock,
243 ha->flags.mbox_busy = 0;
244 ql_dbg(ql_dbg_mbx, vha, 0x1010,
245 "Pending mailbox timeout, exiting.\n");
246 rval = QLA_FUNCTION_TIMEOUT;
249 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
250 } else if (IS_FWI2_CAPABLE(ha))
251 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
253 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
254 spin_unlock_irqrestore(&ha->hardware_lock, flags);
257 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
259 ql_dbg(ql_dbg_mbx, vha, 0x117a,
260 "cmd=%x Timeout.\n", command);
261 spin_lock_irqsave(&ha->hardware_lock, flags);
262 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
263 spin_unlock_irqrestore(&ha->hardware_lock, flags);
265 if (time_after(jiffies, wait_time + 5 * HZ))
266 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
267 command, jiffies_to_msecs(jiffies - wait_time));
/* Polling path: spin on mbox_int, servicing the response queue. */
269 ql_dbg(ql_dbg_mbx, vha, 0x1011,
270 "Cmd=%x Polling Mode.\n", command);
272 if (IS_P3P_TYPE(ha)) {
273 if (RD_REG_DWORD(®->isp82.hint) &
274 HINT_MBX_INT_PENDING) {
275 spin_unlock_irqrestore(&ha->hardware_lock,
277 ha->flags.mbox_busy = 0;
278 ql_dbg(ql_dbg_mbx, vha, 0x1012,
279 "Pending mailbox timeout, exiting.\n");
280 rval = QLA_FUNCTION_TIMEOUT;
283 WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
284 } else if (IS_FWI2_CAPABLE(ha))
285 WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
287 WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
288 spin_unlock_irqrestore(&ha->hardware_lock, flags);
290 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
291 while (!ha->flags.mbox_int) {
292 if (time_after(jiffies, wait_time))
296 * Check if it's UNLOADING, cause we cannot poll in
297 * this case, or else a NULL pointer dereference
300 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
301 return QLA_FUNCTION_TIMEOUT;
303 /* Check for pending interrupts. */
304 qla2x00_poll(ha->rsp_q_map[0]);
306 if (!ha->flags.mbox_int &&
308 command == MBC_LOAD_RISC_RAM_EXTENDED))
311 ql_dbg(ql_dbg_mbx, vha, 0x1013,
313 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
316 /* Check whether we timed out */
317 if (ha->flags.mbox_int) {
320 ql_dbg(ql_dbg_mbx, vha, 0x1014,
321 "Cmd=%x completed.\n", command);
323 /* Got interrupt. Clear the flag. */
324 ha->flags.mbox_int = 0;
325 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
327 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
328 ha->flags.mbox_busy = 0;
329 /* Setting Link-Down error */
330 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
332 rval = QLA_FUNCTION_FAILED;
333 ql_log(ql_log_warn, vha, 0xd048,
334 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
338 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
339 rval = QLA_FUNCTION_FAILED;
341 /* Load return mailbox registers. */
343 iptr = (uint16_t *)&ha->mailbox_out[0];
346 ql_dbg(ql_dbg_mbx, vha, 0x1113,
347 "Mailbox registers (IN):\n");
348 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
349 if (mboxes & BIT_0) {
351 ql_dbg(ql_dbg_mbx, vha, 0x1114,
352 "mbox[%d]->0x%04x\n", cnt, *iptr2);
/* Timeout branch: dump register state for post-mortem debugging. */
362 uint32_t ictrl, host_status, hccr;
365 if (IS_FWI2_CAPABLE(ha)) {
366 mb[0] = RD_REG_WORD(®->isp24.mailbox0);
367 mb[1] = RD_REG_WORD(®->isp24.mailbox1);
368 mb[2] = RD_REG_WORD(®->isp24.mailbox2);
369 mb[3] = RD_REG_WORD(®->isp24.mailbox3);
370 mb[7] = RD_REG_WORD(®->isp24.mailbox7);
371 ictrl = RD_REG_DWORD(®->isp24.ictrl);
372 host_status = RD_REG_DWORD(®->isp24.host_status);
373 hccr = RD_REG_DWORD(®->isp24.hccr);
375 ql_log(ql_log_warn, vha, 0xd04c,
376 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
377 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
378 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
379 mb[7], host_status, hccr);
382 mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
383 ictrl = RD_REG_WORD(®->isp.ictrl);
384 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
385 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
386 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
388 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
390 /* Capture FW dump only, if PCI device active */
391 if (!pci_channel_offline(vha->hw->pdev)) {
392 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
393 if (w == 0xffff || ictrl == 0xffffffff) {
394 /* This is special case if there is unload
395 * of driver happening and if PCI device go
396 * into bad state due to PCI error condition
397 * then only PCI ERR flag would be set.
398 * we will do premature exit for above case.
400 ha->flags.mbox_busy = 0;
401 rval = QLA_FUNCTION_TIMEOUT;
405 /* Attempt to capture firmware dump for further
406 * analysis of the current firmware state. We do not
407 * need to do this if we are intentionally generating
410 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
411 ha->isp_ops->fw_dump(vha, 0);
412 rval = QLA_FUNCTION_TIMEOUT;
416 ha->flags.mbox_busy = 0;
421 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
422 ql_dbg(ql_dbg_mbx, vha, 0x101a,
423 "Checking for additional resp interrupt.\n");
425 /* polling mode for non isp_abort commands. */
426 qla2x00_poll(ha->rsp_q_map[0]);
/* On timeout (except deliberate GEN_SYSTEM_ERROR): schedule or run ISP abort. */
429 if (rval == QLA_FUNCTION_TIMEOUT &&
430 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
431 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
432 ha->flags.eeh_busy) {
433 /* not in dpc. schedule it for dpc to take over. */
434 ql_dbg(ql_dbg_mbx, vha, 0x101b,
435 "Timeout, schedule isp_abort_needed.\n");
437 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
438 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
439 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
440 if (IS_QLA82XX(ha)) {
441 ql_dbg(ql_dbg_mbx, vha, 0x112a,
442 "disabling pause transmit on port "
445 QLA82XX_CRB_NIU + 0x98,
446 CRB_NIU_XG_PAUSE_CTL_P0|
447 CRB_NIU_XG_PAUSE_CTL_P1);
449 ql_log(ql_log_info, base_vha, 0x101c,
450 "Mailbox cmd timeout occurred, cmd=0x%x, "
451 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
452 "abort.\n", command, mcp->mb[0],
454 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
455 qla2xxx_wake_dpc(vha);
457 } else if (!abort_active) {
458 /* call abort directly since we are in the DPC thread */
459 ql_dbg(ql_dbg_mbx, vha, 0x101d,
460 "Timeout, calling abort_isp.\n");
462 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
463 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
464 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
465 if (IS_QLA82XX(ha)) {
466 ql_dbg(ql_dbg_mbx, vha, 0x112b,
467 "disabling pause transmit on port "
470 QLA82XX_CRB_NIU + 0x98,
471 CRB_NIU_XG_PAUSE_CTL_P0|
472 CRB_NIU_XG_PAUSE_CTL_P1);
474 ql_log(ql_log_info, base_vha, 0x101e,
475 "Mailbox cmd timeout occurred, cmd=0x%x, "
476 "mb[0]=0x%x. Scheduling ISP abort ",
477 command, mcp->mb[0]);
478 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
479 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
480 /* Allow next mbx cmd to come in. */
481 complete(&ha->mbx_cmd_comp);
482 if (ha->isp_ops->abort_isp(vha)) {
483 /* Failed. retry later. */
484 set_bit(ISP_ABORT_NEEDED,
487 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
488 ql_dbg(ql_dbg_mbx, vha, 0x101f,
489 "Finished abort_isp.\n");
496 /* Allow next mbx cmd to come in. */
497 complete(&ha->mbx_cmd_comp);
/* Failure summary logging: dump the outgoing mailbox set and registers. */
501 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
502 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
503 dev_name(&ha->pdev->dev), 0x1020+0x800,
507 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
508 if (mboxes & BIT_0) {
509 printk(" mb[%u]=%x", i, mcp->mb[i]);
512 pr_warn(" cmd=%x ****\n", command);
514 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
515 ql_dbg(ql_dbg_mbx, vha, 0x1198,
516 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
517 RD_REG_DWORD(®->isp24.host_status),
518 RD_REG_DWORD(®->isp24.ictrl),
519 RD_REG_DWORD(®->isp24.istatus));
521 ql_dbg(ql_dbg_mbx, vha, 0x1206,
522 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
523 RD_REG_WORD(®->isp.ctrl_status),
524 RD_REG_WORD(®->isp.ictrl),
525 RD_REG_WORD(®->isp.istatus));
528 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/*
 * qla2x00_load_ram() - download a firmware segment from host memory
 * (@req_dma, @risc_code_size words) into RISC RAM at @risc_addr.
 * Uses the extended form (mb[8] = high word of address, 32-bit size) on
 * FWI2-capable adapters or when the address exceeds 16 bits.
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations
 * and the return are elided; embedded line numbers are extraction residue.
 */
535 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
536 uint32_t risc_code_size)
539 struct qla_hw_data *ha = vha->hw;
541 mbx_cmd_t *mcp = &mc;
543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
544 "Entered %s.\n", __func__);
546 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
547 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
548 mcp->mb[8] = MSW(risc_addr);
549 mcp->out_mb = MBX_8|MBX_0;
551 mcp->mb[0] = MBC_LOAD_RISC_RAM;
/* DMA address split across mb[2]/mb[3] (low 32) and mb[6]/mb[7] (high 32). */
554 mcp->mb[1] = LSW(risc_addr);
555 mcp->mb[2] = MSW(req_dma);
556 mcp->mb[3] = LSW(req_dma);
557 mcp->mb[6] = MSW(MSD(req_dma));
558 mcp->mb[7] = LSW(MSD(req_dma));
559 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
560 if (IS_FWI2_CAPABLE(ha)) {
561 mcp->mb[4] = MSW(risc_code_size);
562 mcp->mb[5] = LSW(risc_code_size);
563 mcp->out_mb |= MBX_5|MBX_4;
565 mcp->mb[4] = LSW(risc_code_size);
566 mcp->out_mb |= MBX_4;
570 mcp->tov = MBX_TOV_SECONDS;
572 rval = qla2x00_mailbox_command(vha, mcp);
574 if (rval != QLA_SUCCESS) {
575 ql_dbg(ql_dbg_mbx, vha, 0x1023,
576 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
579 "Done %s.\n", __func__);
/* Feature bits for EXECUTE_FIRMWARE mb[4]. */
585 #define EXTENDED_BB_CREDITS BIT_0
586 #define NVME_ENABLE_FLAG BIT_3
/*
 * qla25xx_set_sfp_lr_dist() - build the mb[4] value enabling long-range
 * settings from the SFP-detected distance (83xx/27xx only add the
 * distance field). NOTE(review): return statement elided in excerpt.
 */
587 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
589 uint16_t mb4 = BIT_0;
591 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
592 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
/*
 * qla25xx_set_nvr_lr_dist() - build the mb[4] long-range value from the
 * NVRAM enhanced_features field instead of SFP detection.
 * NOTE(review): closing brace and return elided in excerpt.
 */
597 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
599 uint16_t mb4 = BIT_0;
601 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
602 struct nvram_81xx *nv = ha->nvram;
604 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
612 * Start adapter firmware.
615 * ha = adapter block pointer.
616 * TARGET_QUEUE_LOCK must be released.
617 * ADAPTER_STATE_LOCK must be released.
620 * qla2x00 local function return status code.
/*
 * qla2x00_execute_fw() - issue MBC_EXECUTE_FIRMWARE to start firmware at
 * @risc_addr, configuring optional features (long-range/SFP distance,
 * NVMe, extended logins, exchange offload, minimum link speed) via mb[4]
 * and mb[11] on FWI2-capable parts; on success it reads back the firmware
 * ability mask, exchange count and negotiated minimum speed.
 * NOTE(review): excerpt is fragmentary — declarations, braces and some
 * interior lines are elided; embedded line numbers are extraction residue.
 */
626 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
629 struct qla_hw_data *ha = vha->hw;
631 mbx_cmd_t *mcp = &mc;
633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
634 "Entered %s.\n", __func__);
636 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
639 if (IS_FWI2_CAPABLE(ha)) {
640 mcp->mb[1] = MSW(risc_addr);
641 mcp->mb[2] = LSW(risc_addr);
/* Long-range setting: prefer SFP auto-detect, fall back to NVRAM flag. */
644 ha->flags.using_lr_setting = 0;
645 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
647 if (ql2xautodetectsfp) {
648 if (ha->flags.detected_lr_sfp) {
650 qla25xx_set_sfp_lr_dist(ha);
651 ha->flags.using_lr_setting = 1;
654 struct nvram_81xx *nv = ha->nvram;
655 /* set LR distance if specified in nvram */
656 if (nv->enhanced_features &
657 NEF_LR_DIST_ENABLE) {
659 qla25xx_set_nvr_lr_dist(ha);
660 ha->flags.using_lr_setting = 1;
665 if (ql2xnvmeenable && IS_QLA27XX(ha))
666 mcp->mb[4] |= NVME_ENABLE_FLAG;
668 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
669 struct nvram_81xx *nv = ha->nvram;
670 /* set minimum speed if specified in nvram */
671 if (nv->min_link_speed >= 2 &&
672 nv->min_link_speed <= 5) {
674 mcp->mb[11] = nv->min_link_speed;
675 mcp->out_mb |= MBX_11;
677 vha->min_link_speed_feat = nv->min_link_speed;
681 if (ha->flags.exlogins_enabled)
682 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
684 if (ha->flags.exchoffld_enabled)
685 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
687 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
688 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
/* Legacy (non-FWI2) path: 16-bit address only. */
690 mcp->mb[1] = LSW(risc_addr);
691 mcp->out_mb |= MBX_1;
692 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
694 mcp->out_mb |= MBX_2;
698 mcp->tov = MBX_TOV_SECONDS;
700 rval = qla2x00_mailbox_command(vha, mcp);
702 if (rval != QLA_SUCCESS) {
703 ql_dbg(ql_dbg_mbx, vha, 0x1026,
704 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* Success path: capture firmware-reported capabilities. */
706 if (IS_FWI2_CAPABLE(ha)) {
707 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
708 ql_dbg(ql_dbg_mbx, vha, 0x119a,
709 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
710 ql_dbg(ql_dbg_mbx, vha, 0x1027,
711 "exchanges=%x.\n", mcp->mb[1]);
712 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
713 ha->max_speed_sup = mcp->mb[2] & BIT_0;
714 ql_dbg(ql_dbg_mbx, vha, 0x119b,
715 "Maximum speed supported=%s.\n",
716 ha->max_speed_sup ? "32Gps" : "16Gps");
717 if (vha->min_link_speed_feat) {
718 ha->min_link_speed = mcp->mb[5];
719 ql_dbg(ql_dbg_mbx, vha, 0x119c,
720 "Minimum speed set=%s.\n",
721 mcp->mb[5] == 5 ? "32Gps" :
722 mcp->mb[5] == 4 ? "16Gps" :
723 mcp->mb[5] == 3 ? "8Gps" :
724 mcp->mb[5] == 2 ? "4Gps" :
729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
737 * qla_get_exlogin_status
738 * Get extended login status
739 * uses the memory offload control/status Mailbox
742 * ha: adapter state pointer.
743 * fwopt: firmware options
746 * qla2x00 local function status
/* Sub-opcode of MBC_GET_MEM_OFFLOAD_CNTRL_STAT: query extended-login stats. */
751 #define FETCH_XLOGINS_STAT 0x8
/*
 * qla_get_exlogin_status() - query the firmware for the extended-login
 * buffer size (returned in mb[4] -> *buf_sz) and login count
 * (mb[10] -> *ex_logins_cnt).
 * NOTE(review): excerpt is fragmentary — braces/declarations/return are
 * elided; embedded line numbers are extraction residue.
 */
753 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
754 uint16_t *ex_logins_cnt)
758 mbx_cmd_t *mcp = &mc;
760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
761 "Entered %s\n", __func__);
763 memset(mcp->mb, 0 , sizeof(mcp->mb));
764 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
765 mcp->mb[1] = FETCH_XLOGINS_STAT;
766 mcp->out_mb = MBX_1|MBX_0;
767 mcp->in_mb = MBX_10|MBX_4|MBX_0;
768 mcp->tov = MBX_TOV_SECONDS;
771 rval = qla2x00_mailbox_command(vha, mcp);
772 if (rval != QLA_SUCCESS) {
773 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
775 *buf_sz = mcp->mb[4];
776 *ex_logins_cnt = mcp->mb[10];
778 ql_log(ql_log_info, vha, 0x1190,
779 "buffer size 0x%x, exchange login count=%d\n",
780 mcp->mb[4], mcp->mb[10]);
782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
783 "Done %s.\n", __func__);
790 * qla_set_exlogin_mem_cfg
791 * set extended login memory configuration
792 * Mbx needs to be issues before init_cb is set
795 * ha: adapter state pointer.
796 * buffer: buffer pointer
797 * phys_addr: physical address of buffer
798 * size: size of buffer
799 * TARGET_QUEUE_LOCK must be released
800 * ADAPTER_STATE_LOCK must be release
803 * qla2x00 local function status code.
/* Sub-opcode of MBC_GET_MEM_OFFLOAD_CNTRL_STAT: configure ex-login memory. */
808 #define CONFIG_XLOGINS_MEM 0x3
/*
 * qla_set_exlogin_mem_cfg() - hand the firmware the host buffer for
 * extended-login offload: 64-bit DMA address in mb[2,3,6,7] and size
 * (ha->exlogin_size) in mb[8,9]. Must be issued before init_cb is set.
 * NOTE(review): excerpt is fragmentary — braces/declarations/return elided.
 */
810 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
814 mbx_cmd_t *mcp = &mc;
815 struct qla_hw_data *ha = vha->hw;
817 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
818 "Entered %s.\n", __func__);
820 memset(mcp->mb, 0 , sizeof(mcp->mb));
821 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
822 mcp->mb[1] = CONFIG_XLOGINS_MEM;
823 mcp->mb[2] = MSW(phys_addr);
824 mcp->mb[3] = LSW(phys_addr);
825 mcp->mb[6] = MSW(MSD(phys_addr));
826 mcp->mb[7] = LSW(MSD(phys_addr));
827 mcp->mb[8] = MSW(ha->exlogin_size);
828 mcp->mb[9] = LSW(ha->exlogin_size);
829 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
830 mcp->in_mb = MBX_11|MBX_0;
831 mcp->tov = MBX_TOV_SECONDS;
833 rval = qla2x00_mailbox_command(vha, mcp);
834 if (rval != QLA_SUCCESS) {
836 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
838 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
839 "Done %s.\n", __func__);
846 * qla_get_exchoffld_status
847 * Get exchange offload status
848 * uses the memory offload control/status Mailbox
851 * ha: adapter state pointer.
852 * fwopt: firmware options
855 * qla2x00 local function status
/* Sub-opcode of MBC_GET_MEM_OFFLOAD_CNTRL_STAT: query exchange-offload stats. */
860 #define FETCH_XCHOFFLD_STAT 0x2
/*
 * qla_get_exchoffld_status() - query the firmware for the exchange-offload
 * buffer size (mb[4] -> *buf_sz) and exchange count (mb[10] ->
 * *ex_logins_cnt). Mirrors qla_get_exlogin_status() with a different
 * sub-opcode. NOTE(review): excerpt is fragmentary — braces/declarations/
 * return elided.
 */
862 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
863 uint16_t *ex_logins_cnt)
867 mbx_cmd_t *mcp = &mc;
869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
870 "Entered %s\n", __func__);
872 memset(mcp->mb, 0 , sizeof(mcp->mb));
873 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
874 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
875 mcp->out_mb = MBX_1|MBX_0;
876 mcp->in_mb = MBX_10|MBX_4|MBX_0;
877 mcp->tov = MBX_TOV_SECONDS;
880 rval = qla2x00_mailbox_command(vha, mcp);
881 if (rval != QLA_SUCCESS) {
882 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
884 *buf_sz = mcp->mb[4];
885 *ex_logins_cnt = mcp->mb[10];
887 ql_log(ql_log_info, vha, 0x118e,
888 "buffer size 0x%x, exchange offload count=%d\n",
889 mcp->mb[4], mcp->mb[10]);
891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
892 "Done %s.\n", __func__);
899 * qla_set_exchoffld_mem_cfg
900 * Set exchange offload memory configuration
901 * Mbx needs to be issues before init_cb is set
904 * ha: adapter state pointer.
905 * buffer: buffer pointer
906 * phys_addr: physical address of buffer
907 * size: size of buffer
908 * TARGET_QUEUE_LOCK must be released
909 * ADAPTER_STATE_LOCK must be release
912 * qla2x00 local function status code.
/* Sub-opcode of MBC_GET_MEM_OFFLOAD_CNTRL_STAT: configure xchg-offload memory. */
917 #define CONFIG_XCHOFFLD_MEM 0x3
/*
 * qla_set_exchoffld_mem_cfg() - hand the firmware the exchange-offload
 * host buffer: 64-bit DMA address (ha->exchoffld_buf_dma) in mb[2,3,6,7]
 * and size (ha->exchoffld_size) in mb[8,9]. Must be issued before
 * init_cb is set. NOTE(review): excerpt is fragmentary — braces/
 * declarations/return elided.
 */
919 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
923 mbx_cmd_t *mcp = &mc;
924 struct qla_hw_data *ha = vha->hw;
926 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
927 "Entered %s.\n", __func__);
929 memset(mcp->mb, 0 , sizeof(mcp->mb));
930 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
931 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
932 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
933 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
934 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
935 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
936 mcp->mb[8] = MSW(ha->exchoffld_size);
937 mcp->mb[9] = LSW(ha->exchoffld_size);
938 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
939 mcp->in_mb = MBX_11|MBX_0;
940 mcp->tov = MBX_TOV_SECONDS;
942 rval = qla2x00_mailbox_command(vha, mcp);
943 if (rval != QLA_SUCCESS) {
945 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
947 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
948 "Done %s.\n", __func__);
955 * qla2x00_get_fw_version
956 * Get firmware version.
959 * ha: adapter state pointer.
960 * major: pointer for major number.
961 * minor: pointer for minor number.
962 * subminor: pointer for subminor number.
965 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_version() - issue MBC_GET_FIRMWARE_VERSION and cache the
 * results into qla_hw_data: major/minor/subminor version, attributes,
 * memory size, and (per chip family) MPI/PHY/PEP versions, extended
 * attribute words, shared/DDR RAM ranges, and the NVMe capability flag.
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations,
 * some chip-type conditions and the return are elided; embedded line
 * numbers are extraction residue.
 */
971 qla2x00_get_fw_version(scsi_qla_host_t *vha)
975 mbx_cmd_t *mcp = &mc;
976 struct qla_hw_data *ha = vha->hw;
978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
979 "Entered %s.\n", __func__);
981 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
/* Widen the set of returned mailboxes for newer chip families. */
983 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
984 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
985 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
986 if (IS_FWI2_CAPABLE(ha))
987 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
990 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
991 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
994 mcp->tov = MBX_TOV_SECONDS;
995 rval = qla2x00_mailbox_command(vha, mcp);
996 if (rval != QLA_SUCCESS)
999 /* Return mailbox data. */
1000 ha->fw_major_version = mcp->mb[1];
1001 ha->fw_minor_version = mcp->mb[2];
1002 ha->fw_subminor_version = mcp->mb[3];
1003 ha->fw_attributes = mcp->mb[6];
1004 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1005 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1007 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1009 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1010 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1011 ha->mpi_version[1] = mcp->mb[11] >> 8;
1012 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1013 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1014 ha->phy_version[0] = mcp->mb[8] & 0xff;
1015 ha->phy_version[1] = mcp->mb[9] >> 8;
1016 ha->phy_version[2] = mcp->mb[9] & 0xff;
1019 if (IS_FWI2_CAPABLE(ha)) {
1020 ha->fw_attributes_h = mcp->mb[15];
1021 ha->fw_attributes_ext[0] = mcp->mb[16];
1022 ha->fw_attributes_ext[1] = mcp->mb[17];
1023 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1024 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1025 __func__, mcp->mb[15], mcp->mb[6]);
1026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1027 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1028 __func__, mcp->mb[17], mcp->mb[16]);
1030 if (ha->fw_attributes_h & 0x4)
1031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1032 "%s: Firmware supports Extended Login 0x%x\n",
1033 __func__, ha->fw_attributes_h);
1035 if (ha->fw_attributes_h & 0x8)
1036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1037 "%s: Firmware supports Exchange Offload 0x%x\n",
1038 __func__, ha->fw_attributes_h);
1041 * FW supports nvme and driver load parameter requested nvme.
1042 * BIT 26 of fw_attributes indicates NVMe support.
1044 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
1045 vha->flags.nvme_enabled = 1;
1046 ql_log(ql_log_info, vha, 0xd302,
1047 "%s: FC-NVMe is Enabled (0x%x)\n",
1048 __func__, ha->fw_attributes_h);
1052 if (IS_QLA27XX(ha)) {
1053 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1054 ha->mpi_version[1] = mcp->mb[11] >> 8;
1055 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1056 ha->pep_version[0] = mcp->mb[13] & 0xff;
1057 ha->pep_version[1] = mcp->mb[14] >> 8;
1058 ha->pep_version[2] = mcp->mb[14] & 0xff;
1059 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1060 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1061 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1062 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1066 if (rval != QLA_SUCCESS) {
1068 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1072 "Done %s.\n", __func__);
1078 * qla2x00_get_fw_options
1079 * Set firmware options.
1082 * ha = adapter block pointer.
1083 * fwopt = pointer for firmware options.
1086 * qla2x00 local function return status code.
/*
 * qla2x00_get_fw_options() - read the current firmware option words via
 * MBC_GET_FIRMWARE_OPTION; on success copies mb[0..3] into fwopts[0..3].
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations
 * and the return are elided.
 */
1092 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1096 mbx_cmd_t *mcp = &mc;
1098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1099 "Entered %s.\n", __func__);
1101 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1102 mcp->out_mb = MBX_0;
1103 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1104 mcp->tov = MBX_TOV_SECONDS;
1106 rval = qla2x00_mailbox_command(vha, mcp);
1108 if (rval != QLA_SUCCESS) {
1110 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1112 fwopts[0] = mcp->mb[0];
1113 fwopts[1] = mcp->mb[1];
1114 fwopts[2] = mcp->mb[2];
1115 fwopts[3] = mcp->mb[3];
1117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1118 "Done %s.\n", __func__);
1126 * qla2x00_set_fw_options
1127 * Set firmware options.
1130 * ha = adapter block pointer.
1131 * fwopt = pointer for firmware options.
1134 * qla2x00 local function return status code.
/*
 * qla2x00_set_fw_options() - program firmware option words via
 * MBC_SET_FIRMWARE_OPTION (fwopts[1..3] into mb[1..3]; fwopts[10..11]
 * into mb[10..11] on legacy parts, mb[10] only on FWI2). fwopts[0] is
 * overwritten with the completion status word regardless of outcome.
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations
 * and the return are elided.
 */
1140 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1144 mbx_cmd_t *mcp = &mc;
1146 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1147 "Entered %s.\n", __func__);
1149 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1150 mcp->mb[1] = fwopts[1];
1151 mcp->mb[2] = fwopts[2];
1152 mcp->mb[3] = fwopts[3];
1153 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1155 if (IS_FWI2_CAPABLE(vha->hw)) {
1156 mcp->in_mb |= MBX_1;
1157 mcp->mb[10] = fwopts[10];
1158 mcp->out_mb |= MBX_10;
1160 mcp->mb[10] = fwopts[10];
1161 mcp->mb[11] = fwopts[11];
1162 mcp->mb[12] = 0; /* Undocumented, but used */
1163 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1165 mcp->tov = MBX_TOV_SECONDS;
1167 rval = qla2x00_mailbox_command(vha, mcp);
1169 fwopts[0] = mcp->mb[0];
1171 if (rval != QLA_SUCCESS) {
1173 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1174 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1178 "Done %s.\n", __func__);
1185 * qla2x00_mbx_reg_test
1186 * Mailbox register wrap test.
1189 * ha = adapter block pointer.
1190 * TARGET_QUEUE_LOCK must be released.
1191 * ADAPTER_STATE_LOCK must be released.
1194 * qla2x00 local function return status code.
/*
 * qla2x00_mbx_reg_test() - mailbox register wrap test: write known
 * patterns into mb[1..7] via MBC_MAILBOX_REGISTER_TEST and verify the
 * firmware echoes them back unchanged; any mismatch fails the test.
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations
 * and the return are elided.
 */
1200 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1204 mbx_cmd_t *mcp = &mc;
1206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1207 "Entered %s.\n", __func__);
1209 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1210 mcp->mb[1] = 0xAAAA;
1211 mcp->mb[2] = 0x5555;
1212 mcp->mb[3] = 0xAA55;
1213 mcp->mb[4] = 0x55AA;
1214 mcp->mb[5] = 0xA5A5;
1215 mcp->mb[6] = 0x5A5A;
1216 mcp->mb[7] = 0x2525;
1217 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1218 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1219 mcp->tov = MBX_TOV_SECONDS;
1221 rval = qla2x00_mailbox_command(vha, mcp);
1223 if (rval == QLA_SUCCESS) {
1224 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1225 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1226 rval = QLA_FUNCTION_FAILED;
1227 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1228 mcp->mb[7] != 0x2525)
1229 rval = QLA_FUNCTION_FAILED;
1232 if (rval != QLA_SUCCESS) {
1234 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1237 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1238 "Done %s.\n", __func__);
1245 * qla2x00_verify_checksum
1246 * Verify firmware checksum.
1249 * ha = adapter block pointer.
1250 * TARGET_QUEUE_LOCK must be released.
1251 * ADAPTER_STATE_LOCK must be released.
1254 * qla2x00 local function return status code.
/*
 * qla2x00_verify_checksum() - ask the firmware to verify the checksum of
 * the image at @risc_addr via MBC_VERIFY_CHECKSUM. FWI2-capable parts
 * take a 32-bit address in mb[1]/mb[2]; legacy parts 16-bit in mb[1].
 * On failure the returned mailbox value(s) are logged as the bad sum.
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations
 * and the return are elided.
 */
1260 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1264 mbx_cmd_t *mcp = &mc;
1266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1267 "Entered %s.\n", __func__);
1269 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1270 mcp->out_mb = MBX_0;
1272 if (IS_FWI2_CAPABLE(vha->hw)) {
1273 mcp->mb[1] = MSW(risc_addr);
1274 mcp->mb[2] = LSW(risc_addr);
1275 mcp->out_mb |= MBX_2|MBX_1;
1276 mcp->in_mb |= MBX_2|MBX_1;
1278 mcp->mb[1] = LSW(risc_addr);
1279 mcp->out_mb |= MBX_1;
1280 mcp->in_mb |= MBX_1;
1283 mcp->tov = MBX_TOV_SECONDS;
1285 rval = qla2x00_mailbox_command(vha, mcp);
1287 if (rval != QLA_SUCCESS) {
1288 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1289 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1290 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1293 "Done %s.\n", __func__);
1300 * qla2x00_issue_iocb
1301 * Issue IOCB using mailbox command
1304 * ha = adapter state pointer.
1305 * buffer = buffer pointer.
1306 * phys_addr = physical address of buffer.
1307 * size = size of buffer.
1308 * TARGET_QUEUE_LOCK must be released.
1309 * ADAPTER_STATE_LOCK must be released.
1312 * qla2x00 local function return status code.
/*
 * qla2x00_issue_iocb_timeout() - submit a single IOCB through the mailbox
 * interface (MBC_IOCB_COMMAND_A64) with the 64-bit DMA address of
 * @buffer split across mb[2,3,6,7], waiting up to @tov. On success the
 * status entry's reserved bits are masked before returning to the caller.
 * NOTE(review): excerpt is fragmentary — braces, rval/mc declarations,
 * the tov assignment and the return are elided.
 */
1318 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1319 dma_addr_t phys_addr, size_t size, uint32_t tov)
1323 mbx_cmd_t *mcp = &mc;
1325 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1326 "Entered %s.\n", __func__);
1328 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1330 mcp->mb[2] = MSW(phys_addr);
1331 mcp->mb[3] = LSW(phys_addr);
1332 mcp->mb[6] = MSW(MSD(phys_addr));
1333 mcp->mb[7] = LSW(MSD(phys_addr));
1334 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1335 mcp->in_mb = MBX_2|MBX_0;
1338 rval = qla2x00_mailbox_command(vha, mcp);
1340 if (rval != QLA_SUCCESS) {
1342 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1344 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1346 /* Mask reserved bits. */
1347 sts_entry->entry_status &=
1348 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1349 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1350 "Done %s.\n", __func__);
/*
 * qla2x00_issue_iocb() - convenience wrapper around
 * qla2x00_issue_iocb_timeout() using the default timeout.
 * NOTE(review): the timeout argument line is elided in this excerpt.
 */
1357 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1360 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1365 * qla2x00_abort_command
1366 * Abort command aborts a specified IOCB.
1369 * ha = adapter block pointer.
1370 * sp = SRB structure pointer.
1373 * qla2x00 local function return status code.
1379 qla2x00_abort_command(srb_t *sp)
1381 unsigned long flags = 0;
1383 uint32_t handle = 0;
1385 mbx_cmd_t *mcp = &mc;
1386 fc_port_t *fcport = sp->fcport;
1387 scsi_qla_host_t *vha = fcport->vha;
1388 struct qla_hw_data *ha = vha->hw;
1389 struct req_que *req;
1390 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1392 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1393 "Entered %s.\n", __func__);
1395 if (vha->flags.qpairs_available && sp->qpair)
1396 req = sp->qpair->req;
1400 spin_lock_irqsave(&ha->hardware_lock, flags);
1401 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1402 if (req->outstanding_cmds[handle] == sp)
1405 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1407 if (handle == req->num_outstanding_cmds) {
1408 /* command not found */
1409 return QLA_FUNCTION_FAILED;
1412 mcp->mb[0] = MBC_ABORT_COMMAND;
1413 if (HAS_EXTENDED_IDS(ha))
1414 mcp->mb[1] = fcport->loop_id;
1416 mcp->mb[1] = fcport->loop_id << 8;
1417 mcp->mb[2] = (uint16_t)handle;
1418 mcp->mb[3] = (uint16_t)(handle >> 16);
1419 mcp->mb[6] = (uint16_t)cmd->device->lun;
1420 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1422 mcp->tov = MBX_TOV_SECONDS;
1424 rval = qla2x00_mailbox_command(vha, mcp);
1426 if (rval != QLA_SUCCESS) {
1427 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1430 "Done %s.\n", __func__);
1437 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1441 mbx_cmd_t *mcp = &mc;
1442 scsi_qla_host_t *vha;
1443 struct req_que *req;
1444 struct rsp_que *rsp;
1449 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1450 "Entered %s.\n", __func__);
1452 req = vha->hw->req_q_map[0];
1454 mcp->mb[0] = MBC_ABORT_TARGET;
1455 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1456 if (HAS_EXTENDED_IDS(vha->hw)) {
1457 mcp->mb[1] = fcport->loop_id;
1459 mcp->out_mb |= MBX_10;
1461 mcp->mb[1] = fcport->loop_id << 8;
1463 mcp->mb[2] = vha->hw->loop_reset_delay;
1464 mcp->mb[9] = vha->vp_idx;
1467 mcp->tov = MBX_TOV_SECONDS;
1469 rval = qla2x00_mailbox_command(vha, mcp);
1470 if (rval != QLA_SUCCESS) {
1471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1472 "Failed=%x.\n", rval);
1475 /* Issue marker IOCB. */
1476 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1478 if (rval2 != QLA_SUCCESS) {
1479 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1480 "Failed to issue marker IOCB (%x).\n", rval2);
1482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1483 "Done %s.\n", __func__);
1490 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1494 mbx_cmd_t *mcp = &mc;
1495 scsi_qla_host_t *vha;
1496 struct req_que *req;
1497 struct rsp_que *rsp;
1501 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1502 "Entered %s.\n", __func__);
1504 req = vha->hw->req_q_map[0];
1506 mcp->mb[0] = MBC_LUN_RESET;
1507 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1508 if (HAS_EXTENDED_IDS(vha->hw))
1509 mcp->mb[1] = fcport->loop_id;
1511 mcp->mb[1] = fcport->loop_id << 8;
1512 mcp->mb[2] = (u32)l;
1514 mcp->mb[9] = vha->vp_idx;
1517 mcp->tov = MBX_TOV_SECONDS;
1519 rval = qla2x00_mailbox_command(vha, mcp);
1520 if (rval != QLA_SUCCESS) {
1521 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1524 /* Issue marker IOCB. */
1525 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1527 if (rval2 != QLA_SUCCESS) {
1528 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1529 "Failed to issue marker IOCB (%x).\n", rval2);
1531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1532 "Done %s.\n", __func__);
1539 * qla2x00_get_adapter_id
1540 * Get adapter ID and topology.
1543 * ha = adapter block pointer.
1544 * id = pointer for loop ID.
1545 * al_pa = pointer for AL_PA.
1546 * area = pointer for area.
1547 * domain = pointer for domain.
1548 * top = pointer for topology.
1549 * TARGET_QUEUE_LOCK must be released.
1550 * ADAPTER_STATE_LOCK must be released.
1553 * qla2x00 local function return status code.
1559 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1560 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1564 mbx_cmd_t *mcp = &mc;
1566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1567 "Entered %s.\n", __func__);
1569 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1570 mcp->mb[9] = vha->vp_idx;
1571 mcp->out_mb = MBX_9|MBX_0;
1572 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1573 if (IS_CNA_CAPABLE(vha->hw))
1574 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1575 if (IS_FWI2_CAPABLE(vha->hw))
1576 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1577 if (IS_QLA27XX(vha->hw))
1578 mcp->in_mb |= MBX_15;
1579 mcp->tov = MBX_TOV_SECONDS;
1581 rval = qla2x00_mailbox_command(vha, mcp);
1582 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1583 rval = QLA_COMMAND_ERROR;
1584 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1585 rval = QLA_INVALID_COMMAND;
1589 *al_pa = LSB(mcp->mb[2]);
1590 *area = MSB(mcp->mb[2]);
1591 *domain = LSB(mcp->mb[3]);
1593 *sw_cap = mcp->mb[7];
1595 if (rval != QLA_SUCCESS) {
1597 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1600 "Done %s.\n", __func__);
1602 if (IS_CNA_CAPABLE(vha->hw)) {
1603 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1604 vha->fcoe_fcf_idx = mcp->mb[10];
1605 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1606 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1607 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1608 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1609 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1610 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1612 /* If FA-WWN supported */
1613 if (IS_FAWWN_CAPABLE(vha->hw)) {
1614 if (mcp->mb[7] & BIT_14) {
1615 vha->port_name[0] = MSB(mcp->mb[16]);
1616 vha->port_name[1] = LSB(mcp->mb[16]);
1617 vha->port_name[2] = MSB(mcp->mb[17]);
1618 vha->port_name[3] = LSB(mcp->mb[17]);
1619 vha->port_name[4] = MSB(mcp->mb[18]);
1620 vha->port_name[5] = LSB(mcp->mb[18]);
1621 vha->port_name[6] = MSB(mcp->mb[19]);
1622 vha->port_name[7] = LSB(mcp->mb[19]);
1623 fc_host_port_name(vha->host) =
1624 wwn_to_u64(vha->port_name);
1625 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1626 "FA-WWN acquired %016llx\n",
1627 wwn_to_u64(vha->port_name));
1631 if (IS_QLA27XX(vha->hw))
1632 vha->bbcr = mcp->mb[15];
1639 * qla2x00_get_retry_cnt
1640 * Get current firmware login retry count and delay.
1643 * ha = adapter block pointer.
1644 * retry_cnt = pointer to login retry count.
1645 * tov = pointer to login timeout value.
1648 * qla2x00 local function return status code.
1654 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1660 mbx_cmd_t *mcp = &mc;
1662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1663 "Entered %s.\n", __func__);
1665 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1666 mcp->out_mb = MBX_0;
1667 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1668 mcp->tov = MBX_TOV_SECONDS;
1670 rval = qla2x00_mailbox_command(vha, mcp);
1672 if (rval != QLA_SUCCESS) {
1674 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1675 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1677 /* Convert returned data and check our values. */
1678 *r_a_tov = mcp->mb[3] / 2;
1679 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1680 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1681 /* Update to the larger values */
1682 *retry_cnt = (uint8_t)mcp->mb[1];
1686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1687 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1694 * qla2x00_init_firmware
1695 * Initialize adapter firmware.
1698 * ha = adapter block pointer.
1699 * dptr = Initialization control block pointer.
1700 * size = size of initialization control block.
1701 * TARGET_QUEUE_LOCK must be released.
1702 * ADAPTER_STATE_LOCK must be released.
1705 * qla2x00 local function return status code.
1711 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1715 mbx_cmd_t *mcp = &mc;
1716 struct qla_hw_data *ha = vha->hw;
1718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1719 "Entered %s.\n", __func__);
1721 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1722 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1723 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1725 if (ha->flags.npiv_supported)
1726 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1728 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1731 mcp->mb[2] = MSW(ha->init_cb_dma);
1732 mcp->mb[3] = LSW(ha->init_cb_dma);
1733 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1734 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1735 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1736 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1738 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1739 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1740 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1741 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1742 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1743 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1745 /* 1 and 2 should normally be captured. */
1746 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1747 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1748 /* mb3 is additional info about the installed SFP. */
1749 mcp->in_mb |= MBX_3;
1750 mcp->buf_size = size;
1751 mcp->flags = MBX_DMA_OUT;
1752 mcp->tov = MBX_TOV_SECONDS;
1753 rval = qla2x00_mailbox_command(vha, mcp);
1755 if (rval != QLA_SUCCESS) {
1757 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1758 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1759 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1761 if (IS_QLA27XX(ha)) {
1762 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1763 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1764 "Invalid SFP/Validation Failed\n");
1766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1767 "Done %s.\n", __func__);
1775 * qla2x00_get_port_database
1776 * Issue normal/enhanced get port database mailbox command
1777 * and copy device name as necessary.
1780 * ha = adapter state pointer.
1781 * dev = structure pointer.
1782 * opt = enhanced cmd option byte.
1785 * qla2x00 local function return status code.
1791 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1795 mbx_cmd_t *mcp = &mc;
1796 port_database_t *pd;
1797 struct port_database_24xx *pd24;
1799 struct qla_hw_data *ha = vha->hw;
1801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1802 "Entered %s.\n", __func__);
1805 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1807 ql_log(ql_log_warn, vha, 0x1050,
1808 "Failed to allocate port database structure.\n");
1810 return QLA_MEMORY_ALLOC_FAILED;
1813 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1814 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1815 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1816 mcp->mb[2] = MSW(pd_dma);
1817 mcp->mb[3] = LSW(pd_dma);
1818 mcp->mb[6] = MSW(MSD(pd_dma));
1819 mcp->mb[7] = LSW(MSD(pd_dma));
1820 mcp->mb[9] = vha->vp_idx;
1821 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1823 if (IS_FWI2_CAPABLE(ha)) {
1824 mcp->mb[1] = fcport->loop_id;
1826 mcp->out_mb |= MBX_10|MBX_1;
1827 mcp->in_mb |= MBX_1;
1828 } else if (HAS_EXTENDED_IDS(ha)) {
1829 mcp->mb[1] = fcport->loop_id;
1831 mcp->out_mb |= MBX_10|MBX_1;
1833 mcp->mb[1] = fcport->loop_id << 8 | opt;
1834 mcp->out_mb |= MBX_1;
1836 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1837 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1838 mcp->flags = MBX_DMA_IN;
1839 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1840 rval = qla2x00_mailbox_command(vha, mcp);
1841 if (rval != QLA_SUCCESS)
1844 if (IS_FWI2_CAPABLE(ha)) {
1846 u8 current_login_state, last_login_state;
1848 pd24 = (struct port_database_24xx *) pd;
1850 /* Check for logged in state. */
1851 if (fcport->fc4f_nvme) {
1852 current_login_state = pd24->current_login_state >> 4;
1853 last_login_state = pd24->last_login_state >> 4;
1855 current_login_state = pd24->current_login_state & 0xf;
1856 last_login_state = pd24->last_login_state & 0xf;
1858 fcport->current_login_state = pd24->current_login_state;
1859 fcport->last_login_state = pd24->last_login_state;
1861 /* Check for logged in state. */
1862 if (current_login_state != PDS_PRLI_COMPLETE &&
1863 last_login_state != PDS_PRLI_COMPLETE) {
1864 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1865 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1866 current_login_state, last_login_state,
1868 rval = QLA_FUNCTION_FAILED;
1874 if (fcport->loop_id == FC_NO_LOOP_ID ||
1875 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1876 memcmp(fcport->port_name, pd24->port_name, 8))) {
1877 /* We lost the device mid way. */
1878 rval = QLA_NOT_LOGGED_IN;
1882 /* Names are little-endian. */
1883 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1884 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1886 /* Get port_id of device. */
1887 fcport->d_id.b.domain = pd24->port_id[0];
1888 fcport->d_id.b.area = pd24->port_id[1];
1889 fcport->d_id.b.al_pa = pd24->port_id[2];
1890 fcport->d_id.b.rsvd_1 = 0;
1892 /* If not target must be initiator or unknown type. */
1893 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1894 fcport->port_type = FCT_INITIATOR;
1896 fcport->port_type = FCT_TARGET;
1898 /* Passback COS information. */
1899 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1900 FC_COS_CLASS2 : FC_COS_CLASS3;
1902 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1903 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1907 /* Check for logged in state. */
1908 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1909 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1910 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1911 "Unable to verify login-state (%x/%x) - "
1912 "portid=%02x%02x%02x.\n", pd->master_state,
1913 pd->slave_state, fcport->d_id.b.domain,
1914 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1915 rval = QLA_FUNCTION_FAILED;
1919 if (fcport->loop_id == FC_NO_LOOP_ID ||
1920 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1921 memcmp(fcport->port_name, pd->port_name, 8))) {
1922 /* We lost the device mid way. */
1923 rval = QLA_NOT_LOGGED_IN;
1927 /* Names are little-endian. */
1928 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1929 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1931 /* Get port_id of device. */
1932 fcport->d_id.b.domain = pd->port_id[0];
1933 fcport->d_id.b.area = pd->port_id[3];
1934 fcport->d_id.b.al_pa = pd->port_id[2];
1935 fcport->d_id.b.rsvd_1 = 0;
1937 /* If not target must be initiator or unknown type. */
1938 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1939 fcport->port_type = FCT_INITIATOR;
1941 fcport->port_type = FCT_TARGET;
1943 /* Passback COS information. */
1944 fcport->supported_classes = (pd->options & BIT_4) ?
1945 FC_COS_CLASS2: FC_COS_CLASS3;
1949 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1952 if (rval != QLA_SUCCESS) {
1953 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1954 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1955 mcp->mb[0], mcp->mb[1]);
1957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1958 "Done %s.\n", __func__);
1965 * qla2x00_get_firmware_state
1966 * Get adapter firmware state.
1969 * ha = adapter block pointer.
1970 * dptr = pointer for firmware state.
1971 * TARGET_QUEUE_LOCK must be released.
1972 * ADAPTER_STATE_LOCK must be released.
1975 * qla2x00 local function return status code.
1981 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1985 mbx_cmd_t *mcp = &mc;
1986 struct qla_hw_data *ha = vha->hw;
1988 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1989 "Entered %s.\n", __func__);
1991 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1992 mcp->out_mb = MBX_0;
1993 if (IS_FWI2_CAPABLE(vha->hw))
1994 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1996 mcp->in_mb = MBX_1|MBX_0;
1997 mcp->tov = MBX_TOV_SECONDS;
1999 rval = qla2x00_mailbox_command(vha, mcp);
2001 /* Return firmware states. */
2002 states[0] = mcp->mb[1];
2003 if (IS_FWI2_CAPABLE(vha->hw)) {
2004 states[1] = mcp->mb[2];
2005 states[2] = mcp->mb[3]; /* SFP info */
2006 states[3] = mcp->mb[4];
2007 states[4] = mcp->mb[5];
2008 states[5] = mcp->mb[6]; /* DPORT status */
2011 if (rval != QLA_SUCCESS) {
2013 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2015 if (IS_QLA27XX(ha)) {
2016 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2017 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2018 "Invalid SFP/Validation Failed\n");
2020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2021 "Done %s.\n", __func__);
2028 * qla2x00_get_port_name
2029 * Issue get port name mailbox command.
2030 * Returned name is in big endian format.
2033 * ha = adapter block pointer.
2034 * loop_id = loop ID of device.
2035 * name = pointer for name.
2036 * TARGET_QUEUE_LOCK must be released.
2037 * ADAPTER_STATE_LOCK must be released.
2040 * qla2x00 local function return status code.
2046 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2051 mbx_cmd_t *mcp = &mc;
2053 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2054 "Entered %s.\n", __func__);
2056 mcp->mb[0] = MBC_GET_PORT_NAME;
2057 mcp->mb[9] = vha->vp_idx;
2058 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2059 if (HAS_EXTENDED_IDS(vha->hw)) {
2060 mcp->mb[1] = loop_id;
2062 mcp->out_mb |= MBX_10;
2064 mcp->mb[1] = loop_id << 8 | opt;
2067 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2068 mcp->tov = MBX_TOV_SECONDS;
2070 rval = qla2x00_mailbox_command(vha, mcp);
2072 if (rval != QLA_SUCCESS) {
2074 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2077 /* This function returns name in big endian. */
2078 name[0] = MSB(mcp->mb[2]);
2079 name[1] = LSB(mcp->mb[2]);
2080 name[2] = MSB(mcp->mb[3]);
2081 name[3] = LSB(mcp->mb[3]);
2082 name[4] = MSB(mcp->mb[6]);
2083 name[5] = LSB(mcp->mb[6]);
2084 name[6] = MSB(mcp->mb[7]);
2085 name[7] = LSB(mcp->mb[7]);
2088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2089 "Done %s.\n", __func__);
2096 * qla24xx_link_initialize
2097 * Issue link initialization mailbox command.
2100 * ha = adapter block pointer.
2101 * TARGET_QUEUE_LOCK must be released.
2102 * ADAPTER_STATE_LOCK must be released.
2105 * qla2x00 local function return status code.
2111 qla24xx_link_initialize(scsi_qla_host_t *vha)
2115 mbx_cmd_t *mcp = &mc;
2117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2118 "Entered %s.\n", __func__);
2120 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2121 return QLA_FUNCTION_FAILED;
2123 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2125 if (vha->hw->operating_mode == LOOP)
2126 mcp->mb[1] |= BIT_6;
2128 mcp->mb[1] |= BIT_5;
2131 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2133 mcp->tov = MBX_TOV_SECONDS;
2135 rval = qla2x00_mailbox_command(vha, mcp);
2137 if (rval != QLA_SUCCESS) {
2138 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2140 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2141 "Done %s.\n", __func__);
2149 * Issue LIP reset mailbox command.
2152 * ha = adapter block pointer.
2153 * TARGET_QUEUE_LOCK must be released.
2154 * ADAPTER_STATE_LOCK must be released.
2157 * qla2x00 local function return status code.
2163 qla2x00_lip_reset(scsi_qla_host_t *vha)
2167 mbx_cmd_t *mcp = &mc;
2169 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2170 "Entered %s.\n", __func__);
2172 if (IS_CNA_CAPABLE(vha->hw)) {
2173 /* Logout across all FCFs. */
2174 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2177 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2178 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2179 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2182 mcp->mb[3] = vha->hw->loop_reset_delay;
2183 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2185 mcp->mb[0] = MBC_LIP_RESET;
2186 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2187 if (HAS_EXTENDED_IDS(vha->hw)) {
2188 mcp->mb[1] = 0x00ff;
2190 mcp->out_mb |= MBX_10;
2192 mcp->mb[1] = 0xff00;
2194 mcp->mb[2] = vha->hw->loop_reset_delay;
2198 mcp->tov = MBX_TOV_SECONDS;
2200 rval = qla2x00_mailbox_command(vha, mcp);
2202 if (rval != QLA_SUCCESS) {
2204 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2207 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2208 "Done %s.\n", __func__);
2219 * ha = adapter block pointer.
2220 * sns = pointer for command.
2221 * cmd_size = command size.
2222 * buf_size = response/command size.
2223 * TARGET_QUEUE_LOCK must be released.
2224 * ADAPTER_STATE_LOCK must be released.
2227 * qla2x00 local function return status code.
2233 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2234 uint16_t cmd_size, size_t buf_size)
2238 mbx_cmd_t *mcp = &mc;
2240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2241 "Entered %s.\n", __func__);
2243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2244 "Retry cnt=%d ratov=%d total tov=%d.\n",
2245 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2247 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2248 mcp->mb[1] = cmd_size;
2249 mcp->mb[2] = MSW(sns_phys_address);
2250 mcp->mb[3] = LSW(sns_phys_address);
2251 mcp->mb[6] = MSW(MSD(sns_phys_address));
2252 mcp->mb[7] = LSW(MSD(sns_phys_address));
2253 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2254 mcp->in_mb = MBX_0|MBX_1;
2255 mcp->buf_size = buf_size;
2256 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2257 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2258 rval = qla2x00_mailbox_command(vha, mcp);
2260 if (rval != QLA_SUCCESS) {
2262 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2263 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2264 rval, mcp->mb[0], mcp->mb[1]);
2267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2268 "Done %s.\n", __func__);
2275 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2276 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2280 struct logio_entry_24xx *lg;
2283 struct qla_hw_data *ha = vha->hw;
2284 struct req_que *req;
2286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2287 "Entered %s.\n", __func__);
2289 if (vha->vp_idx && vha->qpair)
2290 req = vha->qpair->req;
2292 req = ha->req_q_map[0];
2294 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2296 ql_log(ql_log_warn, vha, 0x1062,
2297 "Failed to allocate login IOCB.\n");
2298 return QLA_MEMORY_ALLOC_FAILED;
2301 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2302 lg->entry_count = 1;
2303 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2304 lg->nport_handle = cpu_to_le16(loop_id);
2305 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2307 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2309 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2310 lg->port_id[0] = al_pa;
2311 lg->port_id[1] = area;
2312 lg->port_id[2] = domain;
2313 lg->vp_index = vha->vp_idx;
2314 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2315 (ha->r_a_tov / 10 * 2) + 2);
2316 if (rval != QLA_SUCCESS) {
2317 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2318 "Failed to issue login IOCB (%x).\n", rval);
2319 } else if (lg->entry_status != 0) {
2320 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2321 "Failed to complete IOCB -- error status (%x).\n",
2323 rval = QLA_FUNCTION_FAILED;
2324 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2325 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2326 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2328 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2329 "Failed to complete IOCB -- completion status (%x) "
2330 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2334 case LSC_SCODE_PORTID_USED:
2335 mb[0] = MBS_PORT_ID_USED;
2336 mb[1] = LSW(iop[1]);
2338 case LSC_SCODE_NPORT_USED:
2339 mb[0] = MBS_LOOP_ID_USED;
2341 case LSC_SCODE_NOLINK:
2342 case LSC_SCODE_NOIOCB:
2343 case LSC_SCODE_NOXCB:
2344 case LSC_SCODE_CMD_FAILED:
2345 case LSC_SCODE_NOFABRIC:
2346 case LSC_SCODE_FW_NOT_READY:
2347 case LSC_SCODE_NOT_LOGGED_IN:
2348 case LSC_SCODE_NOPCB:
2349 case LSC_SCODE_ELS_REJECT:
2350 case LSC_SCODE_CMD_PARAM_ERR:
2351 case LSC_SCODE_NONPORT:
2352 case LSC_SCODE_LOGGED_IN:
2353 case LSC_SCODE_NOFLOGI_ACC:
2355 mb[0] = MBS_COMMAND_ERROR;
2359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2360 "Done %s.\n", __func__);
2362 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2364 mb[0] = MBS_COMMAND_COMPLETE;
2366 if (iop[0] & BIT_4) {
2372 /* Passback COS information. */
2374 if (lg->io_parameter[7] || lg->io_parameter[8])
2375 mb[10] |= BIT_0; /* Class 2. */
2376 if (lg->io_parameter[9] || lg->io_parameter[10])
2377 mb[10] |= BIT_1; /* Class 3. */
2378 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2379 mb[10] |= BIT_7; /* Confirmed Completion
2384 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2390 * qla2x00_login_fabric
2391 * Issue login fabric port mailbox command.
2394 * ha = adapter block pointer.
2395 * loop_id = device loop ID.
2396 * domain = device domain.
2397 * area = device area.
2398 * al_pa = device AL_PA.
2399 * status = pointer for return status.
2400 * opt = command options.
2401 * TARGET_QUEUE_LOCK must be released.
2402 * ADAPTER_STATE_LOCK must be released.
2405 * qla2x00 local function return status code.
2411 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2412 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2416 mbx_cmd_t *mcp = &mc;
2417 struct qla_hw_data *ha = vha->hw;
2419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2420 "Entered %s.\n", __func__);
2422 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2423 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2424 if (HAS_EXTENDED_IDS(ha)) {
2425 mcp->mb[1] = loop_id;
2427 mcp->out_mb |= MBX_10;
2429 mcp->mb[1] = (loop_id << 8) | opt;
2431 mcp->mb[2] = domain;
2432 mcp->mb[3] = area << 8 | al_pa;
2434 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2435 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2437 rval = qla2x00_mailbox_command(vha, mcp);
2439 /* Return mailbox statuses. */
2446 /* COS retrieved from Get-Port-Database mailbox command. */
2450 if (rval != QLA_SUCCESS) {
2451 /* RLU tmp code: need to change main mailbox_command function to
2452 * return ok even when the mailbox completion value is not
2453 * SUCCESS. The caller needs to be responsible to interpret
2454 * the return values of this mailbox command if we're not
2455 * to change too much of the existing code.
2457 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2458 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2459 mcp->mb[0] == 0x4006)
2463 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2464 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2465 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2469 "Done %s.\n", __func__);
2476 * qla2x00_login_local_device
2477 * Issue login loop port mailbox command.
2480 * ha = adapter block pointer.
2481 * loop_id = device loop ID.
2482 * opt = command options.
2485 * Return status code.
2492 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2493 uint16_t *mb_ret, uint8_t opt)
2497 mbx_cmd_t *mcp = &mc;
2498 struct qla_hw_data *ha = vha->hw;
2500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2501 "Entered %s.\n", __func__);
2503 if (IS_FWI2_CAPABLE(ha))
2504 return qla24xx_login_fabric(vha, fcport->loop_id,
2505 fcport->d_id.b.domain, fcport->d_id.b.area,
2506 fcport->d_id.b.al_pa, mb_ret, opt);
2508 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2509 if (HAS_EXTENDED_IDS(ha))
2510 mcp->mb[1] = fcport->loop_id;
2512 mcp->mb[1] = fcport->loop_id << 8;
2514 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2515 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2516 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2518 rval = qla2x00_mailbox_command(vha, mcp);
2520 /* Return mailbox statuses. */
2521 if (mb_ret != NULL) {
2522 mb_ret[0] = mcp->mb[0];
2523 mb_ret[1] = mcp->mb[1];
2524 mb_ret[6] = mcp->mb[6];
2525 mb_ret[7] = mcp->mb[7];
2528 if (rval != QLA_SUCCESS) {
2529 /* AV tmp code: need to change main mailbox_command function to
2530 * return ok even when the mailbox completion value is not
2531 * SUCCESS. The caller needs to be responsible to interpret
2532 * the return values of this mailbox command if we're not
2533 * to change too much of the existing code.
2535 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2538 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2539 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2540 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2543 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2544 "Done %s.\n", __func__);
2551 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2552 uint8_t area, uint8_t al_pa)
2555 struct logio_entry_24xx *lg;
2557 struct qla_hw_data *ha = vha->hw;
2558 struct req_que *req;
2560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2561 "Entered %s.\n", __func__);
2563 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2565 ql_log(ql_log_warn, vha, 0x106e,
2566 "Failed to allocate logout IOCB.\n");
2567 return QLA_MEMORY_ALLOC_FAILED;
2571 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2572 lg->entry_count = 1;
2573 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2574 lg->nport_handle = cpu_to_le16(loop_id);
2576 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2578 lg->port_id[0] = al_pa;
2579 lg->port_id[1] = area;
2580 lg->port_id[2] = domain;
2581 lg->vp_index = vha->vp_idx;
2582 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2583 (ha->r_a_tov / 10 * 2) + 2);
2584 if (rval != QLA_SUCCESS) {
2585 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2586 "Failed to issue logout IOCB (%x).\n", rval);
2587 } else if (lg->entry_status != 0) {
2588 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2589 "Failed to complete IOCB -- error status (%x).\n",
2591 rval = QLA_FUNCTION_FAILED;
2592 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2593 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2594 "Failed to complete IOCB -- completion status (%x) "
2595 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2596 le32_to_cpu(lg->io_parameter[0]),
2597 le32_to_cpu(lg->io_parameter[1]));
2600 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2601 "Done %s.\n", __func__);
2604 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2610 * qla2x00_fabric_logout
2611 * Issue logout fabric port mailbox command.
2614 * ha = adapter block pointer.
2615 * loop_id = device loop ID.
2616 * TARGET_QUEUE_LOCK must be released.
2617 * ADAPTER_STATE_LOCK must be released.
2620 * qla2x00 local function return status code.
2626 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2627 uint8_t area, uint8_t al_pa)
2631 mbx_cmd_t *mcp = &mc;
2633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2634 "Entered %s.\n", __func__);
2636 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2637 mcp->out_mb = MBX_1|MBX_0;
2638 if (HAS_EXTENDED_IDS(vha->hw)) {
2639 mcp->mb[1] = loop_id;
2641 mcp->out_mb |= MBX_10;
2643 mcp->mb[1] = loop_id << 8;
2646 mcp->in_mb = MBX_1|MBX_0;
2647 mcp->tov = MBX_TOV_SECONDS;
2649 rval = qla2x00_mailbox_command(vha, mcp);
2651 if (rval != QLA_SUCCESS) {
2653 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2654 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2658 "Done %s.\n", __func__);
2665 * qla2x00_full_login_lip
2666 * Issue full login LIP mailbox command.
2669 * ha = adapter block pointer.
2670 * TARGET_QUEUE_LOCK must be released.
2671 * ADAPTER_STATE_LOCK must be released.
2674 * qla2x00 local function return status code.
2680 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2684 mbx_cmd_t *mcp = &mc;
2686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2687 "Entered %s.\n", __func__);
2689 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2690 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2693 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2695 mcp->tov = MBX_TOV_SECONDS;
2697 rval = qla2x00_mailbox_command(vha, mcp);
2699 if (rval != QLA_SUCCESS) {
2701 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2705 "Done %s.\n", __func__);
2712 * qla2x00_get_id_list
2715 * ha = adapter block pointer.
2718 * qla2x00 local function return status code.
/*
 * Issues MBC_GET_ID_LIST to retrieve the firmware's port-ID list into the
 * caller-supplied DMA buffer.  The 64-bit DMA address is split across
 * different mailbox registers depending on the adapter generation; the
 * FWI2 layout also carries the virtual-port index in mb[9].  On success
 * the number of returned entries (from mb[1]) is stored in *entries.
 */
2724 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2729 mbx_cmd_t *mcp = &mc;
2731 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2732 "Entered %s.\n", __func__);
/* A destination buffer is mandatory. */
2734 if (id_list == NULL)
2735 return QLA_FUNCTION_FAILED;
2737 mcp->mb[0] = MBC_GET_ID_LIST;
2738 mcp->out_mb = MBX_0;
2739 if (IS_FWI2_CAPABLE(vha->hw)) {
/* ISP24xx+ register layout: mb2/mb3 = low dword, mb6/mb7 = high dword. */
2740 mcp->mb[2] = MSW(id_list_dma);
2741 mcp->mb[3] = LSW(id_list_dma);
2742 mcp->mb[6] = MSW(MSD(id_list_dma));
2743 mcp->mb[7] = LSW(MSD(id_list_dma));
2745 mcp->mb[9] = vha->vp_idx;
2746 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
/* Legacy (pre-FWI2) register layout uses mb1/mb2/mb3/mb6. */
2748 mcp->mb[1] = MSW(id_list_dma);
2749 mcp->mb[2] = LSW(id_list_dma);
2750 mcp->mb[3] = MSW(MSD(id_list_dma));
2751 mcp->mb[6] = LSW(MSD(id_list_dma));
2752 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2754 mcp->in_mb = MBX_1|MBX_0;
2755 mcp->tov = MBX_TOV_SECONDS;
2757 rval = qla2x00_mailbox_command(vha, mcp);
2759 if (rval != QLA_SUCCESS) {
2761 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
/* Success path: report the entry count returned by the firmware. */
2763 *entries = mcp->mb[1];
2764 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2765 "Done %s.\n", __func__);
2772 * qla2x00_get_resource_cnts
2773 * Get current firmware resource counts.
2776 * ha = adapter block pointer.
2779 * qla2x00 local function return status code.
/*
 * Issues MBC_GET_RESOURCE_COUNTS and caches the returned exchange (XCB)
 * and IOCB counts in the qla_hw_data structure.  mb[12] (max FCF count)
 * is only defined for ISP81xx/83xx/27xx firmware, and mb[11]
 * (max NPIV vports) is only consumed when NPIV is supported.
 */
2785 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2787 struct qla_hw_data *ha = vha->hw;
2790 mbx_cmd_t *mcp = &mc;
2792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2793 "Entered %s.\n", __func__);
2795 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2796 mcp->out_mb = MBX_0;
2797 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* mb[12] only exists on these newer adapter families. */
2798 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2799 mcp->in_mb |= MBX_12;
2800 mcp->tov = MBX_TOV_SECONDS;
2802 rval = qla2x00_mailbox_command(vha, mcp);
2804 if (rval != QLA_SUCCESS) {
2806 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2807 "Failed mb[0]=%x.\n", mcp->mb[0]);
2809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2810 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2811 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2812 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2813 mcp->mb[11], mcp->mb[12]);
/* Cache firmware resource counts for later capacity decisions. */
2815 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2816 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2817 ha->cur_fw_xcb_count = mcp->mb[3];
2818 ha->orig_fw_xcb_count = mcp->mb[6];
2819 ha->cur_fw_iocb_count = mcp->mb[7];
2820 ha->orig_fw_iocb_count = mcp->mb[10];
2821 if (ha->flags.npiv_supported)
2822 ha->max_npiv_vports = mcp->mb[11];
2823 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2824 ha->fw_max_fcf_count = mcp->mb[12];
2831 * qla2x00_get_fcal_position_map
2832 * Get FCAL (LILP) position map using mailbox command
2835 * ha = adapter state pointer.
2836 * pos_map = buffer pointer (can be NULL).
2839 * qla2x00 local function return status code.
/*
 * Allocates a scratch DMA buffer from the adapter's small-buffer pool,
 * issues MBC_GET_FC_AL_POSITION_MAP into it, and (when the caller
 * supplied pos_map) copies FCAL_MAP_SIZE bytes of the map back out.
 * The buffer is always freed before returning.
 */
2845 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2849 mbx_cmd_t *mcp = &mc;
2851 dma_addr_t pmap_dma;
2852 struct qla_hw_data *ha = vha->hw;
2854 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2855 "Entered %s.\n", __func__);
2857 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2859 ql_log(ql_log_warn, vha, 0x1080,
2860 "Memory alloc failed.\n");
2861 return QLA_MEMORY_ALLOC_FAILED;
2864 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2865 mcp->mb[2] = MSW(pmap_dma);
2866 mcp->mb[3] = LSW(pmap_dma);
2867 mcp->mb[6] = MSW(MSD(pmap_dma));
2868 mcp->mb[7] = LSW(MSD(pmap_dma));
2869 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2870 mcp->in_mb = MBX_1|MBX_0;
2871 mcp->buf_size = FCAL_MAP_SIZE;
/* MBX_DMA_IN: mailbox layer treats buf as firmware-to-host DMA. */
2872 mcp->flags = MBX_DMA_IN;
/* Longer timeout: scaled from the configured login timeout (2.5x). */
2873 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2874 rval = qla2x00_mailbox_command(vha, mcp);
2876 if (rval == QLA_SUCCESS) {
2877 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2878 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2879 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2880 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
/* Copy out only when the caller asked for the map (pos_map != NULL). */
2884 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2886 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2888 if (rval != QLA_SUCCESS) {
2889 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2892 "Done %s.\n", __func__);
2899 * qla2x00_get_link_status
2902 * ha = adapter block pointer.
2903 * loop_id = device loop ID.
2904 * ret_buf = pointer to link status return buffer.
2908 * BIT_0 = mem alloc error.
2909 * BIT_1 = mailbox error.
/*
 * Issues MBC_GET_LINK_STATUS for the given loop ID, DMAing the link
 * statistics into the caller's pre-mapped buffer, then byte-swaps the
 * leading fields in place (firmware writes little-endian dwords).  Only
 * the dwords up to link_up_cnt are swapped here; see the 'dwords'
 * computation below.
 */
2912 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2913 struct link_statistics *stats, dma_addr_t stats_dma)
2917 mbx_cmd_t *mcp = &mc;
2918 uint32_t *iter = (void *)stats;
/* Number of le32 fields preceding link_up_cnt in struct link_statistics. */
2919 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2920 struct qla_hw_data *ha = vha->hw;
2922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2923 "Entered %s.\n", __func__);
2925 mcp->mb[0] = MBC_GET_LINK_STATUS;
2926 mcp->mb[2] = MSW(LSD(stats_dma));
2927 mcp->mb[3] = LSW(LSD(stats_dma));
2928 mcp->mb[6] = MSW(MSD(stats_dma));
2929 mcp->mb[7] = LSW(MSD(stats_dma));
2930 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* loop_id placement differs per adapter generation (see branches). */
2932 if (IS_FWI2_CAPABLE(ha)) {
2933 mcp->mb[1] = loop_id;
2936 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2937 mcp->in_mb |= MBX_1;
2938 } else if (HAS_EXTENDED_IDS(ha)) {
2939 mcp->mb[1] = loop_id;
2941 mcp->out_mb |= MBX_10|MBX_1;
/* Legacy adapters carry the loop ID in the high byte of mb[1]. */
2943 mcp->mb[1] = loop_id << 8;
2944 mcp->out_mb |= MBX_1;
2946 mcp->tov = MBX_TOV_SECONDS;
2947 mcp->flags = IOCTL_CMD;
2948 rval = qla2x00_mailbox_command(vha, mcp);
2950 if (rval == QLA_SUCCESS) {
2951 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2952 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2953 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2954 rval = QLA_FUNCTION_FAILED;
2956 /* Re-endianize - firmware data is le32. */
2957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2958 "Done %s.\n", __func__);
2959 for ( ; dwords--; iter++)
2964 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * Retrieve ISP link/private statistics (MBC_GET_LINK_PRIV_STATS) via the
 * qla24xx_send_mb_cmd path rather than the direct mailbox interface.
 * On success the whole link_statistics buffer is byte-swapped in place
 * from firmware little-endian to host order.
 */
2971 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2972 dma_addr_t stats_dma, uint16_t options)
2976 mbx_cmd_t *mcp = &mc;
2977 uint32_t *iter, dwords;
2979 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2980 "Entered %s.\n", __func__);
2982 memset(&mc, 0, sizeof(mc));
2983 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
2984 mc.mb[2] = MSW(stats_dma);
2985 mc.mb[3] = LSW(stats_dma);
2986 mc.mb[6] = MSW(MSD(stats_dma));
2987 mc.mb[7] = LSW(MSD(stats_dma));
/* Transfer length in dwords. */
2988 mc.mb[8] = sizeof(struct link_statistics) / 4;
/* NOTE(review): cpu_to_le16() results stored into native u16 mb[] slots;
 * presumably intentional for this send path — confirm endianness contract
 * of qla24xx_send_mb_cmd. */
2989 mc.mb[9] = cpu_to_le16(vha->vp_idx);
2990 mc.mb[10] = cpu_to_le16(options);
2992 rval = qla24xx_send_mb_cmd(vha, &mc);
2994 if (rval == QLA_SUCCESS) {
2995 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2996 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2997 "Failed mb[0]=%x.\n", mcp->mb[0]);
2998 rval = QLA_FUNCTION_FAILED;
3000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3001 "Done %s.\n", __func__);
3002 /* Re-endianize - firmware data is le32. */
3003 dwords = sizeof(struct link_statistics) / 4;
3004 iter = &stats->link_fail_cnt;
3005 for ( ; dwords--; iter++)
3010 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * Abort an outstanding SRB on a 24xx-class adapter by issuing an abort
 * IOCB.  Locates the command's handle in the owning request queue under
 * hardware_lock, builds the ABORT_IOCB in a pool-allocated DMA buffer,
 * and submits it synchronously via qla2x00_issue_iocb().
 */
3017 qla24xx_abort_command(srb_t *sp)
3020 unsigned long flags = 0;
3022 struct abort_entry_24xx *abt;
3025 fc_port_t *fcport = sp->fcport;
3026 struct scsi_qla_host *vha = fcport->vha;
3027 struct qla_hw_data *ha = vha->hw;
3028 struct req_que *req = vha->req;
3030 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3031 "Entered %s.\n", __func__);
/* Commands issued on a qpair live on that qpair's request queue. */
3033 if (vha->flags.qpairs_available && sp->qpair)
3034 req = sp->qpair->req;
/* Async TMF path handles the abort without blocking here. */
3036 if (ql2xasynctmfenable)
3037 return qla24xx_async_abort_command(sp);
/* Find the outstanding-command handle for this SRB (slot 0 unused). */
3039 spin_lock_irqsave(&ha->hardware_lock, flags);
3040 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3041 if (req->outstanding_cmds[handle] == sp)
3044 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3045 if (handle == req->num_outstanding_cmds) {
3046 /* Command not found. */
3047 return QLA_FUNCTION_FAILED;
3050 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3052 ql_log(ql_log_warn, vha, 0x108d,
3053 "Failed to allocate abort IOCB.\n");
3054 return QLA_MEMORY_ALLOC_FAILED;
3057 abt->entry_type = ABORT_IOCB_TYPE;
3058 abt->entry_count = 1;
3059 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3060 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3061 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3062 abt->port_id[0] = fcport->d_id.b.al_pa;
3063 abt->port_id[1] = fcport->d_id.b.area;
3064 abt->port_id[2] = fcport->d_id.b.domain;
3065 abt->vp_index = fcport->vha->vp_idx;
3067 abt->req_que_no = cpu_to_le16(req->id);
3069 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3070 if (rval != QLA_SUCCESS) {
3071 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3072 "Failed to issue IOCB (%x).\n", rval);
3073 } else if (abt->entry_status != 0) {
3074 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3075 "Failed to complete IOCB -- error status (%x).\n",
3077 rval = QLA_FUNCTION_FAILED;
/* Firmware writes the completion status back into nport_handle;
 * 0 means success. */
3078 } else if (abt->nport_handle != cpu_to_le16(0)) {
3079 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3080 "Failed to complete IOCB -- completion status (%x).\n",
3081 le16_to_cpu(abt->nport_handle));
/* NOTE(review): le16 field compared with host-order CS_IOCB_ERROR —
 * presumably safe for this small value; confirm on big-endian. */
3082 if (abt->nport_handle == CS_IOCB_ERROR)
3083 rval = QLA_FUNCTION_PARAMETER_ERROR;
3085 rval = QLA_FUNCTION_FAILED;
3087 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3088 "Done %s.\n", __func__);
3091 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Task-management IOCB exchange buffer: the request (tsk) and the
 * firmware's status response (sts) overlay the same DMA memory, since
 * the firmware overwrites the request entry with the completion entry.
 */
3096 struct tsk_mgmt_cmd {
3098 struct tsk_mgmt_entry tsk;
3099 struct sts_entry_24xx sts;
/*
 * Common worker for 24xx task-management functions (target reset / LUN
 * reset).  Builds a TSK_MGMT IOCB in a pool-allocated buffer, issues it
 * synchronously, validates the overlaid status entry, and finally posts
 * a marker IOCB so the firmware resynchronizes the affected ID/LUN.
 *
 * @name:   human-readable TMF name for log messages ("Target"/"Lun").
 * @type:   TCF_* control flag selecting the task-management function.
 * @fcport: target remote port.
 * @l:      LUN (only meaningful for TCF_LUN_RESET).
 * @tag:    task tag (unused in the visible code paths).
 */
3104 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3105 uint64_t l, int tag)
3108 struct tsk_mgmt_cmd *tsk;
3109 struct sts_entry_24xx *sts;
3111 scsi_qla_host_t *vha;
3112 struct qla_hw_data *ha;
3113 struct req_que *req;
3114 struct rsp_que *rsp;
3115 struct qla_qpair *qpair;
3121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3122 "Entered %s.\n", __func__);
/* Virtual ports with a qpair use that qpair's queues. */
3124 if (vha->vp_idx && vha->qpair) {
3133 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3135 ql_log(ql_log_warn, vha, 0x1093,
3136 "Failed to allocate task management IOCB.\n");
3137 return QLA_MEMORY_ALLOC_FAILED;
3140 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3141 tsk->p.tsk.entry_count = 1;
3142 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3143 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
/* Timeout: twice the resource-allocation timeout (r_a_tov is in 100ms). */
3144 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3145 tsk->p.tsk.control_flags = cpu_to_le32(type);
3146 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3147 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3148 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3149 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3150 if (type == TCF_LUN_RESET) {
/* LUN field must be in FCP (big-endian) wire format. */
3151 int_to_scsilun(l, &tsk->p.tsk.lun);
3152 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3153 sizeof(tsk->p.tsk.lun));
3157 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3158 if (rval != QLA_SUCCESS) {
3159 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3160 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3161 } else if (sts->entry_status != 0) {
3162 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3163 "Failed to complete IOCB -- error status (%x).\n",
3165 rval = QLA_FUNCTION_FAILED;
3166 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3167 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3168 "Failed to complete IOCB -- completion status (%x).\n",
3169 le16_to_cpu(sts->comp_status));
3170 rval = QLA_FUNCTION_FAILED;
/* Response-info bytes present: byte 3 carries the FCP rsp code. */
3171 } else if (le16_to_cpu(sts->scsi_status) &
3172 SS_RESPONSE_INFO_LEN_VALID) {
3173 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3174 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3175 "Ignoring inconsistent data length -- not enough "
3176 "response info (%d).\n",
3177 le32_to_cpu(sts->rsp_data_len));
3178 } else if (sts->data[3]) {
3179 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3180 "Failed to complete IOCB -- response (%x).\n",
3182 rval = QLA_FUNCTION_FAILED;
3186 /* Issue marker IOCB. */
3187 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3188 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3189 if (rval2 != QLA_SUCCESS) {
3190 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3191 "Failed to issue marker IOCB (%x).\n", rval2);
3193 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3194 "Done %s.\n", __func__);
3197 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * Target-reset entry point: prefer the async TMF path when enabled and
 * supported, otherwise fall through to the synchronous IOCB worker.
 */
3203 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3205 struct qla_hw_data *ha = fcport->vha->hw;
3207 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3208 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3210 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * LUN-reset entry point: mirror of qla24xx_abort_target() for
 * TCF_LUN_RESET; async when enabled/supported, else synchronous IOCB.
 */
3214 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3216 struct qla_hw_data *ha = fcport->vha->hw;
3218 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3219 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3221 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * Deliberately provoke a firmware system error (MBC_GEN_SYSTEM_ERROR),
 * typically used for diagnostics/firmware-dump testing.  Supported only
 * on ISP23xx and FWI2-capable adapters.
 */
3225 qla2x00_system_error(scsi_qla_host_t *vha)
3229 mbx_cmd_t *mcp = &mc;
3230 struct qla_hw_data *ha = vha->hw;
3232 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3233 return QLA_FUNCTION_FAILED;
3235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3236 "Entered %s.\n", __func__);
3238 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3239 mcp->out_mb = MBX_0;
3243 rval = qla2x00_mailbox_command(vha, mcp);
3245 if (rval != QLA_SUCCESS) {
3246 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3249 "Done %s.\n", __func__);
/*
 * Write one SerDes register word (MBC_WRITE_SERDES).  Restricted to
 * ISP25xx/2031/27xx; ISP2031 only accepts an 8-bit data value in mb[2].
 */
3256 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3260 mbx_cmd_t *mcp = &mc;
3262 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3263 !IS_QLA27XX(vha->hw))
3264 return QLA_FUNCTION_FAILED;
3266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3267 "Entered %s.\n", __func__);
3269 mcp->mb[0] = MBC_WRITE_SERDES;
/* ISP2031 SerDes registers are 8 bits wide. */
3271 if (IS_QLA2031(vha->hw))
3272 mcp->mb[2] = data & 0xff;
3277 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3279 mcp->tov = MBX_TOV_SECONDS;
3281 rval = qla2x00_mailbox_command(vha, mcp);
3283 if (rval != QLA_SUCCESS) {
3284 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3285 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3288 "Done %s.\n", __func__);
/*
 * Read one SerDes register word (MBC_READ_SERDES) into *data.
 * Restricted to ISP25xx/2031/27xx; on ISP2031 only the low byte of
 * mb[1] is valid.  Note *data is written from mb[1] regardless of rval;
 * callers must check the return status.
 */
3295 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3299 mbx_cmd_t *mcp = &mc;
3301 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3302 !IS_QLA27XX(vha->hw))
3303 return QLA_FUNCTION_FAILED;
3305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3306 "Entered %s.\n", __func__);
3308 mcp->mb[0] = MBC_READ_SERDES;
3311 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3312 mcp->in_mb = MBX_1|MBX_0;
3313 mcp->tov = MBX_TOV_SECONDS;
3315 rval = qla2x00_mailbox_command(vha, mcp);
/* ISP2031 returns an 8-bit value; mask off the undefined high byte. */
3317 if (IS_QLA2031(vha->hw))
3318 *data = mcp->mb[1] & 0xff;
3322 if (rval != QLA_SUCCESS) {
3323 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3324 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3327 "Done %s.\n", __func__);
/*
 * ISP8044-specific 32-bit SerDes register write via
 * MBC_SET_GET_ETH_SERDES_REG with the HCS_WRITE_SERDES sub-opcode.
 * Address and data are each split into LSW/MSW mailbox pairs.
 */
3334 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3338 mbx_cmd_t *mcp = &mc;
3340 if (!IS_QLA8044(vha->hw))
3341 return QLA_FUNCTION_FAILED;
3343 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3344 "Entered %s.\n", __func__);
3346 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3347 mcp->mb[1] = HCS_WRITE_SERDES;
3348 mcp->mb[3] = LSW(addr);
3349 mcp->mb[4] = MSW(addr);
3350 mcp->mb[5] = LSW(data);
3351 mcp->mb[6] = MSW(data);
3352 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3354 mcp->tov = MBX_TOV_SECONDS;
3356 rval = qla2x00_mailbox_command(vha, mcp);
3358 if (rval != QLA_SUCCESS) {
3359 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3360 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3362 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3363 "Done %s.\n", __func__);
/*
 * ISP8044-specific 32-bit SerDes register read via
 * MBC_SET_GET_ETH_SERDES_REG with the HCS_READ_SERDES sub-opcode.
 * The 32-bit result is reassembled from mb[2] (high) and mb[1] (low).
 * Note *data is written unconditionally; callers must check rval.
 */
3370 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3374 mbx_cmd_t *mcp = &mc;
3376 if (!IS_QLA8044(vha->hw))
3377 return QLA_FUNCTION_FAILED;
3379 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3380 "Entered %s.\n", __func__);
3382 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3383 mcp->mb[1] = HCS_READ_SERDES;
3384 mcp->mb[3] = LSW(addr);
3385 mcp->mb[4] = MSW(addr);
3386 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3387 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3388 mcp->tov = MBX_TOV_SECONDS;
3390 rval = qla2x00_mailbox_command(vha, mcp);
3392 *data = mcp->mb[2] << 16 | mcp->mb[1];
3394 if (rval != QLA_SUCCESS) {
3395 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3398 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3399 "Done %s.\n", __func__);
3406 * qla2x00_set_serdes_params() -
/*
 * Program per-rate SerDes transmit parameters (MBC_SERDES_PARAMS).
 * Each sw_em_* value is OR'd with BIT_15 — presumably a "value valid"
 * flag for that rate's parameter word; confirm against the firmware
 * interface spec.
 */
3415 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3416 uint16_t sw_em_2g, uint16_t sw_em_4g)
3420 mbx_cmd_t *mcp = &mc;
3422 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3423 "Entered %s.\n", __func__);
3425 mcp->mb[0] = MBC_SERDES_PARAMS;
3427 mcp->mb[2] = sw_em_1g | BIT_15;
3428 mcp->mb[3] = sw_em_2g | BIT_15;
3429 mcp->mb[4] = sw_em_4g | BIT_15;
3430 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3432 mcp->tov = MBX_TOV_SECONDS;
3434 rval = qla2x00_mailbox_command(vha, mcp);
3436 if (rval != QLA_SUCCESS) {
3438 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3439 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3443 "Done %s.\n", __func__);
/*
 * Halt the RISC firmware (MBC_STOP_FIRMWARE).  FWI2-capable adapters
 * only.  MBS_INVALID_COMMAND from the firmware is mapped to
 * QLA_INVALID_COMMAND so callers can distinguish "unsupported" from a
 * genuine failure.
 */
3450 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3454 mbx_cmd_t *mcp = &mc;
3456 if (!IS_FWI2_CAPABLE(vha->hw))
3457 return QLA_FUNCTION_FAILED;
3459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3460 "Entered %s.\n", __func__);
3462 mcp->mb[0] = MBC_STOP_FIRMWARE;
3464 mcp->out_mb = MBX_1|MBX_0;
3468 rval = qla2x00_mailbox_command(vha, mcp);
3470 if (rval != QLA_SUCCESS) {
3471 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3472 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3473 rval = QLA_INVALID_COMMAND;
3475 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3476 "Done %s.\n", __func__);
/*
 * Enable the Extended Firmware Trace (EFT) facility via
 * MBC_TRACE_CONTROL/TC_EFT_ENABLE, pointing the firmware at the
 * caller's DMA buffer of 'buffers' trace buffers.  AENs into the trace
 * are disabled (TC_AEN_DISABLE).  Bails early if the adapter is not
 * FWI2-capable or the PCI channel is offline.
 */
3483 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3488 mbx_cmd_t *mcp = &mc;
3490 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3491 "Entered %s.\n", __func__);
3493 if (!IS_FWI2_CAPABLE(vha->hw))
3494 return QLA_FUNCTION_FAILED;
3496 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3497 return QLA_FUNCTION_FAILED;
3499 mcp->mb[0] = MBC_TRACE_CONTROL;
3500 mcp->mb[1] = TC_EFT_ENABLE;
3501 mcp->mb[2] = LSW(eft_dma);
3502 mcp->mb[3] = MSW(eft_dma);
3503 mcp->mb[4] = LSW(MSD(eft_dma));
3504 mcp->mb[5] = MSW(MSD(eft_dma));
3505 mcp->mb[6] = buffers;
3506 mcp->mb[7] = TC_AEN_DISABLE;
3507 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3508 mcp->in_mb = MBX_1|MBX_0;
3509 mcp->tov = MBX_TOV_SECONDS;
3511 rval = qla2x00_mailbox_command(vha, mcp);
3512 if (rval != QLA_SUCCESS) {
3513 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3514 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3515 rval, mcp->mb[0], mcp->mb[1]);
3517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3518 "Done %s.\n", __func__);
/*
 * Disable the Extended Firmware Trace facility
 * (MBC_TRACE_CONTROL/TC_EFT_DISABLE).  Counterpart of
 * qla2x00_enable_eft_trace(); same FWI2/PCI-offline guards.
 */
3525 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3529 mbx_cmd_t *mcp = &mc;
3531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3532 "Entered %s.\n", __func__);
3534 if (!IS_FWI2_CAPABLE(vha->hw))
3535 return QLA_FUNCTION_FAILED;
3537 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3538 return QLA_FUNCTION_FAILED;
3540 mcp->mb[0] = MBC_TRACE_CONTROL;
3541 mcp->mb[1] = TC_EFT_DISABLE;
3542 mcp->out_mb = MBX_1|MBX_0;
3543 mcp->in_mb = MBX_1|MBX_0;
3544 mcp->tov = MBX_TOV_SECONDS;
3546 rval = qla2x00_mailbox_command(vha, mcp);
3547 if (rval != QLA_SUCCESS) {
3548 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3549 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3550 rval, mcp->mb[0], mcp->mb[1]);
3552 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3553 "Done %s.\n", __func__);
/*
 * Enable Fibre Channel Event (FCE) tracing
 * (MBC_TRACE_CONTROL/TC_FCE_ENABLE) on ISP25xx/81xx/83xx/27xx.  The
 * firmware traces into the caller's DMA buffer using default RX/TX
 * frame-capture sizes; the first 8 result mailboxes are copied back to
 * 'mb' for the caller.
 */
3560 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3561 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3565 mbx_cmd_t *mcp = &mc;
3567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3568 "Entered %s.\n", __func__);
3570 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3571 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3572 return QLA_FUNCTION_FAILED;
3574 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3575 return QLA_FUNCTION_FAILED;
3577 mcp->mb[0] = MBC_TRACE_CONTROL;
3578 mcp->mb[1] = TC_FCE_ENABLE;
3579 mcp->mb[2] = LSW(fce_dma);
3580 mcp->mb[3] = MSW(fce_dma);
3581 mcp->mb[4] = LSW(MSD(fce_dma));
3582 mcp->mb[5] = MSW(MSD(fce_dma));
3583 mcp->mb[6] = buffers;
3584 mcp->mb[7] = TC_AEN_DISABLE;
/* Default capture sizes for received and transmitted frames. */
3586 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3587 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3588 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3590 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3591 mcp->tov = MBX_TOV_SECONDS;
3593 rval = qla2x00_mailbox_command(vha, mcp);
3594 if (rval != QLA_SUCCESS) {
3595 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3596 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3597 rval, mcp->mb[0], mcp->mb[1]);
3599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3600 "Done %s.\n", __func__);
/* Hand the first 8 result mailboxes back to the caller. */
3603 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * Disable FCE tracing (MBC_TRACE_CONTROL/TC_FCE_DISABLE with
 * TC_FCE_DISABLE_TRACE) and return the firmware's final write (*wr)
 * and read (*rd) trace pointers, each reassembled from four 16-bit
 * result mailboxes.
 */
3612 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3616 mbx_cmd_t *mcp = &mc;
3618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3619 "Entered %s.\n", __func__);
3621 if (!IS_FWI2_CAPABLE(vha->hw))
3622 return QLA_FUNCTION_FAILED;
3624 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3625 return QLA_FUNCTION_FAILED;
3627 mcp->mb[0] = MBC_TRACE_CONTROL;
3628 mcp->mb[1] = TC_FCE_DISABLE;
3629 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3630 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3631 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3633 mcp->tov = MBX_TOV_SECONDS;
3635 rval = qla2x00_mailbox_command(vha, mcp);
3636 if (rval != QLA_SUCCESS) {
3637 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3638 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3639 rval, mcp->mb[0], mcp->mb[1]);
3641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3642 "Done %s.\n", __func__);
/* 64-bit write pointer from mb[5..2] (high word first). */
3645 *wr = (uint64_t) mcp->mb[5] << 48 |
3646 (uint64_t) mcp->mb[4] << 32 |
3647 (uint64_t) mcp->mb[3] << 16 |
3648 (uint64_t) mcp->mb[2];
/* 64-bit read pointer from mb[9..6]. */
3650 *rd = (uint64_t) mcp->mb[9] << 48 |
3651 (uint64_t) mcp->mb[8] << 32 |
3652 (uint64_t) mcp->mb[7] << 16 |
3653 (uint64_t) mcp->mb[6];
/*
 * Query the current iIDMA port speed for a loop ID (MBC_PORT_PARAMS
 * with mb[2]=mb[3]=0 meaning "get").  Result speed comes back in mb[3];
 * raw mailbox statuses are also copied to 'mb' for the caller.
 * Requires an iIDMA-capable adapter.
 */
3660 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3661 uint16_t *port_speed, uint16_t *mb)
3665 mbx_cmd_t *mcp = &mc;
3667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3668 "Entered %s.\n", __func__);
3670 if (!IS_IIDMA_CAPABLE(vha->hw))
3671 return QLA_FUNCTION_FAILED;
3673 mcp->mb[0] = MBC_PORT_PARAMS;
3674 mcp->mb[1] = loop_id;
/* mb[2]/mb[3] zero selects the "query" form of PORT_PARAMS. */
3675 mcp->mb[2] = mcp->mb[3] = 0;
3676 mcp->mb[9] = vha->vp_idx;
3677 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3678 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3679 mcp->tov = MBX_TOV_SECONDS;
3681 rval = qla2x00_mailbox_command(vha, mcp);
3683 /* Return mailbox statuses. */
3690 if (rval != QLA_SUCCESS) {
3691 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3694 "Done %s.\n", __func__);
3696 *port_speed = mcp->mb[3];
/*
 * Set the iIDMA port speed for a loop ID (MBC_PORT_PARAMS).  CNA
 * adapters accept a wider 6-bit speed mask than FC-only parts (3 bits).
 * Raw mailbox statuses are copied back to 'mb' for the caller.
 */
3703 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3704 uint16_t port_speed, uint16_t *mb)
3708 mbx_cmd_t *mcp = &mc;
3710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3711 "Entered %s.\n", __func__);
3713 if (!IS_IIDMA_CAPABLE(vha->hw))
3714 return QLA_FUNCTION_FAILED;
3716 mcp->mb[0] = MBC_PORT_PARAMS;
3717 mcp->mb[1] = loop_id;
/* Mask the speed field to the bits the adapter family defines. */
3719 if (IS_CNA_CAPABLE(vha->hw))
3720 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3722 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3723 mcp->mb[9] = vha->vp_idx;
3724 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3725 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3726 mcp->tov = MBX_TOV_SECONDS;
3728 rval = qla2x00_mailbox_command(vha, mcp);
3730 /* Return mailbox statuses. */
3737 if (rval != QLA_SUCCESS) {
3738 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3739 "Failed=%x.\n", rval);
3741 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3742 "Done %s.\n", __func__);
/*
 * Handle a firmware Report-ID-Acquisition (RIDA) IOCB.  Called from the
 * response path when the firmware reports port-ID acquisition for the
 * physical port or a virtual port.  Three entry formats are handled:
 *   format 0 - loop (NL) topology primary-port report,
 *   format 1 - fabric / per-VP report (also covers 27xx N2N detection),
 *   format 2 - N2N (direct-connect) report with remote port info.
 */
3749 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3750 struct vp_rpt_id_entry_24xx *rptid_entry)
3752 struct qla_hw_data *ha = vha->hw;
3753 scsi_qla_host_t *vp = NULL;
3754 unsigned long flags;
3757 struct fc_port *fcport;
3759 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3760 "Entered %s.\n", __func__);
/* Ignore entries the firmware itself flagged as bad. */
3762 if (rptid_entry->entry_status != 0)
/* port_id[] is wire order: [2]=domain, [1]=area, [0]=al_pa. */
3765 id.b.domain = rptid_entry->port_id[2];
3766 id.b.area = rptid_entry->port_id[1];
3767 id.b.al_pa = rptid_entry->port_id[0];
3769 ha->flags.n2n_ae = 0;
/* Format 0: loop topology. */
3771 if (rptid_entry->format == 0) {
3773 ql_dbg(ql_dbg_async, vha, 0x10b7,
3774 "Format 0 : Number of VPs setup %d, number of "
3775 "VPs acquired %d.\n", rptid_entry->vp_setup,
3776 rptid_entry->vp_acquired);
3777 ql_dbg(ql_dbg_async, vha, 0x10b8,
3778 "Primary port id %02x%02x%02x.\n",
3779 rptid_entry->port_id[2], rptid_entry->port_id[1],
3780 rptid_entry->port_id[0]);
3781 ha->current_topology = ISP_CFG_NL;
3782 qlt_update_host_map(vha, id);
/* Format 1: fabric / per-virtual-port report. */
3784 } else if (rptid_entry->format == 1) {
3786 ql_dbg(ql_dbg_async, vha, 0x10b9,
3787 "Format 1: VP[%d] enabled - status %d - with "
3788 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3789 rptid_entry->vp_status,
3790 rptid_entry->port_id[2], rptid_entry->port_id[1],
3791 rptid_entry->port_id[0]);
3792 ql_dbg(ql_dbg_async, vha, 0x5075,
3793 "Format 1: Remote WWPN %8phC.\n",
3794 rptid_entry->u.f1.port_name);
3796 ql_dbg(ql_dbg_async, vha, 0x5075,
3797 "Format 1: WWPN %8phC.\n",
3800 /* N2N. direct connect */
/* 27xx reports N2N via flags bits 1-3 == 2 in a format-1 entry. */
3801 if (IS_QLA27XX(ha) &&
3802 ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
3803 /* if our portname is higher then initiate N2N login */
3804 if (wwn_to_u64(vha->port_name) >
3805 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3806 // ??? qlt_update_host_map(vha, id);
3808 ql_dbg(ql_dbg_async, vha, 0x5075,
3809 "Format 1: Setting n2n_update_needed for id %d\n",
/* Lower WWPN: wait for the remote end to initiate login. */
3812 ql_dbg(ql_dbg_async, vha, 0x5075,
3813 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3814 rptid_entry->u.f1.port_name);
3817 memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
/* Defer the actual login/registration work to the DPC thread. */
3819 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3820 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3821 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3822 ha->flags.n2n_ae = 1;
3826 ha->flags.gpsc_supported = 1;
3827 ha->current_topology = ISP_CFG_F;
3828 /* buffer to buffer credit flag */
3829 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
/* VP index 0 is the physical port. */
3831 if (rptid_entry->vp_idx == 0) {
3832 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3833 /* FA-WWN is only for physical port */
3834 if (qla_ini_mode_enabled(vha) &&
3835 ha->flags.fawwpn_enabled &&
3836 (rptid_entry->u.f1.flags &
3838 memcpy(vha->port_name,
3839 rptid_entry->u.f1.port_name,
3843 qlt_update_host_map(vha, id);
3846 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3847 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3849 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3850 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3851 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3852 "Could not acquire ID for VP[%d].\n",
3853 rptid_entry->vp_idx);
/* Find the scsi_qla_host for the reported virtual-port index. */
3858 spin_lock_irqsave(&ha->vport_slock, flags);
3859 list_for_each_entry(vp, &ha->vp_list, list) {
3860 if (rptid_entry->vp_idx == vp->vp_idx) {
3865 spin_unlock_irqrestore(&ha->vport_slock, flags);
3870 qlt_update_host_map(vp, id);
3873 * Cannot configure here as we are still sitting on the
3874 * response queue. Handle it in dpc context.
3876 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3877 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3878 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3880 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3881 qla2xxx_wake_dpc(vha);
/* Format 2: N2N (point-to-point direct connect). */
3882 } else if (rptid_entry->format == 2) {
3883 ql_dbg(ql_dbg_async, vha, 0x505f,
3884 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3885 rptid_entry->port_id[2], rptid_entry->port_id[1],
3886 rptid_entry->port_id[0]);
3888 ql_dbg(ql_dbg_async, vha, 0x5075,
3889 "N2N: Remote WWPN %8phC.\n",
3890 rptid_entry->u.f2.port_name);
3892 /* N2N. direct connect */
3893 ha->current_topology = ISP_CFG_N;
3894 ha->flags.rida_fmt2 = 1;
3895 vha->d_id.b.domain = rptid_entry->port_id[2];
3896 vha->d_id.b.area = rptid_entry->port_id[1];
3897 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3899 ha->flags.n2n_ae = 1;
3900 spin_lock_irqsave(&ha->vport_slock, flags);
3901 qlt_update_vp_map(vha, SET_AL_PA);
3902 spin_unlock_irqrestore(&ha->vport_slock, flags);
/* Mark all known ports stale; rediscovery decides who survives. */
3904 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3905 fcport->scan_state = QLA_FCPORT_SCAN;
3908 fcport = qla2x00_find_fcport_by_wwpn(vha,
3909 rptid_entry->u.f2.port_name, 1);
/* Known remote port: refresh its state and drive login. */
3912 fcport->plogi_nack_done_deadline = jiffies + HZ;
3913 fcport->scan_state = QLA_FCPORT_FOUND;
3914 switch (fcport->disc_state) {
3916 ql_dbg(ql_dbg_disc, vha, 0x210d,
3917 "%s %d %8phC login\n",
3918 __func__, __LINE__, fcport->port_name);
3919 qla24xx_fcport_handle_login(vha, fcport);
3921 case DSC_DELETE_PEND:
3924 qlt_schedule_sess_for_deletion(fcport);
/* Unknown remote port: queue creation of a new session. */
3928 id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
3929 id.b.area = rptid_entry->u.f2.remote_nport_id[1];
3930 id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
3931 qla24xx_post_newsess_work(vha, &id,
3932 rptid_entry->u.f2.port_name,
3933 rptid_entry->u.f2.node_name,
3941 * qla24xx_modify_vp_config
3942 * Change VP configuration for vha
3945 * vha = adapter block pointer.
3948 * qla2xxx local function return status code.
/*
 * Builds a VP_CONFIG IOCB (modify + enable, one VP) for this vha and
 * issues it through the base (physical) port.  On success the FC vport
 * is moved to INITIALIZING state.
 */
3954 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3957 struct vp_config_entry_24xx *vpmod;
3958 dma_addr_t vpmod_dma;
3959 struct qla_hw_data *ha = vha->hw;
3960 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3962 /* This can be called by the parent */
3964 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3965 "Entered %s.\n", __func__);
3967 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3969 ql_log(ql_log_warn, vha, 0x10bc,
3970 "Failed to allocate modify VP IOCB.\n");
3971 return QLA_MEMORY_ALLOC_FAILED;
3974 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3975 vpmod->entry_count = 1;
3976 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3977 vpmod->vp_count = 1;
3978 vpmod->vp_index1 = vha->vp_idx;
3979 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
/* Let target mode adjust options before the IOCB is issued. */
3981 qlt_modify_vp_config(vha, vpmod);
3983 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3984 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3985 vpmod->entry_count = 1;
/* IOCB must go out via the physical (base) port. */
3987 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3988 if (rval != QLA_SUCCESS) {
3989 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3990 "Failed to issue VP config IOCB (%x).\n", rval);
/* NOTE(review): this branch tests comp_status but logs "error status";
 * it shadows the cpu_to_le16(CS_COMPLETE) check below (CS_COMPLETE is 0,
 * so that branch is unreachable).  First check was presumably meant to
 * be entry_status — confirm against upstream qla_mbx.c. */
3991 } else if (vpmod->comp_status != 0) {
3992 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3993 "Failed to complete IOCB -- error status (%x).\n",
3994 vpmod->comp_status);
3995 rval = QLA_FUNCTION_FAILED;
3996 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3997 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3998 "Failed to complete IOCB -- completion status (%x).\n",
3999 le16_to_cpu(vpmod->comp_status));
4000 rval = QLA_FUNCTION_FAILED;
4003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4004 "Done %s.\n", __func__);
4005 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4007 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4013 * qla2x00_send_change_request
4014 * Receive or disable RSCN request from fabric controller
4017 * ha = adapter block pointer
4018 * format = registration format:
4020 * 1 - Fabric detected registration
4021 * 2 - N_port detected registration
4022 * 3 - Full registration
4023 * FF - clear registration
4024 * vp_idx = Virtual port index
4027 * qla2x00 local function return status code.
/*
 * Issues MBC_SEND_CHANGE_REQUEST (SCR) to register for, or clear,
 * RSCN delivery from the fabric controller for the given vport.
 */
4034 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4039 mbx_cmd_t *mcp = &mc;
4041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4042 "Entered %s.\n", __func__);
4044 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4045 mcp->mb[1] = format;
4046 mcp->mb[9] = vp_idx;
4047 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4048 mcp->in_mb = MBX_0|MBX_1;
4049 mcp->tov = MBX_TOV_SECONDS;
4051 rval = qla2x00_mailbox_command(vha, mcp);
/* A clean mailbox return still needs mb[0] == MBS_COMMAND_COMPLETE. */
4053 if (rval == QLA_SUCCESS) {
4054 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/*
 * Dump a region of RISC RAM to a host DMA buffer.  Uses the extended
 * command (MBC_DUMP_RISC_RAM_EXTENDED) whenever the address needs more
 * than 16 bits or the adapter is FWI2-capable; otherwise the legacy
 * MBC_DUMP_RISC_RAM.  'size' is likewise 32-bit (mb4/mb5) only on
 * FWI2-capable parts.
 */
4064 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4069 mbx_cmd_t *mcp = &mc;
4071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4072 "Entered %s.\n", __func__);
4074 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4075 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
/* Extended form carries the high word of the RISC address in mb[8]. */
4076 mcp->mb[8] = MSW(addr);
4077 mcp->out_mb = MBX_8|MBX_0;
4079 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4080 mcp->out_mb = MBX_0;
4082 mcp->mb[1] = LSW(addr);
4083 mcp->mb[2] = MSW(req_dma);
4084 mcp->mb[3] = LSW(req_dma);
4085 mcp->mb[6] = MSW(MSD(req_dma));
4086 mcp->mb[7] = LSW(MSD(req_dma));
4087 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4088 if (IS_FWI2_CAPABLE(vha->hw)) {
4089 mcp->mb[4] = MSW(size);
4090 mcp->mb[5] = LSW(size);
4091 mcp->out_mb |= MBX_5|MBX_4;
/* Legacy adapters take a 16-bit transfer size in mb[4]. */
4093 mcp->mb[4] = LSW(size);
4094 mcp->out_mb |= MBX_4;
4098 mcp->tov = MBX_TOV_SECONDS;
4100 rval = qla2x00_mailbox_command(vha, mcp);
4102 if (rval != QLA_SUCCESS) {
4103 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4104 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4107 "Done %s.\n", __func__);
4112 /* 84XX Support **************************************************************/
/*
 * IOCB request/response overlay used by qla84xx_verify_chip(). Callers
 * access these members as mn->p.req / mn->p.rsp, which indicates the two
 * structs share storage via a union wrapper not visible in this sampled
 * span — TODO confirm against the full declaration.
 */
4114 struct cs84xx_mgmt_cmd {
4116 struct verify_chip_entry_84xx req;
4117 struct verify_chip_rsp_84xx rsp;
/*
 * qla84xx_verify_chip
 *	Issue a Verify Chip IOCB to the CS84xx (Menlo) and, on success,
 *	record the new operational firmware version. On a completion-status
 *	failure the options are adjusted to retry without a firmware update
 *	(VCO_DONT_UPDATE_FW set, VCO_FORCE_UPDATE cleared).
 *	status[0] returns the completion status; status[1] the failure code
 *	when status[0] == CS_VCS_CHIP_FAILURE.
 * NOTE(review): the retry loop structure and some closing braces are not
 * visible in this sampled span — verify against the full source.
 */
4122 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4125 struct cs84xx_mgmt_cmd *mn;
4128 unsigned long flags;
4129 struct qla_hw_data *ha = vha->hw;
4131 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4132 "Entered %s.\n", __func__);
/* IOCB must live in DMA-able memory; allocated from the shared pool. */
4134 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4136 return QLA_MEMORY_ALLOC_FAILED;
4140 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4141 /* Diagnostic firmware? */
4142 /* options |= MENLO_DIAG_FW; */
4143 /* We update the firmware with only one data sequence. */
4144 options |= VCO_END_OF_DATA;
4148 memset(mn, 0, sizeof(*mn));
4149 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4150 mn->p.req.entry_count = 1;
4151 mn->p.req.options = cpu_to_le16(options);
4153 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4154 "Dump of Verify Request.\n");
4155 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4156 (uint8_t *)mn, sizeof(*mn));
/* 120s timeout: chip verify/update can be slow. */
4158 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4159 if (rval != QLA_SUCCESS) {
4160 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4161 "Failed to issue verify IOCB (%x).\n", rval);
4165 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4166 "Dump of Verify Response.\n");
4167 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4168 (uint8_t *)mn, sizeof(*mn));
4170 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4171 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4172 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4174 "cs=%x fc=%x.\n", status[0], status[1]);
4176 if (status[0] != CS_COMPLETE) {
4177 rval = QLA_FUNCTION_FAILED;
4178 if (!(options & VCO_DONT_UPDATE_FW)) {
4179 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4180 "Firmware update failed. Retrying "
4181 "without update firmware.\n");
4182 options |= VCO_DONT_UPDATE_FW;
4183 options &= ~VCO_FORCE_UPDATE;
4187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4188 "Firmware updated to %x.\n",
4189 le32_to_cpu(mn->p.rsp.fw_ver));
4191 /* NOTE: we only update OP firmware. */
/* cs84xx state is shared across functions; version update is locked. */
4192 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4193 ha->cs84xx->op_fw_version =
4194 le32_to_cpu(mn->p.rsp.fw_ver);
4195 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4201 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4203 if (rval != QLA_SUCCESS) {
4204 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4205 "Failed=%x.\n", rval);
4207 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4208 "Done %s.\n", __func__);
/*
 * qla25xx_init_req_que
 *	Initialize a multiqueue request queue in firmware via
 *	MBC_INITIALIZE_MULTIQ: passes queue options, 64-bit ring DMA
 *	address, length, ids and QoS, and zeroes the hardware in/out
 *	pointers for a newly created queue. No-op unless firmware has
 *	been started.
 * NOTE(review): some lines (e.g. the `return` for the !fw_started path,
 * mb[14]/mb[8] setup, closing braces) are elided in this sampled span —
 * verify against the full source.
 */
4215 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4218 unsigned long flags;
4220 mbx_cmd_t *mcp = &mc;
4221 struct qla_hw_data *ha = vha->hw;
4223 if (!ha->flags.fw_started)
4226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4227 "Entered %s.\n", __func__);
/* BIT_13 enables shadow registers where the HW supports them. */
4229 if (IS_SHADOW_REG_CAPABLE(ha))
4230 req->options |= BIT_13;
4232 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4233 mcp->mb[1] = req->options;
/* 64-bit ring base: mb[2]/mb[3] low dword, mb[6]/mb[7] high dword. */
4234 mcp->mb[2] = MSW(LSD(req->dma));
4235 mcp->mb[3] = LSW(LSD(req->dma));
4236 mcp->mb[6] = MSW(MSD(req->dma));
4237 mcp->mb[7] = LSW(MSD(req->dma));
4238 mcp->mb[5] = req->length;
4240 mcp->mb[10] = req->rsp->id;
4241 mcp->mb[12] = req->qos;
4242 mcp->mb[11] = req->vp_idx;
4243 mcp->mb[13] = req->rid;
4244 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4247 mcp->mb[4] = req->id;
4248 /* que in ptr index */
4250 /* que out ptr index */
4251 mcp->mb[9] = *req->out_ptr = 0;
4252 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4253 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4255 mcp->flags = MBX_DMA_OUT;
4256 mcp->tov = MBX_TOV_SECONDS * 2;
4258 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4259 mcp->in_mb |= MBX_1;
4260 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4261 mcp->out_mb |= MBX_15;
4262 /* debug q create issue in SR-IOV */
4263 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Zero the hardware queue pointers before the firmware touches them;
 * done under hardware_lock to serialize register access. */
4266 spin_lock_irqsave(&ha->hardware_lock, flags);
4267 if (!(req->options & BIT_0)) {
4268 WRT_REG_DWORD(req->req_q_in, 0);
4269 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4270 WRT_REG_DWORD(req->req_q_out, 0);
4272 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4274 rval = qla2x00_mailbox_command(vha, mcp);
4275 if (rval != QLA_SUCCESS) {
4276 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4277 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4279 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4280 "Done %s.\n", __func__);
/*
 * qla25xx_init_rsp_que
 *	Initialize a multiqueue response queue in firmware via
 *	MBC_INITIALIZE_MULTIQ — mirror of qla25xx_init_req_que() for the
 *	response side: passes options, 64-bit ring DMA address, length,
 *	MSI-X vector entry and rid, then zeroes the hardware queue
 *	pointers. No-op unless firmware has been started.
 * NOTE(review): some lines are elided in this sampled span (early
 * `return`, mb[9] out-pointer setup, closing braces) — verify against
 * the full source.
 */
4287 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4290 unsigned long flags;
4292 mbx_cmd_t *mcp = &mc;
4293 struct qla_hw_data *ha = vha->hw;
4295 if (!ha->flags.fw_started)
4298 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4299 "Entered %s.\n", __func__);
4301 if (IS_SHADOW_REG_CAPABLE(ha))
4302 rsp->options |= BIT_13;
4304 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4305 mcp->mb[1] = rsp->options;
4306 mcp->mb[2] = MSW(LSD(rsp->dma));
4307 mcp->mb[3] = LSW(LSD(rsp->dma));
4308 mcp->mb[6] = MSW(MSD(rsp->dma));
4309 mcp->mb[7] = LSW(MSD(rsp->dma));
4310 mcp->mb[5] = rsp->length;
/* Response queues carry the MSI-X vector entry in mb[14]. */
4311 mcp->mb[14] = rsp->msix->entry;
4312 mcp->mb[13] = rsp->rid;
4313 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4316 mcp->mb[4] = rsp->id;
4317 /* que in ptr index */
4318 mcp->mb[8] = *rsp->in_ptr = 0;
4319 /* que out ptr index */
4321 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4322 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4324 mcp->flags = MBX_DMA_OUT;
4325 mcp->tov = MBX_TOV_SECONDS * 2;
4327 if (IS_QLA81XX(ha)) {
4328 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4329 mcp->in_mb |= MBX_1;
4330 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4331 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4332 mcp->in_mb |= MBX_1;
4333 /* debug q create issue in SR-IOV */
4334 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
/* Reset hardware pointers under hardware_lock before enabling. */
4337 spin_lock_irqsave(&ha->hardware_lock, flags);
4338 if (!(rsp->options & BIT_0)) {
4339 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4340 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4341 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4344 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4346 rval = qla2x00_mailbox_command(vha, mcp);
4347 if (rval != QLA_SUCCESS) {
4348 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4349 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4352 "Done %s.\n", __func__);
/*
 * qla81xx_idc_ack
 *	Acknowledge an inter-driver-communication (IDC) notification by
 *	echoing the received QLA_IDC_ACK_REGS mailbox words back to the
 *	firmware in mb[1..].
 */
4359 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4363 mbx_cmd_t *mcp = &mc;
4365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4366 "Entered %s.\n", __func__);
4368 mcp->mb[0] = MBC_IDC_ACK;
/* Copy the caller-provided IDC register snapshot into mb[1..7]. */
4369 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4370 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4372 mcp->tov = MBX_TOV_SECONDS;
4374 rval = qla2x00_mailbox_command(vha, mcp);
4376 if (rval != QLA_SUCCESS) {
4377 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4378 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4381 "Done %s.\n", __func__);
/*
 * qla81xx_fac_get_sector_size
 *	Query the flash sector size through the Flash Access Control (FAC)
 *	mailbox command. Only supported on ISP81xx/83xx/27xx. On success
 *	the size is returned in *sector_size (from mb[1]).
 */
4388 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4392 mbx_cmd_t *mcp = &mc;
4394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4395 "Entered %s.\n", __func__);
4397 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4398 !IS_QLA27XX(vha->hw))
4399 return QLA_FUNCTION_FAILED;
4401 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4402 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4403 mcp->out_mb = MBX_1|MBX_0;
4404 mcp->in_mb = MBX_1|MBX_0;
4405 mcp->tov = MBX_TOV_SECONDS;
4407 rval = qla2x00_mailbox_command(vha, mcp);
4409 if (rval != QLA_SUCCESS) {
4410 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4411 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4412 rval, mcp->mb[0], mcp->mb[1]);
4414 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4415 "Done %s.\n", __func__);
4416 *sector_size = mcp->mb[1];
/*
 * qla81xx_fac_do_write_enable
 *	Toggle flash write protection through the FAC mailbox command:
 *	enable != 0 requests WRITE_ENABLE, otherwise WRITE_PROTECT.
 *	Only supported on ISP81xx/83xx/27xx.
 */
4423 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4427 mbx_cmd_t *mcp = &mc;
4429 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4430 !IS_QLA27XX(vha->hw))
4431 return QLA_FUNCTION_FAILED;
4433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4434 "Entered %s.\n", __func__);
4436 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4437 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4438 FAC_OPT_CMD_WRITE_PROTECT;
4439 mcp->out_mb = MBX_1|MBX_0;
4440 mcp->in_mb = MBX_1|MBX_0;
4441 mcp->tov = MBX_TOV_SECONDS;
4443 rval = qla2x00_mailbox_command(vha, mcp);
4445 if (rval != QLA_SUCCESS) {
4446 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4447 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4448 rval, mcp->mb[0], mcp->mb[1]);
4450 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4451 "Done %s.\n", __func__);
/*
 * qla81xx_fac_erase_sector
 *	Erase a flash region [start, finish] through the FAC mailbox
 *	command. Addresses are split 16/16 across mb[2..5] (LSW first).
 *	Only supported on ISP81xx/83xx/27xx.
 */
4458 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4462 mbx_cmd_t *mcp = &mc;
4464 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4465 !IS_QLA27XX(vha->hw))
4466 return QLA_FUNCTION_FAILED;
4468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4469 "Entered %s.\n", __func__);
4471 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4472 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4473 mcp->mb[2] = LSW(start);
4474 mcp->mb[3] = MSW(start);
4475 mcp->mb[4] = LSW(finish);
4476 mcp->mb[5] = MSW(finish);
4477 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4478 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4479 mcp->tov = MBX_TOV_SECONDS;
4481 rval = qla2x00_mailbox_command(vha, mcp);
4483 if (rval != QLA_SUCCESS) {
4484 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4485 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4486 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4488 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4489 "Done %s.\n", __func__);
/*
 * qla81xx_restart_mpi_firmware
 *	Request a restart of the MPI (management processor) firmware via
 *	the MBC_RESTART_MPI_FW mailbox command.
 */
4496 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4500 mbx_cmd_t *mcp = &mc;
4502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4503 "Entered %s.\n", __func__);
4505 mcp->mb[0] = MBC_RESTART_MPI_FW;
4506 mcp->out_mb = MBX_0;
4507 mcp->in_mb = MBX_0|MBX_1;
4508 mcp->tov = MBX_TOV_SECONDS;
4510 rval = qla2x00_mailbox_command(vha, mcp);
4512 if (rval != QLA_SUCCESS) {
4513 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4514 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4515 rval, mcp->mb[0], mcp->mb[1]);
4517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4518 "Done %s.\n", __func__);
/*
 * qla82xx_set_driver_version
 *	Report the driver version string to P3P-type (ISP82xx-family)
 *	firmware via MBC_SET_RNID_PARAMS / RNID_TYPE_SET_VERSION. The
 *	string is packed two bytes at a time into mb[4..15]; remaining
 *	mailbox words are still marked for output.
 * NOTE(review): lines appear elided in this sampled span (declarations,
 * zero-padding of unused words, return) — verify against full source.
 */
4525 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4529 mbx_cmd_t *mcp = &mc;
4533 struct qla_hw_data *ha = vha->hw;
4535 if (!IS_P3P_TYPE(ha))
4536 return QLA_FUNCTION_FAILED;
4538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4539 "Entered %s.\n", __func__);
4541 str = (void *)version;
4542 len = strlen(version);
4544 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4545 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4546 mcp->out_mb = MBX_1|MBX_0;
/* Pack the version string 16 bits per mailbox word (little-endian). */
4547 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4548 mcp->mb[i] = cpu_to_le16p(str);
4549 mcp->out_mb |= 1<<i;
4551 for (; i < 16; i++) {
4553 mcp->out_mb |= 1<<i;
4555 mcp->in_mb = MBX_1|MBX_0;
4556 mcp->tov = MBX_TOV_SECONDS;
4558 rval = qla2x00_mailbox_command(vha, mcp);
4560 if (rval != QLA_SUCCESS) {
4561 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4562 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4565 "Done %s.\n", __func__);
/*
 * qla25xx_set_driver_version
 *	Report the driver version to FWI2 firmware via a DMA buffer
 *	(MBC_SET_RNID_PARAMS / RNID_TYPE_SET_VERSION with dword length in
 *	mb[1] low byte). The buffer starts with a 4-byte header
 *	("\x7\x3\x11\x0") followed by the version string, truncated to
 *	the buffer size. Buffer comes from the shared DMA pool and is
 *	freed before returning.
 * NOTE(review): declarations and dwlen initialization are elided in
 * this sampled span — verify against full source.
 */
4572 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4576 mbx_cmd_t *mcp = &mc;
4581 struct qla_hw_data *ha = vha->hw;
4583 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4585 return QLA_FUNCTION_FAILED;
4587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4588 "Entered %s.\n", __func__);
4590 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4592 ql_log(ql_log_warn, vha, 0x117f,
4593 "Failed to allocate driver version param.\n");
4594 return QLA_MEMORY_ALLOC_FAILED;
/* 4-byte parameter header precedes the version text. */
4597 memcpy(str, "\x7\x3\x11\x0", 4);
4599 len = dwlen * 4 - 4;
4600 memset(str + 4, 0, len);
4601 if (len > strlen(version))
4602 len = strlen(version);
4603 memcpy(str + 4, version, len);
4605 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4606 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4607 mcp->mb[2] = MSW(LSD(str_dma));
4608 mcp->mb[3] = LSW(LSD(str_dma));
4609 mcp->mb[6] = MSW(MSD(str_dma));
4610 mcp->mb[7] = LSW(MSD(str_dma));
4611 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4612 mcp->in_mb = MBX_1|MBX_0;
4613 mcp->tov = MBX_TOV_SECONDS;
4615 rval = qla2x00_mailbox_command(vha, mcp);
4617 if (rval != QLA_SUCCESS) {
4618 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4619 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4622 "Done %s.\n", __func__);
4625 dma_pool_free(ha->s_dma_pool, str, str_dma);
/*
 * qla24xx_get_port_login_templ
 *	Fetch the port-login (PLOGI) payload template from firmware into
 *	the caller's DMA buffer (MBC_GET_RNID_PARAMS /
 *	RNID_TYPE_PORT_LOGIN, size given in dwords via mb[8]). On
 *	success the returned dwords are byte-swapped in place to
 *	big-endian wire order.
 */
4631 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4632 void *buf, uint16_t bufsiz)
4636 mbx_cmd_t *mcp = &mc;
4639 if (!IS_FWI2_CAPABLE(vha->hw))
4640 return QLA_FUNCTION_FAILED;
4642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4643 "Entered %s.\n", __func__);
4645 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4646 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4647 mcp->mb[2] = MSW(buf_dma);
4648 mcp->mb[3] = LSW(buf_dma);
4649 mcp->mb[6] = MSW(MSD(buf_dma));
4650 mcp->mb[7] = LSW(MSD(buf_dma));
4651 mcp->mb[8] = bufsiz/4;
4652 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4653 mcp->in_mb = MBX_1|MBX_0;
4654 mcp->tov = MBX_TOV_SECONDS;
4656 rval = qla2x00_mailbox_command(vha, mcp);
4658 if (rval != QLA_SUCCESS) {
4659 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4660 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4663 "Done %s.\n", __func__);
/* Convert template dwords to big-endian for the wire format. */
4664 bp = (uint32_t *) buf;
4665 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4666 *bp = cpu_to_be32(*bp);
/*
 * qla2x00_read_asic_temperature
 *	Read the ASIC temperature from firmware via MBC_GET_RNID_PARAMS /
 *	RNID_TYPE_ASIC_TEMP. NOTE(review): the assignment of *temp (from
 *	a returned mailbox word) is not visible in this sampled span —
 *	verify against full source.
 */
4673 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4677 mbx_cmd_t *mcp = &mc;
4679 if (!IS_FWI2_CAPABLE(vha->hw))
4680 return QLA_FUNCTION_FAILED;
4682 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4683 "Entered %s.\n", __func__);
4685 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4686 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4687 mcp->out_mb = MBX_1|MBX_0;
4688 mcp->in_mb = MBX_1|MBX_0;
4689 mcp->tov = MBX_TOV_SECONDS;
4691 rval = qla2x00_mailbox_command(vha, mcp);
4694 if (rval != QLA_SUCCESS) {
4695 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4696 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4699 "Done %s.\n", __func__);
/*
 * qla2x00_read_sfp
 *	Read `len` bytes from the SFP transceiver at I2C device address
 *	`dev`, offset `off`, into the caller's DMA buffer. A command
 *	error is translated to QLA_INTERFACE_ERROR (SFP not present).
 * NOTE(review): mb[1]/mb[8..10] setup lines are elided in this sampled
 * span — verify against full source.
 */
4706 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4707 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4711 mbx_cmd_t *mcp = &mc;
4712 struct qla_hw_data *ha = vha->hw;
4714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4715 "Entered %s.\n", __func__);
4717 if (!IS_FWI2_CAPABLE(ha))
4718 return QLA_FUNCTION_FAILED;
4723 mcp->mb[0] = MBC_READ_SFP;
4725 mcp->mb[2] = MSW(sfp_dma);
4726 mcp->mb[3] = LSW(sfp_dma);
4727 mcp->mb[6] = MSW(MSD(sfp_dma));
4728 mcp->mb[7] = LSW(MSD(sfp_dma));
4732 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4733 mcp->in_mb = MBX_1|MBX_0;
4734 mcp->tov = MBX_TOV_SECONDS;
4736 rval = qla2x00_mailbox_command(vha, mcp);
4741 if (rval != QLA_SUCCESS) {
4742 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4743 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* A command error here is interpreted as "no SFP installed". */
4744 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4746 /* sfp is not there */
4747 rval = QLA_INTERFACE_ERROR;
4749 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4750 "Done %s.\n", __func__);
/*
 * qla2x00_write_sfp
 *	Write `len` bytes from the caller's DMA buffer to the SFP
 *	transceiver at I2C device address `dev`, offset `off` — write-side
 *	mirror of qla2x00_read_sfp().
 * NOTE(review): mb[1]/mb[8..10] setup lines are elided in this sampled
 * span — verify against full source.
 */
4757 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4758 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4762 mbx_cmd_t *mcp = &mc;
4763 struct qla_hw_data *ha = vha->hw;
4765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4766 "Entered %s.\n", __func__);
4768 if (!IS_FWI2_CAPABLE(ha))
4769 return QLA_FUNCTION_FAILED;
4777 mcp->mb[0] = MBC_WRITE_SFP;
4779 mcp->mb[2] = MSW(sfp_dma);
4780 mcp->mb[3] = LSW(sfp_dma);
4781 mcp->mb[6] = MSW(MSD(sfp_dma));
4782 mcp->mb[7] = LSW(MSD(sfp_dma));
4786 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4787 mcp->in_mb = MBX_1|MBX_0;
4788 mcp->tov = MBX_TOV_SECONDS;
4790 rval = qla2x00_mailbox_command(vha, mcp);
4792 if (rval != QLA_SUCCESS) {
4793 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4794 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4797 "Done %s.\n", __func__);
/*
 * qla2x00_get_xgmac_stats
 *	Retrieve XGMAC (10G Ethernet MAC) statistics into the caller's
 *	DMA buffer. CNA-capable adapters only. Size is passed and
 *	returned in dwords (mb[8] out, mb[2] in); *actual_size reports
 *	the byte count actually written.
 */
4804 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4805 uint16_t size_in_bytes, uint16_t *actual_size)
4809 mbx_cmd_t *mcp = &mc;
4811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4812 "Entered %s.\n", __func__);
4814 if (!IS_CNA_CAPABLE(vha->hw))
4815 return QLA_FUNCTION_FAILED;
4817 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4818 mcp->mb[2] = MSW(stats_dma);
4819 mcp->mb[3] = LSW(stats_dma);
4820 mcp->mb[6] = MSW(MSD(stats_dma));
4821 mcp->mb[7] = LSW(MSD(stats_dma));
/* Firmware expects the buffer size in dwords. */
4822 mcp->mb[8] = size_in_bytes >> 2;
4823 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4824 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4825 mcp->tov = MBX_TOV_SECONDS;
4827 rval = qla2x00_mailbox_command(vha, mcp);
4829 if (rval != QLA_SUCCESS) {
4830 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4831 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4832 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4835 "Done %s.\n", __func__);
/* mb[2] holds the dword count written; convert back to bytes. */
4838 *actual_size = mcp->mb[2] << 2;
/*
 * qla2x00_get_dcbx_params
 *	Retrieve the DCBX parameter TLV block into the caller's DMA
 *	buffer. CNA-capable adapters only.
 * NOTE(review): mb[1]/mb[8] setup lines are elided in this sampled
 * span — verify against full source.
 */
4845 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4850 mbx_cmd_t *mcp = &mc;
4852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4853 "Entered %s.\n", __func__);
4855 if (!IS_CNA_CAPABLE(vha->hw))
4856 return QLA_FUNCTION_FAILED;
4858 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4860 mcp->mb[2] = MSW(tlv_dma);
4861 mcp->mb[3] = LSW(tlv_dma);
4862 mcp->mb[6] = MSW(MSD(tlv_dma));
4863 mcp->mb[7] = LSW(MSD(tlv_dma));
4865 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4866 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4867 mcp->tov = MBX_TOV_SECONDS;
4869 rval = qla2x00_mailbox_command(vha, mcp);
4871 if (rval != QLA_SUCCESS) {
4872 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4873 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4874 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4876 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4877 "Done %s.\n", __func__);
/*
 * qla2x00_read_ram_word
 *	Read one 32-bit word from RISC RAM at risc_addr (split 16/16
 *	across mb[1]/mb[8]); the result is assembled from mb[3]:mb[2].
 *	FWI2-capable adapters only.
 */
4884 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4888 mbx_cmd_t *mcp = &mc;
4890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4891 "Entered %s.\n", __func__);
4893 if (!IS_FWI2_CAPABLE(vha->hw))
4894 return QLA_FUNCTION_FAILED;
4896 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4897 mcp->mb[1] = LSW(risc_addr);
4898 mcp->mb[8] = MSW(risc_addr);
4899 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4900 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4903 rval = qla2x00_mailbox_command(vha, mcp);
4904 if (rval != QLA_SUCCESS) {
4905 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4906 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4909 "Done %s.\n", __func__);
/* Reassemble the 32-bit word: high half in mb[3], low half in mb[2]. */
4910 *data = mcp->mb[3] << 16 | mcp->mb[2];
/*
 * qla2x00_loopback_test
 *	Run the diagnostic loop-back test: transmit mreq->transfer_size
 *	bytes from send_dma and receive into rcv_dma for
 *	iteration_count iterations. On return the raw mailbox registers
 *	are copied to mresp for the caller to inspect.
 */
4917 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4922 mbx_cmd_t *mcp = &mc;
4924 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4925 "Entered %s.\n", __func__);
4927 memset(mcp->mb, 0 , sizeof(mcp->mb));
4928 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4929 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
4931 /* transfer count */
4932 mcp->mb[10] = LSW(mreq->transfer_size);
4933 mcp->mb[11] = MSW(mreq->transfer_size);
4935 /* send data address */
4936 mcp->mb[14] = LSW(mreq->send_dma);
4937 mcp->mb[15] = MSW(mreq->send_dma);
4938 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4939 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4941 /* receive data address */
4942 mcp->mb[16] = LSW(mreq->rcv_dma);
4943 mcp->mb[17] = MSW(mreq->rcv_dma);
4944 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4945 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4947 /* Iteration count */
4948 mcp->mb[18] = LSW(mreq->iteration_count);
4949 mcp->mb[19] = MSW(mreq->iteration_count);
4951 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4952 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
/* CNA parts take an extra output word in mb[2]. */
4953 if (IS_CNA_CAPABLE(vha->hw))
4954 mcp->out_mb |= MBX_2;
4955 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4957 mcp->buf_size = mreq->transfer_size;
4958 mcp->tov = MBX_TOV_SECONDS;
4959 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4961 rval = qla2x00_mailbox_command(vha, mcp);
4963 if (rval != QLA_SUCCESS) {
4964 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4965 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4966 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4967 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4970 "Done %s.\n", __func__);
4973 /* Copy mailbox information */
4974 memcpy( mresp, mcp->mb, 64);
/*
 * qla2x00_echo_test
 *	Run the diagnostic ECHO test: send mreq->transfer_size bytes and
 *	receive the echo into rcv_dma. On CNA adapters the FCoE FCF index
 *	is supplied in mb[2]. Raw mailbox registers are copied to mresp
 *	for the caller.
 * NOTE(review): some lines (mb[11], in_mb base value, closing braces)
 * are elided in this sampled span — verify against full source.
 */
4979 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4984 mbx_cmd_t *mcp = &mc;
4985 struct qla_hw_data *ha = vha->hw;
4987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4988 "Entered %s.\n", __func__);
4990 memset(mcp->mb, 0 , sizeof(mcp->mb));
4991 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4992 /* BIT_6 specifies 64bit address */
4993 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4994 if (IS_CNA_CAPABLE(ha)) {
4995 mcp->mb[2] = vha->fcoe_fcf_idx;
4997 mcp->mb[16] = LSW(mreq->rcv_dma);
4998 mcp->mb[17] = MSW(mreq->rcv_dma);
4999 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5000 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5002 mcp->mb[10] = LSW(mreq->transfer_size);
5004 mcp->mb[14] = LSW(mreq->send_dma);
5005 mcp->mb[15] = MSW(mreq->send_dma);
5006 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5007 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5009 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5010 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5011 if (IS_CNA_CAPABLE(ha))
5012 mcp->out_mb |= MBX_2;
/* Extra input words vary by ISP family. */
5015 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5016 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5017 mcp->in_mb |= MBX_1;
5018 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5019 mcp->in_mb |= MBX_3;
5021 mcp->tov = MBX_TOV_SECONDS;
5022 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5023 mcp->buf_size = mreq->transfer_size;
5025 rval = qla2x00_mailbox_command(vha, mcp);
5027 if (rval != QLA_SUCCESS) {
5028 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5029 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5030 rval, mcp->mb[0], mcp->mb[1]);
5032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5033 "Done %s.\n", __func__);
5036 /* Copy mailbox information */
5037 memcpy(mresp, mcp->mb, 64);
/*
 * qla84xx_reset_chip
 *	Reset the CS84xx chip via MBC_ISP84XX_RESET; enable_diagnostic
 *	selects diagnostic mode via mb[1].
 */
5042 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5046 mbx_cmd_t *mcp = &mc;
5048 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5049 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5051 mcp->mb[0] = MBC_ISP84XX_RESET;
5052 mcp->mb[1] = enable_diagnostic;
5053 mcp->out_mb = MBX_1|MBX_0;
5054 mcp->in_mb = MBX_1|MBX_0;
5055 mcp->tov = MBX_TOV_SECONDS;
5056 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5057 rval = qla2x00_mailbox_command(vha, mcp);
5059 if (rval != QLA_SUCCESS)
5060 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5062 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5063 "Done %s.\n", __func__);
/*
 * qla2x00_write_ram_word
 *	Write one 32-bit word `data` to RISC RAM at risc_addr (address
 *	split across mb[1]/mb[8], data across mb[2]/mb[3]). FWI2-capable
 *	adapters only. Write-side mirror of qla2x00_read_ram_word().
 */
5069 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5073 mbx_cmd_t *mcp = &mc;
5075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5076 "Entered %s.\n", __func__);
5078 if (!IS_FWI2_CAPABLE(vha->hw))
5079 return QLA_FUNCTION_FAILED;
5081 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5082 mcp->mb[1] = LSW(risc_addr);
5083 mcp->mb[2] = LSW(data);
5084 mcp->mb[3] = MSW(data);
5085 mcp->mb[8] = MSW(risc_addr);
5086 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5090 rval = qla2x00_mailbox_command(vha, mcp);
5091 if (rval != QLA_SUCCESS) {
5092 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5093 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5096 "Done %s.\n", __func__);
/*
 * qla81xx_write_mpi_register
 *	Write MPI registers by driving the mailbox registers directly
 *	(bypassing qla2x00_mailbox_command): loads MBC_WRITE_MPI_REGISTER
 *	and four data words, sets the host interrupt, then busy-polls the
 *	host status register for the mailbox completion interrupt and
 *	takes the result from mailbox0.
 * NOTE(review): parts of the poll loop (delay, interrupt-clear paths,
 * loop close) are elided in this sampled span — verify against full
 * source.
 */
5103 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5106 uint32_t stat, timer;
5108 struct qla_hw_data *ha = vha->hw;
5109 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5113 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5114 "Entered %s.\n", __func__);
5116 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5118 /* Write the MBC data to the registers */
5119 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5120 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5121 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5122 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5123 WRT_REG_WORD(&reg->mailbox4, mb[3]);
/* Ring the doorbell: raise the host interrupt to start the command. */
5125 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5127 /* Poll for MBC interrupt */
5128 for (timer = 6000000; timer; timer--) {
5129 /* Check for pending interrupts. */
5130 stat = RD_REG_DWORD(&reg->host_status);
5131 if (stat & HSRX_RISC_INT) {
/* Status codes 0x1/0x2/0x10/0x11 indicate mailbox completion. */
5134 if (stat == 0x1 || stat == 0x2 ||
5135 stat == 0x10 || stat == 0x11) {
5136 set_bit(MBX_INTERRUPT,
5137 &ha->mbx_cmd_flags);
5138 mb0 = RD_REG_WORD(&reg->mailbox0);
5139 WRT_REG_DWORD(&reg->hccr,
5140 HCCRX_CLR_RISC_INT);
5141 RD_REG_DWORD(&reg->hccr);
5148 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5149 rval = mb0 & MBS_MASK;
5151 rval = QLA_FUNCTION_FAILED;
5153 if (rval != QLA_SUCCESS) {
5154 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5155 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5158 "Done %s.\n", __func__);
/*
 * qla2x00_get_data_rate
 *	Query the current link data rate via MBC_DATA_RATE and cache it
 *	in ha->link_data_rate (mb[1] value 0x7 is treated as "no
 *	update"). FWI2-capable adapters only.
 */
5165 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5169 mbx_cmd_t *mcp = &mc;
5170 struct qla_hw_data *ha = vha->hw;
5172 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5173 "Entered %s.\n", __func__);
5175 if (!IS_FWI2_CAPABLE(ha))
5176 return QLA_FUNCTION_FAILED;
5178 mcp->mb[0] = MBC_DATA_RATE;
5180 mcp->out_mb = MBX_1|MBX_0;
5181 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5182 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5183 mcp->in_mb |= MBX_3;
5184 mcp->tov = MBX_TOV_SECONDS;
5186 rval = qla2x00_mailbox_command(vha, mcp);
5187 if (rval != QLA_SUCCESS) {
5188 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5189 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5191 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5192 "Done %s.\n", __func__);
5193 if (mcp->mb[1] != 0x7)
5194 ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config
 *	Read the current port configuration words (mb[1..4]) via
 *	MBC_GET_PORT_CONFIG into the caller's 4-word array. Supported on
 *	ISP81xx/83xx/8044 (plus at least one more family whose check is
 *	elided in this sampled span).
 */
5201 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5205 mbx_cmd_t *mcp = &mc;
5206 struct qla_hw_data *ha = vha->hw;
5208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5209 "Entered %s.\n", __func__);
5211 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5213 return QLA_FUNCTION_FAILED;
5214 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5215 mcp->out_mb = MBX_0;
5216 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5217 mcp->tov = MBX_TOV_SECONDS;
5220 rval = qla2x00_mailbox_command(vha, mcp);
5222 if (rval != QLA_SUCCESS) {
5223 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5224 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5226 /* Copy all bits to preserve original value */
5227 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5229 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5230 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config
 *	Write the 4 port-configuration words from the caller's array into
 *	mb[1..4] via MBC_SET_PORT_CONFIG — inverse of
 *	qla81xx_get_port_config().
 */
5236 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5240 mbx_cmd_t *mcp = &mc;
5242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5243 "Entered %s.\n", __func__);
5245 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5246 /* Copy all bits to preserve original setting */
5247 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5248 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5250 mcp->tov = MBX_TOV_SECONDS;
5252 rval = qla2x00_mailbox_command(vha, mcp);
5254 if (rval != QLA_SUCCESS) {
5255 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5256 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5259 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio
 *	Set the FCP priority (low nibble of `priority` in mb[4]) for the
 *	port identified by loop_id on this vport, via MBC_PORT_PARAMS.
 *	ISP24xx/25xx only.
 * NOTE(review): the mb[2]/mb[3] setup conditioned on fcp_prio_enabled
 * is elided in this sampled span — verify against full source.
 */
5266 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5271 mbx_cmd_t *mcp = &mc;
5272 struct qla_hw_data *ha = vha->hw;
5274 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5275 "Entered %s.\n", __func__);
5277 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5278 return QLA_FUNCTION_FAILED;
5280 mcp->mb[0] = MBC_PORT_PARAMS;
5281 mcp->mb[1] = loop_id;
5282 if (ha->flags.fcp_prio_enabled)
5286 mcp->mb[4] = priority & 0xf;
5287 mcp->mb[9] = vha->vp_idx;
5288 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5289 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5292 rval = qla2x00_mailbox_command(vha, mcp);
5300 if (rval != QLA_SUCCESS) {
5301 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5303 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5304 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature through whichever mechanism the
 *	hardware supports: specific ISP25xx subsystem IDs read it over
 *	the SFP/I2C bus, ISP82xx/8044 read an on-chip sensor, and
 *	everything else falls through to the ASIC-temperature mailbox
 *	query. Unsupported families log and fail.
 * NOTE(review): result propagation from `byte` to *temp and some early
 * returns are elided in this sampled span — verify against full source.
 */
5311 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5313 int rval = QLA_FUNCTION_FAILED;
5314 struct qla_hw_data *ha = vha->hw;
5317 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5318 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5319 "Thermal not supported by this card.\n");
5323 if (IS_QLA25XX(ha)) {
/* Specific QLogic/HP subsystem boards expose temperature at SFP
 * device 0x98, offset 0x1 — the `opt` bits differ per board. */
5324 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5325 ha->pdev->subsystem_device == 0x0175) {
5326 rval = qla2x00_read_sfp(vha, 0, &byte,
5327 0x98, 0x1, 1, BIT_13|BIT_0);
5331 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5332 ha->pdev->subsystem_device == 0x338e) {
5333 rval = qla2x00_read_sfp(vha, 0, &byte,
5334 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5338 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5339 "Thermal not supported by this card.\n");
5343 if (IS_QLA82XX(ha)) {
5344 *temp = qla82xx_read_temperature(vha);
5347 } else if (IS_QLA8044(ha)) {
5348 *temp = qla8044_read_temperature(vha);
5353 rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable
 *	Enable firmware interrupt delivery via MBC_TOGGLE_INTERRUPT.
 *	The enable/disable selector in mb[1] is elided in this sampled
 *	span — verify against full source.
 */
5358 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5361 struct qla_hw_data *ha = vha->hw;
5363 mbx_cmd_t *mcp = &mc;
5365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5366 "Entered %s.\n", __func__);
5368 if (!IS_FWI2_CAPABLE(ha))
5369 return QLA_FUNCTION_FAILED;
5371 memset(mcp, 0, sizeof(mbx_cmd_t));
5372 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5375 mcp->out_mb = MBX_1|MBX_0;
5380 rval = qla2x00_mailbox_command(vha, mcp);
5381 if (rval != QLA_SUCCESS) {
5382 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5383 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5385 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5386 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable
 *	Disable firmware interrupt delivery via MBC_TOGGLE_INTERRUPT on
 *	P3P-type adapters — counterpart of qla82xx_mbx_intr_enable().
 *	The selector value in mb[1] is elided in this sampled span.
 */
5393 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5396 struct qla_hw_data *ha = vha->hw;
5398 mbx_cmd_t *mcp = &mc;
5400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5401 "Entered %s.\n", __func__);
5403 if (!IS_P3P_TYPE(ha))
5404 return QLA_FUNCTION_FAILED;
5406 memset(mcp, 0, sizeof(mbx_cmd_t));
5407 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5410 mcp->out_mb = MBX_1|MBX_0;
5415 rval = qla2x00_mailbox_command(vha, mcp);
5416 if (rval != QLA_SUCCESS) {
5417 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5418 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5421 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size
 *	Query the minidump template size from firmware (32-bit command
 *	and subcode split across mb[0..3]) and store it in
 *	ha->md_template_size (assembled from mb[3]:mb[2]). A zero size
 *	is treated as failure.
 */
5428 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5430 struct qla_hw_data *ha = vha->hw;
5432 mbx_cmd_t *mcp = &mc;
5433 int rval = QLA_FUNCTION_FAILED;
5435 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5436 "Entered %s.\n", __func__);
5438 memset(mcp->mb, 0 , sizeof(mcp->mb));
/* 32-bit opcode/subcode are split LSW/MSW across mailbox words. */
5439 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5440 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5441 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5442 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5444 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5445 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5446 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5448 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5449 mcp->tov = MBX_TOV_SECONDS;
5450 rval = qla2x00_mailbox_command(vha, mcp);
5452 /* Always copy back return mailbox values. */
5453 if (rval != QLA_SUCCESS) {
5454 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5455 "mailbox command FAILED=0x%x, subcode=%x.\n",
5456 (mcp->mb[1] << 16) | mcp->mb[0],
5457 (mcp->mb[3] << 16) | mcp->mb[2]);
5459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5460 "Done %s.\n", __func__);
5461 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5462 if (!ha->md_template_size) {
5463 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5464 "Null template size obtained.\n");
5465 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template
 *	Allocate a coherent DMA buffer of ha->md_template_size bytes and
 *	fetch the minidump template into it from firmware (RQST_TMPLT
 *	subcode, buffer address in mb[4..7], size in mb[8..9]).
 *	The buffer/address are kept in ha->md_tmplt_hdr{,_dma}.
 * NOTE(review): the return after a failed allocation is elided in this
 * sampled span — verify against full source.
 */
5472 qla82xx_md_get_template(scsi_qla_host_t *vha)
5474 struct qla_hw_data *ha = vha->hw;
5476 mbx_cmd_t *mcp = &mc;
5477 int rval = QLA_FUNCTION_FAILED;
5479 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5480 "Entered %s.\n", __func__);
5482 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5483 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5484 if (!ha->md_tmplt_hdr) {
5485 ql_log(ql_log_warn, vha, 0x1124,
5486 "Unable to allocate memory for Minidump template.\n");
5490 memset(mcp->mb, 0 , sizeof(mcp->mb));
5491 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5492 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5493 mcp->mb[2] = LSW(RQST_TMPLT);
5494 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit template buffer address, low dword then high dword. */
5495 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5496 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5497 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5498 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5499 mcp->mb[8] = LSW(ha->md_template_size);
5500 mcp->mb[9] = MSW(ha->md_template_size);
5502 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5503 mcp->tov = MBX_TOV_SECONDS;
5504 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5505 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5506 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5507 rval = qla2x00_mailbox_command(vha, mcp);
5509 if (rval != QLA_SUCCESS) {
5510 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5511 "mailbox command FAILED=0x%x, subcode=%x.\n",
5512 ((mcp->mb[1] << 16) | mcp->mb[0]),
5513 ((mcp->mb[3] << 16) | mcp->mb[2]));
5515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5516 "Done %s.\n", __func__);
/*
 * qla8044_md_get_template() - ISP8044 variant of the template fetch: the
 * template is pulled in MINIDUMP_SIZE_36K chunks, advancing a byte offset
 * (passed to firmware in mb[10]/mb[11]) until md_template_size is covered.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5521 qla8044_md_get_template(scsi_qla_host_t *vha)
5523 struct qla_hw_data *ha = vha->hw;
5525 mbx_cmd_t *mcp = &mc;
5526 int rval = QLA_FUNCTION_FAILED;
5527 int offset = 0, size = MINIDUMP_SIZE_36K;
5528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5529 "Entered %s.\n", __func__);
5531 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5532 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5533 if (!ha->md_tmplt_hdr) {
5534 ql_log(ql_log_warn, vha, 0xb11b,
5535 "Unable to allocate memory for Minidump template.\n");
5539 memset(mcp->mb, 0 , sizeof(mcp->mb));
/* One mailbox command per 36K chunk. */
5540 while (offset < ha->md_template_size) {
5541 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5542 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5543 mcp->mb[2] = LSW(RQST_TMPLT);
5544 mcp->mb[3] = MSW(RQST_TMPLT);
/* DMA address of the current chunk (base + offset) in mb[4..7]. */
5545 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5546 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5547 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5548 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5549 mcp->mb[8] = LSW(size);
5550 mcp->mb[9] = MSW(size);
/*
 * NOTE(review): mb[11] = offset & 0xFFFF0000 keeps the HIGH half-word
 * un-shifted; if mb[11] is a 16-bit register this always writes 0.
 * Matches upstream code, but worth confirming against the firmware spec
 * (one would expect MSW(offset)).
 */
5551 mcp->mb[10] = offset & 0x0000FFFF;
5552 mcp->mb[11] = offset & 0xFFFF0000;
5553 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5554 mcp->tov = MBX_TOV_SECONDS;
5555 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5556 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5557 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5558 rval = qla2x00_mailbox_command(vha, mcp);
5560 if (rval != QLA_SUCCESS) {
5561 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5562 "mailbox command FAILED=0x%x, subcode=%x.\n",
5563 ((mcp->mb[1] << 16) | mcp->mb[0]),
5564 ((mcp->mb[3] << 16) | mcp->mb[2]));
5567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5568 "Done %s.\n", __func__);
5569 offset = offset + size;
/*
 * qla81xx_set_led_config() - Program LED configuration words via
 * MBC_SET_LED_CONFIG. QLA81xx takes two config words (mb[1..2]);
 * QLA8031 takes six (mb[1..6]). Other parts are rejected up front.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5575 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5578 struct qla_hw_data *ha = vha->hw;
5580 mbx_cmd_t *mcp = &mc;
5582 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5583 return QLA_FUNCTION_FAILED;
5585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5586 "Entered %s.\n", __func__);
5588 memset(mcp, 0, sizeof(mbx_cmd_t));
5589 mcp->mb[0] = MBC_SET_LED_CONFIG;
5590 mcp->mb[1] = led_cfg[0];
5591 mcp->mb[2] = led_cfg[1];
/* 8031 parts carry four extra LED config words. */
5592 if (IS_QLA8031(ha)) {
5593 mcp->mb[3] = led_cfg[2];
5594 mcp->mb[4] = led_cfg[3];
5595 mcp->mb[5] = led_cfg[4];
5596 mcp->mb[6] = led_cfg[5];
5599 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5601 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5606 rval = qla2x00_mailbox_command(vha, mcp);
5607 if (rval != QLA_SUCCESS) {
5608 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5609 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5612 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config() - Read back LED configuration words via
 * MBC_GET_LED_CONFIG into led_cfg[] (two words on 81xx, six on 8031).
 * Counterpart of qla81xx_set_led_config().
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5619 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5622 struct qla_hw_data *ha = vha->hw;
5624 mbx_cmd_t *mcp = &mc;
5626 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5627 return QLA_FUNCTION_FAILED;
5629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5630 "Entered %s.\n", __func__);
5632 memset(mcp, 0, sizeof(mbx_cmd_t));
5633 mcp->mb[0] = MBC_GET_LED_CONFIG;
5635 mcp->out_mb = MBX_0;
5636 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5638 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5642 rval = qla2x00_mailbox_command(vha, mcp);
5643 if (rval != QLA_SUCCESS) {
5644 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5645 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* Copy returned mailbox words out to the caller's array. */
5647 led_cfg[0] = mcp->mb[1];
5648 led_cfg[1] = mcp->mb[2];
5649 if (IS_QLA8031(ha)) {
5650 led_cfg[2] = mcp->mb[3];
5651 led_cfg[3] = mcp->mb[4];
5652 led_cfg[4] = mcp->mb[5];
5653 led_cfg[5] = mcp->mb[6];
5655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5656 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl() - Turn the beacon LED on/off on P3P (82xx)
 * adapters using MBC_SET_LED_CONFIG. The 'enable' flag is presumably
 * encoded into a mailbox register in an elided line (mb[7] is in out_mb).
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5663 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5666 struct qla_hw_data *ha = vha->hw;
5668 mbx_cmd_t *mcp = &mc;
5670 if (!IS_P3P_TYPE(ha))
5671 return QLA_FUNCTION_FAILED;
5673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5674 "Entered %s.\n", __func__);
5676 memset(mcp, 0, sizeof(mbx_cmd_t));
5677 mcp->mb[0] = MBC_SET_LED_CONFIG;
5683 mcp->out_mb = MBX_7|MBX_0;
5685 mcp->tov = MBX_TOV_SECONDS;
5688 rval = qla2x00_mailbox_command(vha, mcp);
5689 if (rval != QLA_SUCCESS) {
5690 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5691 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5693 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5694 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg() - Write a 32-bit value to a remote (firmware-side)
 * register via MBC_WRITE_REMOTE_REG. Register address goes in mb[1]/mb[2],
 * data in mb[3]/mb[4], each split low-word/high-word.
 * Supported on QLA83xx and QLA27xx only.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5701 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5704 struct qla_hw_data *ha = vha->hw;
5706 mbx_cmd_t *mcp = &mc;
5708 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5709 return QLA_FUNCTION_FAILED;
5711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5712 "Entered %s.\n", __func__);
5714 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5715 mcp->mb[1] = LSW(reg);
5716 mcp->mb[2] = MSW(reg);
5717 mcp->mb[3] = LSW(data);
5718 mcp->mb[4] = MSW(data);
5719 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5721 mcp->in_mb = MBX_1|MBX_0;
5722 mcp->tov = MBX_TOV_SECONDS;
5724 rval = qla2x00_mailbox_command(vha, mcp);
5726 if (rval != QLA_SUCCESS) {
5727 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5728 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5731 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout() - Perform an implicit LOGO of the given fcport
 * using MBC_PORT_LOGOUT with BIT_15 set in mb[10] (implicit-logout flag).
 * ISP2100/2200 do not support implicit LOGO and are rejected up front.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5738 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5741 struct qla_hw_data *ha = vha->hw;
5743 mbx_cmd_t *mcp = &mc;
5745 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5747 "Implicit LOGO Unsupported.\n");
5748 return QLA_FUNCTION_FAILED;
5752 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5753 "Entering %s.\n", __func__);
5755 /* Perform Implicit LOGO. */
5756 mcp->mb[0] = MBC_PORT_LOGOUT;
5757 mcp->mb[1] = fcport->loop_id;
5758 mcp->mb[10] = BIT_15;
5759 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5761 mcp->tov = MBX_TOV_SECONDS;
5763 rval = qla2x00_mailbox_command(vha, mcp);
5764 if (rval != QLA_SUCCESS)
5765 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5766 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5769 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg() - Read a 32-bit remote register via MBC_READ_REMOTE_REG;
 * the value comes back in mb[3] (low) and mb[4] (high). During soft reset,
 * CAMRAM reads can return the sentinel QLA8XXX_BAD_VALUE (0xbad0bad0), so
 * the read is retried (retry loop body partly elided here) for up to 2 s
 * before giving up. QLA83xx/QLA27xx only.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5775 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5779 mbx_cmd_t *mcp = &mc;
5780 struct qla_hw_data *ha = vha->hw;
/* Absolute deadline for the bad-value retry loop: now + 2 seconds. */
5781 unsigned long retry_max_time = jiffies + (2 * HZ);
5783 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5784 return QLA_FUNCTION_FAILED;
5786 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5789 mcp->mb[0] = MBC_READ_REMOTE_REG;
5790 mcp->mb[1] = LSW(reg);
5791 mcp->mb[2] = MSW(reg);
5792 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5793 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5794 mcp->tov = MBX_TOV_SECONDS;
5796 rval = qla2x00_mailbox_command(vha, mcp);
5798 if (rval != QLA_SUCCESS) {
5799 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5800 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5801 rval, mcp->mb[0], mcp->mb[1]);
/* Reassemble the 32-bit register value from the two returned words. */
5803 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5804 if (*data == QLA8XXX_BAD_VALUE) {
5806 * During soft-reset CAMRAM register reads might
5807 * return 0xbad0bad0. So retry for MAX of 2 sec
5808 * while reading camram registers.
5810 if (time_after(jiffies, retry_max_time)) {
5811 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5812 "Failure to read CAMRAM register. "
5813 "data=0x%x.\n", *data);
5814 return QLA_FUNCTION_FAILED;
5819 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware() - Issue MBC_RESTART_NIC_FIRMWARE to reboot
 * the NIC-side firmware on QLA83xx/QLA27xx. On failure, a firmware dump is
 * captured via ha->isp_ops->fw_dump() for post-mortem analysis.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5826 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5830 mbx_cmd_t *mcp = &mc;
5831 struct qla_hw_data *ha = vha->hw;
5833 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5834 return QLA_FUNCTION_FAILED;
5836 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5838 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5839 mcp->out_mb = MBX_0;
5840 mcp->in_mb = MBX_1|MBX_0;
5841 mcp->tov = MBX_TOV_SECONDS;
5843 rval = qla2x00_mailbox_command(vha, mcp);
5845 if (rval != QLA_SUCCESS) {
5846 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5847 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5848 rval, mcp->mb[0], mcp->mb[1]);
/* Capture a firmware dump to aid debugging the failed restart. */
5849 ha->isp_ops->fw_dump(vha, 0);
5851 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control() - Issue MBC_SET_ACCESS_CONTROL (QLA8031 only).
 * The low byte of 'options' is a subcode whose bits select the operation:
 *   BIT_2: pass a start/end address window in mb[2..5]
 *   BIT_5: request sector size (returned in mb[1] -> *sector_size)
 *   BIT_6|BIT_7: driver-lock operations (lock id logged from mb[4]:mb[3])
 *   BIT_3|BIT_4: flash-lock operations (lock id logged from mb[4]:mb[3])
 * On failure a firmware dump is taken.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5858 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5859 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5863 mbx_cmd_t *mcp = &mc;
5864 uint8_t subcode = (uint8_t)options;
5865 struct qla_hw_data *ha = vha->hw;
5867 if (!IS_QLA8031(ha))
5868 return QLA_FUNCTION_FAILED;
5870 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5872 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5873 mcp->mb[1] = options;
5874 mcp->out_mb = MBX_1|MBX_0;
5875 if (subcode & BIT_2) {
5876 mcp->mb[2] = LSW(start_addr);
5877 mcp->mb[3] = MSW(start_addr);
5878 mcp->mb[4] = LSW(end_addr);
5879 mcp->mb[5] = MSW(end_addr);
5880 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5882 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5883 if (!(subcode & (BIT_2 | BIT_5)))
5884 mcp->in_mb |= MBX_4|MBX_3;
5885 mcp->tov = MBX_TOV_SECONDS;
5887 rval = qla2x00_mailbox_command(vha, mcp);
5889 if (rval != QLA_SUCCESS) {
5890 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5891 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5892 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5894 ha->isp_ops->fw_dump(vha, 0);
5896 if (subcode & BIT_5)
5897 *sector_size = mcp->mb[1];
5898 else if (subcode & (BIT_6 | BIT_7)) {
5899 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5900 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5901 } else if (subcode & (BIT_3 | BIT_4)) {
5902 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5903 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5905 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data() - Dump MCTP RAM into a caller-provided DMA
 * buffer using MBC_DUMP_RISC_RAM_EXTENDED. mb[10] carries the RAM ID:
 * BIT_7 marks the ID valid and 0x40 selects the MCTP region.
 * MCTP-capable adapters only.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5912 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5917 mbx_cmd_t *mcp = &mc;
5919 if (!IS_MCTP_CAPABLE(vha->hw))
5920 return QLA_FUNCTION_FAILED;
5922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5923 "Entered %s.\n", __func__);
/* RAM address split across mb[1] (low) and mb[8] (high). */
5925 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5926 mcp->mb[1] = LSW(addr);
5927 mcp->mb[2] = MSW(req_dma);
5928 mcp->mb[3] = LSW(req_dma);
5929 mcp->mb[4] = MSW(size);
5930 mcp->mb[5] = LSW(size);
5931 mcp->mb[6] = MSW(MSD(req_dma));
5932 mcp->mb[7] = LSW(MSD(req_dma));
5933 mcp->mb[8] = MSW(addr);
5934 /* Setting RAM ID to valid */
5935 mcp->mb[10] |= BIT_7;
5936 /* For MCTP RAM ID is 0x40 */
5937 mcp->mb[10] |= 0x40;
5939 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5943 mcp->tov = MBX_TOV_SECONDS;
5945 rval = qla2x00_mailbox_command(vha, mcp);
5947 if (rval != QLA_SUCCESS) {
5948 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5949 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5951 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5952 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics() - Run D-port diagnostics
 * (MBC_DPORT_DIAGNOSTICS) and DMA the results into dd_buf.
 * The buffer is streaming-mapped with dma_map_single(DMA_FROM_DEVICE)
 * and unmapped after the command; a mapping failure returns
 * QLA_MEMORY_ALLOC_FAILED. QLA83xx/QLA27xx only. Uses a 4x mailbox
 * timeout since diagnostics can run long.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
5959 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
5960 void *dd_buf, uint size, uint options)
5964 mbx_cmd_t *mcp = &mc;
5967 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
5968 return QLA_FUNCTION_FAILED;
5970 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
5971 "Entered %s.\n", __func__);
5973 dd_dma = dma_map_single(&vha->hw->pdev->dev,
5974 dd_buf, size, DMA_FROM_DEVICE);
5975 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
5976 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
5977 return QLA_MEMORY_ALLOC_FAILED;
5980 memset(dd_buf, 0, size);
5982 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
5983 mcp->mb[1] = options;
/* 64-bit DMA address: low dword in mb[2]/mb[3], high dword in mb[6]/mb[7]. */
5984 mcp->mb[2] = MSW(LSD(dd_dma));
5985 mcp->mb[3] = LSW(LSD(dd_dma));
5986 mcp->mb[6] = MSW(MSD(dd_dma));
5987 mcp->mb[7] = LSW(MSD(dd_dma));
5989 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5990 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5991 mcp->buf_size = size;
5992 mcp->flags = MBX_DMA_IN;
5993 mcp->tov = MBX_TOV_SECONDS * 4;
5994 rval = qla2x00_mailbox_command(vha, mcp);
5996 if (rval != QLA_SUCCESS) {
5997 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
5999 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6000 "Done %s.\n", __func__);
6003 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6004 size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done() - Completion callback for IOCB-based mailbox
 * commands: records the result code and wakes the waiter sleeping on
 * u.mbx.comp in qla24xx_send_mb_cmd(). The srb is intentionally NOT freed
 * here; the issuing path owns it.
 */
6009 static void qla2x00_async_mb_sp_done(void *s, int res)
6013 sp->u.iocb_cmd.u.mbx.rc = res;
6015 complete(&sp->u.iocb_cmd.u.mbx.comp);
6016 /* don't free sp here. Let the caller do the free */
/*
 * qla24xx_send_mb_cmd() - Submit a mailbox command through the IOCB (srb)
 * path instead of the direct mailbox registers, so non-critical commands
 * do not contend for the mailbox hardware. Blocks until the firmware
 * completes the IOCB (qla2x00_async_mb_sp_done fires the completion),
 * then copies the returned mailbox words back into mcp->mb.
 * Must not be called before firmware is started.
 * NOTE(review): listing is gapped (switch scaffolding, srb release and
 * return elided); comments cover visible lines only.
 */
6020 * This mailbox uses the iocb interface to send MB command.
6021 * This allows non-critial (non chip setup) command to go
6024 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6026 int rval = QLA_FUNCTION_FAILED;
6030 if (!vha->hw->flags.fw_started)
6033 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6037 sp->type = SRB_MB_IOCB;
6038 sp->name = mb_to_str(mcp->mb[0]);
6040 c = &sp->u.iocb_cmd;
6041 c->timeout = qla2x00_async_iocb_timeout;
6042 init_completion(&c->u.mbx.comp);
/* Allow extra slack over the generic async timeout. */
6044 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6046 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6048 sp->done = qla2x00_async_mb_sp_done;
6050 rval = qla2x00_start_sp(sp);
6051 if (rval != QLA_SUCCESS) {
6052 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6053 "%s: %s Failed submission. %x.\n",
6054 __func__, sp->name, rval);
6058 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6059 sp->name, sp->handle);
/* Wait for the done callback, then harvest the returned mailbox words. */
6061 wait_for_completion(&c->u.mbx.comp);
6062 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6066 case QLA_FUNCTION_TIMEOUT:
6067 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6068 __func__, sp->name, rval);
6071 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6072 __func__, sp->name);
6076 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6077 __func__, sp->name, rval);
/*
 * qla24xx_gpdb_wait() - Fetch a port database entry for fcport via
 * MBC_GET_PORT_DATABASE, sent through the blocking IOCB mailbox path
 * (qla24xx_send_mb_cmd), then parse it with __qla24xx_parse_gpdb().
 * Allocates the pd buffer from the driver's DMA pool and frees it on exit.
 * Must not be called from the DPC thread (it blocks on a completion).
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
6092 * NOTE: Do not call this routine from DPC thread
6094 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6096 int rval = QLA_FUNCTION_FAILED;
6098 struct port_database_24xx *pd;
6099 struct qla_hw_data *ha = vha->hw;
6102 if (!vha->hw->flags.fw_started)
6105 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6107 ql_log(ql_log_warn, vha, 0xd047,
6108 "Failed to allocate port database structure.\n");
6112 memset(&mc, 0, sizeof(mc));
6113 mc.mb[0] = MBC_GET_PORT_DATABASE;
6114 mc.mb[1] = cpu_to_le16(fcport->loop_id);
/* DMA address of the pd buffer: mb[2]/mb[3] low dword, mb[6]/mb[7] high. */
6115 mc.mb[2] = MSW(pd_dma);
6116 mc.mb[3] = LSW(pd_dma);
6117 mc.mb[6] = MSW(MSD(pd_dma));
6118 mc.mb[7] = LSW(MSD(pd_dma));
6119 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6120 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6122 rval = qla24xx_send_mb_cmd(vha, &mc);
6123 if (rval != QLA_SUCCESS) {
6124 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6125 "%s: %8phC fail\n", __func__, fcport->port_name);
6129 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6131 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6132 __func__, fcport->port_name);
6136 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb() - Validate and unpack a firmware port-database
 * entry (struct port_database_24xx) into the fc_port. For NVMe-capable
 * ports the login-state nibbles live in the high half of the state bytes;
 * for FCP in the low half. Fails unless the port is in PDS_PRLI_COMPLETE,
 * and returns QLA_NOT_LOGGED_IN if the WWPN no longer matches (device
 * lost mid-way). On success copies names/port_id, sets port_type,
 * supported classes and confirmed-completion capability.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
6141 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6142 struct port_database_24xx *pd)
6144 int rval = QLA_SUCCESS;
6146 u8 current_login_state, last_login_state;
6148 if (fcport->fc4f_nvme) {
/* NVMe login state occupies the upper nibble of each state byte. */
6149 current_login_state = pd->current_login_state >> 4;
6150 last_login_state = pd->last_login_state >> 4;
6152 current_login_state = pd->current_login_state & 0xf;
6153 last_login_state = pd->last_login_state & 0xf;
6156 /* Check for logged in state. */
6157 if (current_login_state != PDS_PRLI_COMPLETE) {
6158 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6159 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6160 current_login_state, last_login_state, fcport->loop_id);
6161 rval = QLA_FUNCTION_FAILED;
6165 if (fcport->loop_id == FC_NO_LOOP_ID ||
6166 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6167 memcmp(fcport->port_name, pd->port_name, 8))) {
6168 /* We lost the device mid way. */
6169 rval = QLA_NOT_LOGGED_IN;
6173 /* Names are little-endian. */
6174 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6175 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6177 /* Get port_id of device. */
6178 fcport->d_id.b.domain = pd->port_id[0];
6179 fcport->d_id.b.area = pd->port_id[1];
6180 fcport->d_id.b.al_pa = pd->port_id[2];
6181 fcport->d_id.b.rsvd_1 = 0;
6183 if (fcport->fc4f_nvme) {
6184 fcport->nvme_prli_service_param =
6185 pd->prli_nvme_svc_param_word_3;
6186 fcport->port_type = FCT_NVME;
6188 /* If not target must be initiator or unknown type. */
6189 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6190 fcport->port_type = FCT_INITIATOR;
6192 fcport->port_type = FCT_TARGET;
6194 /* Passback COS information. */
6195 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6196 FC_COS_CLASS2 : FC_COS_CLASS3;
6198 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6199 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6200 fcport->conf_compl_supported = 1;
/*
 * qla24xx_gidlist_wait() - Retrieve the firmware's ID list
 * (MBC_GET_ID_LIST) into the caller's pre-mapped DMA buffer, using the
 * blocking IOCB mailbox path. The number of entries comes back in mb[1]
 * and is stored via *entries. Must not be called from the DPC thread.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
6208 * qla24xx_gidlist__wait
6209 * NOTE: don't call this routine from DPC thread.
6211 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6212 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6214 int rval = QLA_FUNCTION_FAILED;
6217 if (!vha->hw->flags.fw_started)
6220 memset(&mc, 0, sizeof(mc));
6221 mc.mb[0] = MBC_GET_ID_LIST;
6222 mc.mb[2] = MSW(id_list_dma);
6223 mc.mb[3] = LSW(id_list_dma);
6224 mc.mb[6] = MSW(MSD(id_list_dma));
6225 mc.mb[7] = LSW(MSD(id_list_dma));
6227 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6229 rval = qla24xx_send_mb_cmd(vha, &mc);
6230 if (rval != QLA_SUCCESS) {
6231 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6232 "%s: fail\n", __func__);
6234 *entries = mc.mb[1];
6235 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6236 "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold() - Program the ZIO (zero-interrupt operation)
 * threshold via MBC_GET_SET_ZIO_THRESHOLD; mb[1]=1 selects "set" and
 * mb[2] carries the new threshold value.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
6242 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6246 mbx_cmd_t *mcp = &mc;
6248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6249 "Entered %s\n", __func__);
6251 memset(mcp->mb, 0 , sizeof(mcp->mb));
6252 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6253 mcp->mb[1] = cpu_to_le16(1);
6254 mcp->mb[2] = cpu_to_le16(value);
6255 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6256 mcp->in_mb = MBX_2 | MBX_0;
6257 mcp->tov = MBX_TOV_SECONDS;
6260 rval = qla2x00_mailbox_command(vha, mcp);
6262 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6263 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla27xx_get_zio_threshold() - Read back the current ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD; mb[1]=0 selects "get". On success the value
 * (returned in mb[2]) is presumably stored through *value in an elided
 * line after the success check.
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
6268 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6272 mbx_cmd_t *mcp = &mc;
6274 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6275 "Entered %s\n", __func__);
6277 memset(mcp->mb, 0, sizeof(mcp->mb));
6278 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6279 mcp->mb[1] = cpu_to_le16(0);
6280 mcp->out_mb = MBX_1 | MBX_0;
6281 mcp->in_mb = MBX_2 | MBX_0;
6282 mcp->tov = MBX_TOV_SECONDS;
6285 rval = qla2x00_mailbox_command(vha, mcp);
6286 if (rval == QLA_SUCCESS)
6289 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6290 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla2x00_read_sfp_dev() - Read the SFP transceiver's device data into
 * ha->sfp_data one SFP_BLOCK_SIZE chunk at a time via qla2x00_read_sfp(),
 * optionally copying up to 'count' bytes into the caller's buf. The DMA
 * address, scratch pointer and device offset all advance by one block per
 * iteration; addr selection between blocks is partly elided in this
 * listing ("Skip to next device address").
 * NOTE(review): listing is gapped; comments cover visible lines only.
 */
6296 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6298 struct qla_hw_data *ha = vha->hw;
6299 uint16_t iter, addr, offset;
6300 dma_addr_t phys_addr;
6304 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6306 phys_addr = ha->sfp_data_dma;
6307 sfp_data = ha->sfp_data;
6310 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6312 /* Skip to next device address. */
6317 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6318 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6319 if (rval != QLA_SUCCESS) {
6320 ql_log(ql_log_warn, vha, 0x706d,
6321 "Unable to read SFP data (%x/%x/%x).\n", rval,
/* Copy out only while the caller's buffer still has room. */
6327 if (buf && (c < count)) {
6330 if ((count - c) >= SFP_BLOCK_SIZE)
6331 sz = SFP_BLOCK_SIZE;
6335 memcpy(buf, sfp_data, sz);
6336 buf += SFP_BLOCK_SIZE;
6339 phys_addr += SFP_BLOCK_SIZE;
6340 sfp_data += SFP_BLOCK_SIZE;
6341 offset += SFP_BLOCK_SIZE;
6347 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6348 uint16_t *out_mb, int out_mb_sz)
6350 int rval = QLA_FUNCTION_FAILED;
6353 if (!vha->hw->flags.fw_started)
6356 memset(&mc, 0, sizeof(mc));
6357 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6359 rval = qla24xx_send_mb_cmd(vha, &mc);
6360 if (rval != QLA_SUCCESS) {
6361 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6362 "%s: fail\n", __func__);
6364 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6365 memcpy(out_mb, mc.mb, out_mb_sz);
6367 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6369 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6370 "%s: done\n", __func__);